ZTWHHH commited on
Commit
729dfb9
·
verified ·
1 Parent(s): 90a43e2

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +1 -0
  2. deepseek/lib/python3.10/site-packages/nvidia/cusolver/lib/libcusolverMg.so.11 +3 -0
  3. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/ArrayBase.h +222 -0
  4. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/ArrayWrapper.h +173 -0
  5. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/Assign.h +80 -0
  6. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/BandMatrix.h +338 -0
  7. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/CommaInitializer.h +149 -0
  8. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/ConditionEstimator.h +173 -0
  9. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/CwiseBinaryOp.h +166 -0
  10. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/CwiseNullaryOp.h +971 -0
  11. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/CwiseTernaryOp.h +171 -0
  12. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/CwiseUnaryOp.h +91 -0
  13. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/CwiseUnaryView.h +167 -0
  14. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/DenseBase.h +647 -0
  15. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/DenseCoeffsBase.h +569 -0
  16. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/DenseStorage.h +650 -0
  17. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/DeviceWrapper.h +155 -0
  18. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/Diagonal.h +221 -0
  19. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/DiagonalMatrix.h +414 -0
  20. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/DiagonalProduct.h +30 -0
  21. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/Dot.h +289 -0
  22. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/ForceAlignedAccess.h +131 -0
  23. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/Fuzzy.h +132 -0
  24. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/GeneralProduct.h +517 -0
  25. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/GenericPacketMath.h +1527 -0
  26. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/GlobalFunctions.h +229 -0
  27. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/IO.h +233 -0
  28. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/InternalHeaderCheck.h +3 -0
  29. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/Inverse.h +108 -0
  30. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/Map.h +153 -0
  31. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/MapBase.h +283 -0
  32. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/MathFunctionsImpl.h +262 -0
  33. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/Matrix.h +528 -0
  34. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/MatrixBase.h +542 -0
  35. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/Product.h +307 -0
  36. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/RandomImpl.h +253 -0
  37. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/Redux.h +528 -0
  38. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/Ref.h +383 -0
  39. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/Replicate.h +130 -0
  40. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/Reshaped.h +398 -0
  41. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/ReturnByValue.h +115 -0
  42. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/Reverse.h +196 -0
  43. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/Select.h +156 -0
  44. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/SelfAdjointView.h +329 -0
  45. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/SkewSymmetricMatrix3.h +382 -0
  46. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/Solve.h +174 -0
  47. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/SolveTriangular.h +237 -0
  48. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/SolverBase.h +159 -0
  49. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/StlIterators.h +620 -0
  50. infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/Swap.h +74 -0
.gitattributes CHANGED
@@ -1705,3 +1705,4 @@ infer_4_30_0/lib/python3.10/site-packages/tensorflow/compiler/tf2xla/ops/__pycac
1705
  infer_4_30_0/lib/python3.10/site-packages/tensorflow/python/keras/__pycache__/metrics.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1706
  evalkit_tf437/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_ops_infer.so.8 filter=lfs diff=lfs merge=lfs -text
1707
  evalkit_cambrian/lib/python3.10/site-packages/nvidia/cublas/lib/libcublas.so.11 filter=lfs diff=lfs merge=lfs -text
 
 
1705
  infer_4_30_0/lib/python3.10/site-packages/tensorflow/python/keras/__pycache__/metrics.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1706
  evalkit_tf437/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_ops_infer.so.8 filter=lfs diff=lfs merge=lfs -text
1707
  evalkit_cambrian/lib/python3.10/site-packages/nvidia/cublas/lib/libcublas.so.11 filter=lfs diff=lfs merge=lfs -text
1708
+ deepseek/lib/python3.10/site-packages/nvidia/cusolver/lib/libcusolverMg.so.11 filter=lfs diff=lfs merge=lfs -text
deepseek/lib/python3.10/site-packages/nvidia/cusolver/lib/libcusolverMg.so.11 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:47662749a295f771b92abe8d99dcd5f151953d56069a19f43977b97868ec21eb
3
+ size 82303400
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/ArrayBase.h ADDED
@@ -0,0 +1,222 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
5
+ //
6
+ // This Source Code Form is subject to the terms of the Mozilla
7
+ // Public License v. 2.0. If a copy of the MPL was not distributed
8
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9
+
10
+ #ifndef EIGEN_ARRAYBASE_H
11
+ #define EIGEN_ARRAYBASE_H
12
+
13
+ // IWYU pragma: private
14
+ #include "./InternalHeaderCheck.h"
15
+
16
+ namespace Eigen {
17
+
18
+ template <typename ExpressionType>
19
+ class MatrixWrapper;
20
+
21
+ /** \class ArrayBase
22
+ * \ingroup Core_Module
23
+ *
24
+ * \brief Base class for all 1D and 2D array, and related expressions
25
+ *
26
+ * An array is similar to a dense vector or matrix. While matrices are mathematical
27
+ * objects with well defined linear algebra operators, an array is just a collection
28
+ * of scalar values arranged in a one or two dimensional fashion. As the main consequence,
29
+ * all operations applied to an array are performed coefficient wise. Furthermore,
30
+ * arrays support scalar math functions of the c++ standard library (e.g., std::sin(x)), and convenient
31
+ * constructors allowing to easily write generic code working for both scalar values
32
+ * and arrays.
33
+ *
34
+ * This class is the base that is inherited by all array expression types.
35
+ *
36
+ * \tparam Derived is the derived type, e.g., an array or an expression type.
37
+ *
38
+ * This class can be extended with the help of the plugin mechanism described on the page
39
+ * \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_ARRAYBASE_PLUGIN.
40
+ *
41
+ * \sa class MatrixBase, \ref TopicClassHierarchy
42
+ */
43
+ template <typename Derived>
44
+ class ArrayBase : public DenseBase<Derived> {
45
+ public:
46
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
47
+ /** The base class for a given storage type. */
48
+ typedef ArrayBase StorageBaseType;
49
+
50
+ typedef ArrayBase Eigen_BaseClassForSpecializationOfGlobalMathFuncImpl;
51
+
52
+ typedef typename internal::traits<Derived>::StorageKind StorageKind;
53
+ typedef typename internal::traits<Derived>::Scalar Scalar;
54
+ typedef typename internal::packet_traits<Scalar>::type PacketScalar;
55
+ typedef typename NumTraits<Scalar>::Real RealScalar;
56
+
57
+ typedef DenseBase<Derived> Base;
58
+ using Base::ColsAtCompileTime;
59
+ using Base::Flags;
60
+ using Base::IsVectorAtCompileTime;
61
+ using Base::MaxColsAtCompileTime;
62
+ using Base::MaxRowsAtCompileTime;
63
+ using Base::MaxSizeAtCompileTime;
64
+ using Base::RowsAtCompileTime;
65
+ using Base::SizeAtCompileTime;
66
+
67
+ using Base::coeff;
68
+ using Base::coeffRef;
69
+ using Base::cols;
70
+ using Base::const_cast_derived;
71
+ using Base::derived;
72
+ using Base::lazyAssign;
73
+ using Base::rows;
74
+ using Base::size;
75
+ using Base::operator-;
76
+ using Base::operator=;
77
+ using Base::operator+=;
78
+ using Base::operator-=;
79
+ using Base::operator*=;
80
+ using Base::operator/=;
81
+
82
+ typedef typename Base::CoeffReturnType CoeffReturnType;
83
+
84
+ #endif // not EIGEN_PARSED_BY_DOXYGEN
85
+
86
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
87
+ typedef typename Base::PlainObject PlainObject;
88
+
89
+ /** \internal Represents a matrix with all coefficients equal to one another*/
90
+ typedef CwiseNullaryOp<internal::scalar_constant_op<Scalar>, PlainObject> ConstantReturnType;
91
+ #endif // not EIGEN_PARSED_BY_DOXYGEN
92
+
93
+ #define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::ArrayBase
94
+ #define EIGEN_DOC_UNARY_ADDONS(X, Y)
95
+ #include "../plugins/MatrixCwiseUnaryOps.inc"
96
+ #include "../plugins/ArrayCwiseUnaryOps.inc"
97
+ #include "../plugins/CommonCwiseBinaryOps.inc"
98
+ #include "../plugins/MatrixCwiseBinaryOps.inc"
99
+ #include "../plugins/ArrayCwiseBinaryOps.inc"
100
+ #ifdef EIGEN_ARRAYBASE_PLUGIN
101
+ #include EIGEN_ARRAYBASE_PLUGIN
102
+ #endif
103
+ #undef EIGEN_CURRENT_STORAGE_BASE_CLASS
104
+ #undef EIGEN_DOC_UNARY_ADDONS
105
+
106
+ /** Special case of the template operator=, in order to prevent the compiler
107
+ * from generating a default operator= (issue hit with g++ 4.1)
108
+ */
109
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const ArrayBase& other) {
110
+ internal::call_assignment(derived(), other.derived());
111
+ return derived();
112
+ }
113
+
114
+ /** Set all the entries to \a value.
115
+ * \sa DenseBase::setConstant(), DenseBase::fill() */
116
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const Scalar& value) {
117
+ Base::setConstant(value);
118
+ return derived();
119
+ }
120
+
121
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator+=(const Scalar& scalar);
122
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator-=(const Scalar& scalar);
123
+
124
+ template <typename OtherDerived>
125
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator+=(const ArrayBase<OtherDerived>& other);
126
+ template <typename OtherDerived>
127
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator-=(const ArrayBase<OtherDerived>& other);
128
+
129
+ template <typename OtherDerived>
130
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator*=(const ArrayBase<OtherDerived>& other);
131
+
132
+ template <typename OtherDerived>
133
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator/=(const ArrayBase<OtherDerived>& other);
134
+
135
+ public:
136
+ EIGEN_DEVICE_FUNC ArrayBase<Derived>& array() { return *this; }
137
+ EIGEN_DEVICE_FUNC const ArrayBase<Derived>& array() const { return *this; }
138
+
139
+ /** \returns an \link Eigen::MatrixBase Matrix \endlink expression of this array
140
+ * \sa MatrixBase::array() */
141
+ EIGEN_DEVICE_FUNC MatrixWrapper<Derived> matrix() { return MatrixWrapper<Derived>(derived()); }
142
+ EIGEN_DEVICE_FUNC const MatrixWrapper<const Derived> matrix() const {
143
+ return MatrixWrapper<const Derived>(derived());
144
+ }
145
+
146
+ // template<typename Dest>
147
+ // inline void evalTo(Dest& dst) const { dst = matrix(); }
148
+
149
+ protected:
150
+ EIGEN_DEFAULT_COPY_CONSTRUCTOR(ArrayBase)
151
+ EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(ArrayBase)
152
+
153
+ private:
154
+ explicit ArrayBase(Index);
155
+ ArrayBase(Index, Index);
156
+ template <typename OtherDerived>
157
+ explicit ArrayBase(const ArrayBase<OtherDerived>&);
158
+
159
+ protected:
160
+ // mixing arrays and matrices is not legal
161
+ template <typename OtherDerived>
162
+ Derived& operator+=(const MatrixBase<OtherDerived>&) {
163
+ EIGEN_STATIC_ASSERT(std::ptrdiff_t(sizeof(typename OtherDerived::Scalar)) == -1,
164
+ YOU_CANNOT_MIX_ARRAYS_AND_MATRICES);
165
+ return *this;
166
+ }
167
+ // mixing arrays and matrices is not legal
168
+ template <typename OtherDerived>
169
+ Derived& operator-=(const MatrixBase<OtherDerived>&) {
170
+ EIGEN_STATIC_ASSERT(std::ptrdiff_t(sizeof(typename OtherDerived::Scalar)) == -1,
171
+ YOU_CANNOT_MIX_ARRAYS_AND_MATRICES);
172
+ return *this;
173
+ }
174
+ };
175
+
176
+ /** replaces \c *this by \c *this - \a other.
177
+ *
178
+ * \returns a reference to \c *this
179
+ */
180
+ template <typename Derived>
181
+ template <typename OtherDerived>
182
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& ArrayBase<Derived>::operator-=(const ArrayBase<OtherDerived>& other) {
183
+ call_assignment(derived(), other.derived(), internal::sub_assign_op<Scalar, typename OtherDerived::Scalar>());
184
+ return derived();
185
+ }
186
+
187
+ /** replaces \c *this by \c *this + \a other.
188
+ *
189
+ * \returns a reference to \c *this
190
+ */
191
+ template <typename Derived>
192
+ template <typename OtherDerived>
193
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& ArrayBase<Derived>::operator+=(const ArrayBase<OtherDerived>& other) {
194
+ call_assignment(derived(), other.derived(), internal::add_assign_op<Scalar, typename OtherDerived::Scalar>());
195
+ return derived();
196
+ }
197
+
198
+ /** replaces \c *this by \c *this * \a other coefficient wise.
199
+ *
200
+ * \returns a reference to \c *this
201
+ */
202
+ template <typename Derived>
203
+ template <typename OtherDerived>
204
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& ArrayBase<Derived>::operator*=(const ArrayBase<OtherDerived>& other) {
205
+ call_assignment(derived(), other.derived(), internal::mul_assign_op<Scalar, typename OtherDerived::Scalar>());
206
+ return derived();
207
+ }
208
+
209
+ /** replaces \c *this by \c *this / \a other coefficient wise.
210
+ *
211
+ * \returns a reference to \c *this
212
+ */
213
+ template <typename Derived>
214
+ template <typename OtherDerived>
215
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& ArrayBase<Derived>::operator/=(const ArrayBase<OtherDerived>& other) {
216
+ call_assignment(derived(), other.derived(), internal::div_assign_op<Scalar, typename OtherDerived::Scalar>());
217
+ return derived();
218
+ }
219
+
220
+ } // end namespace Eigen
221
+
222
+ #endif // EIGEN_ARRAYBASE_H
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/ArrayWrapper.h ADDED
@@ -0,0 +1,173 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
5
+ //
6
+ // This Source Code Form is subject to the terms of the Mozilla
7
+ // Public License v. 2.0. If a copy of the MPL was not distributed
8
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9
+
10
+ #ifndef EIGEN_ARRAYWRAPPER_H
11
+ #define EIGEN_ARRAYWRAPPER_H
12
+
13
+ // IWYU pragma: private
14
+ #include "./InternalHeaderCheck.h"
15
+
16
+ namespace Eigen {
17
+
18
+ /** \class ArrayWrapper
19
+ * \ingroup Core_Module
20
+ *
21
+ * \brief Expression of a mathematical vector or matrix as an array object
22
+ *
23
+ * This class is the return type of MatrixBase::array(), and most of the time
24
+ * this is the only way it is use.
25
+ *
26
+ * \sa MatrixBase::array(), class MatrixWrapper
27
+ */
28
+
29
+ namespace internal {
30
+ template <typename ExpressionType>
31
+ struct traits<ArrayWrapper<ExpressionType> > : public traits<remove_all_t<typename ExpressionType::Nested> > {
32
+ typedef ArrayXpr XprKind;
33
+ // Let's remove NestByRefBit
34
+ enum {
35
+ Flags0 = traits<remove_all_t<typename ExpressionType::Nested> >::Flags,
36
+ LvalueBitFlag = is_lvalue<ExpressionType>::value ? LvalueBit : 0,
37
+ Flags = (Flags0 & ~(NestByRefBit | LvalueBit)) | LvalueBitFlag
38
+ };
39
+ };
40
+ } // namespace internal
41
+
42
+ template <typename ExpressionType>
43
+ class ArrayWrapper : public ArrayBase<ArrayWrapper<ExpressionType> > {
44
+ public:
45
+ typedef ArrayBase<ArrayWrapper> Base;
46
+ EIGEN_DENSE_PUBLIC_INTERFACE(ArrayWrapper)
47
+ EIGEN_INHERIT_ASSIGNMENT_OPERATORS(ArrayWrapper)
48
+ typedef internal::remove_all_t<ExpressionType> NestedExpression;
49
+
50
+ typedef std::conditional_t<internal::is_lvalue<ExpressionType>::value, Scalar, const Scalar>
51
+ ScalarWithConstIfNotLvalue;
52
+
53
+ typedef typename internal::ref_selector<ExpressionType>::non_const_type NestedExpressionType;
54
+
55
+ using Base::coeffRef;
56
+
57
+ EIGEN_DEVICE_FUNC explicit EIGEN_STRONG_INLINE ArrayWrapper(ExpressionType& matrix) : m_expression(matrix) {}
58
+
59
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index rows() const EIGEN_NOEXCEPT { return m_expression.rows(); }
60
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index cols() const EIGEN_NOEXCEPT { return m_expression.cols(); }
61
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index outerStride() const EIGEN_NOEXCEPT {
62
+ return m_expression.outerStride();
63
+ }
64
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index innerStride() const EIGEN_NOEXCEPT {
65
+ return m_expression.innerStride();
66
+ }
67
+
68
+ EIGEN_DEVICE_FUNC inline ScalarWithConstIfNotLvalue* data() { return m_expression.data(); }
69
+ EIGEN_DEVICE_FUNC inline const Scalar* data() const { return m_expression.data(); }
70
+
71
+ EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index rowId, Index colId) const {
72
+ return m_expression.coeffRef(rowId, colId);
73
+ }
74
+
75
+ EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index index) const { return m_expression.coeffRef(index); }
76
+
77
+ template <typename Dest>
78
+ EIGEN_DEVICE_FUNC inline void evalTo(Dest& dst) const {
79
+ dst = m_expression;
80
+ }
81
+
82
+ EIGEN_DEVICE_FUNC const internal::remove_all_t<NestedExpressionType>& nestedExpression() const {
83
+ return m_expression;
84
+ }
85
+
86
+ /** Forwards the resizing request to the nested expression
87
+ * \sa DenseBase::resize(Index) */
88
+ EIGEN_DEVICE_FUNC void resize(Index newSize) { m_expression.resize(newSize); }
89
+ /** Forwards the resizing request to the nested expression
90
+ * \sa DenseBase::resize(Index,Index)*/
91
+ EIGEN_DEVICE_FUNC void resize(Index rows, Index cols) { m_expression.resize(rows, cols); }
92
+
93
+ protected:
94
+ NestedExpressionType m_expression;
95
+ };
96
+
97
+ /** \class MatrixWrapper
98
+ * \ingroup Core_Module
99
+ *
100
+ * \brief Expression of an array as a mathematical vector or matrix
101
+ *
102
+ * This class is the return type of ArrayBase::matrix(), and most of the time
103
+ * this is the only way it is use.
104
+ *
105
+ * \sa MatrixBase::matrix(), class ArrayWrapper
106
+ */
107
+
108
+ namespace internal {
109
+ template <typename ExpressionType>
110
+ struct traits<MatrixWrapper<ExpressionType> > : public traits<remove_all_t<typename ExpressionType::Nested> > {
111
+ typedef MatrixXpr XprKind;
112
+ // Let's remove NestByRefBit
113
+ enum {
114
+ Flags0 = traits<remove_all_t<typename ExpressionType::Nested> >::Flags,
115
+ LvalueBitFlag = is_lvalue<ExpressionType>::value ? LvalueBit : 0,
116
+ Flags = (Flags0 & ~(NestByRefBit | LvalueBit)) | LvalueBitFlag
117
+ };
118
+ };
119
+ } // namespace internal
120
+
121
+ template <typename ExpressionType>
122
+ class MatrixWrapper : public MatrixBase<MatrixWrapper<ExpressionType> > {
123
+ public:
124
+ typedef MatrixBase<MatrixWrapper<ExpressionType> > Base;
125
+ EIGEN_DENSE_PUBLIC_INTERFACE(MatrixWrapper)
126
+ EIGEN_INHERIT_ASSIGNMENT_OPERATORS(MatrixWrapper)
127
+ typedef internal::remove_all_t<ExpressionType> NestedExpression;
128
+
129
+ typedef std::conditional_t<internal::is_lvalue<ExpressionType>::value, Scalar, const Scalar>
130
+ ScalarWithConstIfNotLvalue;
131
+
132
+ typedef typename internal::ref_selector<ExpressionType>::non_const_type NestedExpressionType;
133
+
134
+ using Base::coeffRef;
135
+
136
+ EIGEN_DEVICE_FUNC explicit inline MatrixWrapper(ExpressionType& matrix) : m_expression(matrix) {}
137
+
138
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index rows() const EIGEN_NOEXCEPT { return m_expression.rows(); }
139
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index cols() const EIGEN_NOEXCEPT { return m_expression.cols(); }
140
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index outerStride() const EIGEN_NOEXCEPT {
141
+ return m_expression.outerStride();
142
+ }
143
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index innerStride() const EIGEN_NOEXCEPT {
144
+ return m_expression.innerStride();
145
+ }
146
+
147
+ EIGEN_DEVICE_FUNC inline ScalarWithConstIfNotLvalue* data() { return m_expression.data(); }
148
+ EIGEN_DEVICE_FUNC inline const Scalar* data() const { return m_expression.data(); }
149
+
150
+ EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index rowId, Index colId) const {
151
+ return m_expression.derived().coeffRef(rowId, colId);
152
+ }
153
+
154
+ EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index index) const { return m_expression.coeffRef(index); }
155
+
156
+ EIGEN_DEVICE_FUNC const internal::remove_all_t<NestedExpressionType>& nestedExpression() const {
157
+ return m_expression;
158
+ }
159
+
160
+ /** Forwards the resizing request to the nested expression
161
+ * \sa DenseBase::resize(Index) */
162
+ EIGEN_DEVICE_FUNC void resize(Index newSize) { m_expression.resize(newSize); }
163
+ /** Forwards the resizing request to the nested expression
164
+ * \sa DenseBase::resize(Index,Index)*/
165
+ EIGEN_DEVICE_FUNC void resize(Index rows, Index cols) { m_expression.resize(rows, cols); }
166
+
167
+ protected:
168
+ NestedExpressionType m_expression;
169
+ };
170
+
171
+ } // end namespace Eigen
172
+
173
+ #endif // EIGEN_ARRAYWRAPPER_H
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/Assign.h ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2007 Michael Olbrich <michael.olbrich@gmx.net>
5
+ // Copyright (C) 2006-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
6
+ // Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
7
+ //
8
+ // This Source Code Form is subject to the terms of the Mozilla
9
+ // Public License v. 2.0. If a copy of the MPL was not distributed
10
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
11
+
12
+ #ifndef EIGEN_ASSIGN_H
13
+ #define EIGEN_ASSIGN_H
14
+
15
+ // IWYU pragma: private
16
+ #include "./InternalHeaderCheck.h"
17
+
18
+ namespace Eigen {
19
+
20
+ template <typename Derived>
21
+ template <typename OtherDerived>
22
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::lazyAssign(const DenseBase<OtherDerived>& other) {
23
+ enum { SameType = internal::is_same<typename Derived::Scalar, typename OtherDerived::Scalar>::value };
24
+
25
+ EIGEN_STATIC_ASSERT_LVALUE(Derived)
26
+ EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Derived, OtherDerived)
27
+ EIGEN_STATIC_ASSERT(
28
+ SameType,
29
+ YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
30
+
31
+ eigen_assert(rows() == other.rows() && cols() == other.cols());
32
+ internal::call_assignment_no_alias(derived(), other.derived());
33
+
34
+ return derived();
35
+ }
36
+
37
+ template <typename Derived>
38
+ template <typename OtherDerived>
39
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::operator=(const DenseBase<OtherDerived>& other) {
40
+ internal::call_assignment(derived(), other.derived());
41
+ return derived();
42
+ }
43
+
44
+ template <typename Derived>
45
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::operator=(const DenseBase& other) {
46
+ internal::call_assignment(derived(), other.derived());
47
+ return derived();
48
+ }
49
+
50
+ template <typename Derived>
51
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::operator=(const MatrixBase& other) {
52
+ internal::call_assignment(derived(), other.derived());
53
+ return derived();
54
+ }
55
+
56
+ template <typename Derived>
57
+ template <typename OtherDerived>
58
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::operator=(const DenseBase<OtherDerived>& other) {
59
+ internal::call_assignment(derived(), other.derived());
60
+ return derived();
61
+ }
62
+
63
+ template <typename Derived>
64
+ template <typename OtherDerived>
65
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::operator=(const EigenBase<OtherDerived>& other) {
66
+ internal::call_assignment(derived(), other.derived());
67
+ return derived();
68
+ }
69
+
70
+ template <typename Derived>
71
+ template <typename OtherDerived>
72
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::operator=(
73
+ const ReturnByValue<OtherDerived>& other) {
74
+ other.derived().evalTo(derived());
75
+ return derived();
76
+ }
77
+
78
+ } // end namespace Eigen
79
+
80
+ #endif // EIGEN_ASSIGN_H
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/BandMatrix.h ADDED
@@ -0,0 +1,338 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
5
+ //
6
+ // This Source Code Form is subject to the terms of the Mozilla
7
+ // Public License v. 2.0. If a copy of the MPL was not distributed
8
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9
+
10
+ #ifndef EIGEN_BANDMATRIX_H
11
+ #define EIGEN_BANDMATRIX_H
12
+
13
+ // IWYU pragma: private
14
+ #include "./InternalHeaderCheck.h"
15
+
16
+ namespace Eigen {
17
+
18
+ namespace internal {
19
+
20
+ template <typename Derived>
21
+ class BandMatrixBase : public EigenBase<Derived> {
22
+ public:
23
+ enum {
24
+ Flags = internal::traits<Derived>::Flags,
25
+ CoeffReadCost = internal::traits<Derived>::CoeffReadCost,
26
+ RowsAtCompileTime = internal::traits<Derived>::RowsAtCompileTime,
27
+ ColsAtCompileTime = internal::traits<Derived>::ColsAtCompileTime,
28
+ MaxRowsAtCompileTime = internal::traits<Derived>::MaxRowsAtCompileTime,
29
+ MaxColsAtCompileTime = internal::traits<Derived>::MaxColsAtCompileTime,
30
+ Supers = internal::traits<Derived>::Supers,
31
+ Subs = internal::traits<Derived>::Subs,
32
+ Options = internal::traits<Derived>::Options
33
+ };
34
+ typedef typename internal::traits<Derived>::Scalar Scalar;
35
+ typedef Matrix<Scalar, RowsAtCompileTime, ColsAtCompileTime> DenseMatrixType;
36
+ typedef typename DenseMatrixType::StorageIndex StorageIndex;
37
+ typedef typename internal::traits<Derived>::CoefficientsType CoefficientsType;
38
+ typedef EigenBase<Derived> Base;
39
+
40
+ protected:
41
+ enum {
42
+ DataRowsAtCompileTime = ((Supers != Dynamic) && (Subs != Dynamic)) ? 1 + Supers + Subs : Dynamic,
43
+ SizeAtCompileTime = min_size_prefer_dynamic(RowsAtCompileTime, ColsAtCompileTime)
44
+ };
45
+
46
+ public:
47
+ using Base::cols;
48
+ using Base::derived;
49
+ using Base::rows;
50
+
51
+ /** \returns the number of super diagonals */
52
+ inline Index supers() const { return derived().supers(); }
53
+
54
+ /** \returns the number of sub diagonals */
55
+ inline Index subs() const { return derived().subs(); }
56
+
57
+ /** \returns an expression of the underlying coefficient matrix */
58
+ inline const CoefficientsType& coeffs() const { return derived().coeffs(); }
59
+
60
+ /** \returns an expression of the underlying coefficient matrix */
61
+ inline CoefficientsType& coeffs() { return derived().coeffs(); }
62
+
63
+ /** \returns a vector expression of the \a i -th column,
64
+ * only the meaningful part is returned.
65
+ * \warning the internal storage must be column major. */
66
+ inline Block<CoefficientsType, Dynamic, 1> col(Index i) {
67
+ EIGEN_STATIC_ASSERT((int(Options) & int(RowMajor)) == 0, THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
68
+ Index start = 0;
69
+ Index len = coeffs().rows();
70
+ if (i <= supers()) {
71
+ start = supers() - i;
72
+ len = (std::min)(rows(), std::max<Index>(0, coeffs().rows() - (supers() - i)));
73
+ } else if (i >= rows() - subs())
74
+ len = std::max<Index>(0, coeffs().rows() - (i + 1 - rows() + subs()));
75
+ return Block<CoefficientsType, Dynamic, 1>(coeffs(), start, i, len, 1);
76
+ }
77
+
78
+ /** \returns a vector expression of the main diagonal */
79
+ inline Block<CoefficientsType, 1, SizeAtCompileTime> diagonal() {
80
+ return Block<CoefficientsType, 1, SizeAtCompileTime>(coeffs(), supers(), 0, 1, (std::min)(rows(), cols()));
81
+ }
82
+
83
+ /** \returns a vector expression of the main diagonal (const version) */
84
+ inline const Block<const CoefficientsType, 1, SizeAtCompileTime> diagonal() const {
85
+ return Block<const CoefficientsType, 1, SizeAtCompileTime>(coeffs(), supers(), 0, 1, (std::min)(rows(), cols()));
86
+ }
87
+
88
+ template <int Index>
89
+ struct DiagonalIntReturnType {
90
+ enum {
91
+ ReturnOpposite =
92
+ (int(Options) & int(SelfAdjoint)) && (((Index) > 0 && Supers == 0) || ((Index) < 0 && Subs == 0)),
93
+ Conjugate = ReturnOpposite && NumTraits<Scalar>::IsComplex,
94
+ ActualIndex = ReturnOpposite ? -Index : Index,
95
+ DiagonalSize =
96
+ (RowsAtCompileTime == Dynamic || ColsAtCompileTime == Dynamic)
97
+ ? Dynamic
98
+ : (ActualIndex < 0 ? min_size_prefer_dynamic(ColsAtCompileTime, RowsAtCompileTime + ActualIndex)
99
+ : min_size_prefer_dynamic(RowsAtCompileTime, ColsAtCompileTime - ActualIndex))
100
+ };
101
+ typedef Block<CoefficientsType, 1, DiagonalSize> BuildType;
102
+ typedef std::conditional_t<Conjugate, CwiseUnaryOp<internal::scalar_conjugate_op<Scalar>, BuildType>, BuildType>
103
+ Type;
104
+ };
105
+
106
+ /** \returns a vector expression of the \a N -th sub or super diagonal */
107
+ template <int N>
108
+ inline typename DiagonalIntReturnType<N>::Type diagonal() {
109
+ return typename DiagonalIntReturnType<N>::BuildType(coeffs(), supers() - N, (std::max)(0, N), 1, diagonalLength(N));
110
+ }
111
+
112
+ /** \returns a vector expression of the \a N -th sub or super diagonal */
113
+ template <int N>
114
+ inline const typename DiagonalIntReturnType<N>::Type diagonal() const {
115
+ return typename DiagonalIntReturnType<N>::BuildType(coeffs(), supers() - N, (std::max)(0, N), 1, diagonalLength(N));
116
+ }
117
+
118
+ /** \returns a vector expression of the \a i -th sub or super diagonal */
119
+ inline Block<CoefficientsType, 1, Dynamic> diagonal(Index i) {
120
+ eigen_assert((i < 0 && -i <= subs()) || (i >= 0 && i <= supers()));
121
+ return Block<CoefficientsType, 1, Dynamic>(coeffs(), supers() - i, std::max<Index>(0, i), 1, diagonalLength(i));
122
+ }
123
+
124
+ /** \returns a vector expression of the \a i -th sub or super diagonal */
125
+ inline const Block<const CoefficientsType, 1, Dynamic> diagonal(Index i) const {
126
+ eigen_assert((i < 0 && -i <= subs()) || (i >= 0 && i <= supers()));
127
+ return Block<const CoefficientsType, 1, Dynamic>(coeffs(), supers() - i, std::max<Index>(0, i), 1,
128
+ diagonalLength(i));
129
+ }
130
+
131
+ template <typename Dest>
132
+ inline void evalTo(Dest& dst) const {
133
+ dst.resize(rows(), cols());
134
+ dst.setZero();
135
+ dst.diagonal() = diagonal();
136
+ for (Index i = 1; i <= supers(); ++i) dst.diagonal(i) = diagonal(i);
137
+ for (Index i = 1; i <= subs(); ++i) dst.diagonal(-i) = diagonal(-i);
138
+ }
139
+
140
+ DenseMatrixType toDenseMatrix() const {
141
+ DenseMatrixType res(rows(), cols());
142
+ evalTo(res);
143
+ return res;
144
+ }
145
+
146
+ protected:
147
+ inline Index diagonalLength(Index i) const {
148
+ return i < 0 ? (std::min)(cols(), rows() + i) : (std::min)(rows(), cols() - i);
149
+ }
150
+ };
151
+
152
+ /**
153
+ * \class BandMatrix
154
+ * \ingroup Core_Module
155
+ *
156
+ * \brief Represents a rectangular matrix with a banded storage
157
+ *
158
+ * \tparam Scalar_ Numeric type, i.e. float, double, int
159
+ * \tparam Rows_ Number of rows, or \b Dynamic
160
+ * \tparam Cols_ Number of columns, or \b Dynamic
161
+ * \tparam Supers_ Number of super diagonal
162
+ * \tparam Subs_ Number of sub diagonal
163
+ * \tparam Options_ A combination of either \b #RowMajor or \b #ColMajor, and of \b #SelfAdjoint
164
+ * The former controls \ref TopicStorageOrders "storage order", and defaults to
165
+ * column-major. The latter controls whether the matrix represents a selfadjoint
166
+ * matrix, in which case either Supers_ or Subs_ has to be zero.
167
+ *
168
+ * \sa class TridiagonalMatrix
169
+ */
170
+
171
/** \internal Traits for BandMatrix.
 * The band is stored compactly in a dense (1 + Supers_ + Subs_) x Cols_ matrix:
 * one storage row per diagonal, one storage column per matrix column. */
template <typename Scalar_, int Rows_, int Cols_, int Supers_, int Subs_, int Options_>
struct traits<BandMatrix<Scalar_, Rows_, Cols_, Supers_, Subs_, Options_> > {
  typedef Scalar_ Scalar;
  typedef Dense StorageKind;
  typedef Eigen::Index StorageIndex;
  enum {
    CoeffReadCost = NumTraits<Scalar>::ReadCost,
    RowsAtCompileTime = Rows_,
    ColsAtCompileTime = Cols_,
    MaxRowsAtCompileTime = Rows_,
    MaxColsAtCompileTime = Cols_,
    Flags = LvalueBit,
    Supers = Supers_,
    Subs = Subs_,
    Options = Options_,
    // Number of rows of the compact storage: main diagonal plus all super and
    // sub diagonals. Known at compile time only if both band widths are fixed.
    DataRowsAtCompileTime = ((Supers != Dynamic) && (Subs != Dynamic)) ? 1 + Supers + Subs : Dynamic
  };
  // Dense matrix holding the band coefficients, honoring the requested storage order.
  typedef Matrix<Scalar, DataRowsAtCompileTime, ColsAtCompileTime, int(Options) & int(RowMajor) ? RowMajor : ColMajor>
      CoefficientsType;
};
191
+
192
template <typename Scalar_, int Rows, int Cols, int Supers, int Subs, int Options>
class BandMatrix : public BandMatrixBase<BandMatrix<Scalar_, Rows, Cols, Supers, Subs, Options> > {
 public:
  typedef typename internal::traits<BandMatrix>::Scalar Scalar;
  typedef typename internal::traits<BandMatrix>::StorageIndex StorageIndex;
  typedef typename internal::traits<BandMatrix>::CoefficientsType CoefficientsType;

  // The coefficient storage has one row per diagonal (1 main + supers + subs)
  // and one column per matrix column.
  explicit inline BandMatrix(Index rows = Rows, Index cols = Cols, Index supers = Supers, Index subs = Subs)
      : m_coeffs(1 + supers + subs, cols), m_rows(rows), m_supers(supers), m_subs(subs) {}

  /** \returns the number of rows */
  inline EIGEN_CONSTEXPR Index rows() const { return m_rows.value(); }

  /** \returns the number of columns */
  inline EIGEN_CONSTEXPR Index cols() const { return m_coeffs.cols(); }

  /** \returns the number of super diagonals */
  inline EIGEN_CONSTEXPR Index supers() const { return m_supers.value(); }

  /** \returns the number of sub diagonals */
  inline EIGEN_CONSTEXPR Index subs() const { return m_subs.value(); }

  /** \returns the dense matrix holding the band coefficients */
  inline const CoefficientsType& coeffs() const { return m_coeffs; }
  inline CoefficientsType& coeffs() { return m_coeffs; }

 protected:
  CoefficientsType m_coeffs;  // owned compact band storage
  // The following are stored at runtime only when the corresponding template
  // parameter is Dynamic (variable_if_dynamic is empty otherwise).
  internal::variable_if_dynamic<Index, Rows> m_rows;
  internal::variable_if_dynamic<Index, Supers> m_supers;
  internal::variable_if_dynamic<Index, Subs> m_subs;
};
223
+
224
// Forward declaration: a non-owning band-matrix view over an existing
// coefficient expression (see the class definition below).
template <typename CoefficientsType_, int Rows_, int Cols_, int Supers_, int Subs_, int Options_>
class BandMatrixWrapper;

/** \internal Traits for BandMatrixWrapper: same storage conventions as
 * BandMatrix, but the coefficient storage type is supplied by the user. */
template <typename CoefficientsType_, int Rows_, int Cols_, int Supers_, int Subs_, int Options_>
struct traits<BandMatrixWrapper<CoefficientsType_, Rows_, Cols_, Supers_, Subs_, Options_> > {
  typedef typename CoefficientsType_::Scalar Scalar;
  typedef typename CoefficientsType_::StorageKind StorageKind;
  typedef typename CoefficientsType_::StorageIndex StorageIndex;
  enum {
    CoeffReadCost = internal::traits<CoefficientsType_>::CoeffReadCost,
    RowsAtCompileTime = Rows_,
    ColsAtCompileTime = Cols_,
    MaxRowsAtCompileTime = Rows_,
    MaxColsAtCompileTime = Cols_,
    Flags = LvalueBit,
    Supers = Supers_,
    Subs = Subs_,
    Options = Options_,
    // One storage row per diagonal; compile-time only if both band widths are fixed.
    DataRowsAtCompileTime = ((Supers != Dynamic) && (Subs != Dynamic)) ? 1 + Supers + Subs : Dynamic
  };
  typedef CoefficientsType_ CoefficientsType;
};
246
+
247
template <typename CoefficientsType_, int Rows_, int Cols_, int Supers_, int Subs_, int Options_>
class BandMatrixWrapper
    : public BandMatrixBase<BandMatrixWrapper<CoefficientsType_, Rows_, Cols_, Supers_, Subs_, Options_> > {
 public:
  typedef typename internal::traits<BandMatrixWrapper>::Scalar Scalar;
  typedef typename internal::traits<BandMatrixWrapper>::CoefficientsType CoefficientsType;
  typedef typename internal::traits<BandMatrixWrapper>::StorageIndex StorageIndex;

  // Non-owning view: \a coeffs is held by reference and must outlive the wrapper.
  explicit inline BandMatrixWrapper(const CoefficientsType& coeffs, Index rows = Rows_, Index cols = Cols_,
                                    Index supers = Supers_, Index subs = Subs_)
      : m_coeffs(coeffs), m_rows(rows), m_supers(supers), m_subs(subs) {
    EIGEN_UNUSED_VARIABLE(cols);
    // eigen_assert(coeffs.cols()==cols() && (supers()+subs()+1)==coeffs.rows());
  }

  /** \returns the number of rows */
  inline EIGEN_CONSTEXPR Index rows() const { return m_rows.value(); }

  /** \returns the number of columns */
  inline EIGEN_CONSTEXPR Index cols() const { return m_coeffs.cols(); }

  /** \returns the number of super diagonals */
  inline EIGEN_CONSTEXPR Index supers() const { return m_supers.value(); }

  /** \returns the number of sub diagonals */
  inline EIGEN_CONSTEXPR Index subs() const { return m_subs.value(); }

  /** \returns the wrapped coefficient expression */
  inline const CoefficientsType& coeffs() const { return m_coeffs; }

 protected:
  const CoefficientsType& m_coeffs;  // non-owning reference to the band storage
  internal::variable_if_dynamic<Index, Rows_> m_rows;
  internal::variable_if_dynamic<Index, Supers_> m_supers;
  internal::variable_if_dynamic<Index, Subs_> m_subs;
};
282
+
283
+ /**
284
+ * \class TridiagonalMatrix
285
+ * \ingroup Core_Module
286
+ *
287
+ * \brief Represents a tridiagonal matrix with a compact banded storage
288
+ *
289
+ * \tparam Scalar Numeric type, i.e. float, double, int
290
+ * \tparam Size Number of rows and cols, or \b Dynamic
291
+ * \tparam Options Can be 0 or \b SelfAdjoint
292
+ *
293
+ * \sa class BandMatrix
294
+ */
295
template <typename Scalar, int Size, int Options>
class TridiagonalMatrix : public BandMatrix<Scalar, Size, Size, Options & SelfAdjoint ? 0 : 1, 1, Options | RowMajor> {
  // A tridiagonal matrix is a square band matrix with one sub and one super
  // diagonal; in the selfadjoint case the super diagonal is not stored
  // (hence 0 supers in the base type).
  typedef BandMatrix<Scalar, Size, Size, Options & SelfAdjoint ? 0 : 1, 1, Options | RowMajor> Base;
  typedef typename Base::StorageIndex StorageIndex;

 public:
  explicit TridiagonalMatrix(Index size = Size) : Base(size, size, Options & SelfAdjoint ? 0 : 1, 1) {}

  /** \returns a vector expression of the super diagonal */
  inline typename Base::template DiagonalIntReturnType<1>::Type super() { return Base::template diagonal<1>(); }
  /** \returns a vector expression of the super diagonal (const version) */
  inline const typename Base::template DiagonalIntReturnType<1>::Type super() const {
    return Base::template diagonal<1>();
  }
  /** \returns a vector expression of the sub diagonal */
  inline typename Base::template DiagonalIntReturnType<-1>::Type sub() { return Base::template diagonal<-1>(); }
  /** \returns a vector expression of the sub diagonal (const version) */
  inline const typename Base::template DiagonalIntReturnType<-1>::Type sub() const {
    return Base::template diagonal<-1>();
  }

 protected:
};
314
+
315
// Tag type identifying the "band" shape in the evaluator / assignment framework.
struct BandShape {};

// Route both band-matrix expression types to the BandShape evaluator category.
template <typename Scalar_, int Rows_, int Cols_, int Supers_, int Subs_, int Options_>
struct evaluator_traits<BandMatrix<Scalar_, Rows_, Cols_, Supers_, Subs_, Options_> >
    : public evaluator_traits_base<BandMatrix<Scalar_, Rows_, Cols_, Supers_, Subs_, Options_> > {
  typedef BandShape Shape;
};

template <typename CoefficientsType_, int Rows_, int Cols_, int Supers_, int Subs_, int Options_>
struct evaluator_traits<BandMatrixWrapper<CoefficientsType_, Rows_, Cols_, Supers_, Subs_, Options_> >
    : public evaluator_traits_base<BandMatrixWrapper<CoefficientsType_, Rows_, Cols_, Supers_, Subs_, Options_> > {
  typedef BandShape Shape;
};

// Assigning a band expression to a dense destination goes through the generic
// EigenBase-to-EigenBase path (i.e. the band expression's evalTo()).
template <>
struct AssignmentKind<DenseShape, BandShape> {
  typedef EigenBase2EigenBase Kind;
};
333
+
334
+ } // end namespace internal
335
+
336
+ } // end namespace Eigen
337
+
338
+ #endif // EIGEN_BANDMATRIX_H
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/CommaInitializer.h ADDED
@@ -0,0 +1,149 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
5
+ // Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
6
+ //
7
+ // This Source Code Form is subject to the terms of the Mozilla
8
+ // Public License v. 2.0. If a copy of the MPL was not distributed
9
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
10
+
11
+ #ifndef EIGEN_COMMAINITIALIZER_H
12
+ #define EIGEN_COMMAINITIALIZER_H
13
+
14
+ // IWYU pragma: private
15
+ #include "./InternalHeaderCheck.h"
16
+
17
+ namespace Eigen {
18
+
19
+ /** \class CommaInitializer
20
+ * \ingroup Core_Module
21
+ *
22
+ * \brief Helper class used by the comma initializer operator
23
+ *
24
+ * This class is internally used to implement the comma initializer feature. It is
25
+ * the return type of MatrixBase::operator<<, and most of the time this is the only
26
+ * way it is used.
27
+ *
28
+ * \sa \blank \ref MatrixBaseCommaInitRef "MatrixBase::operator<<", CommaInitializer::finished()
29
+ */
30
template <typename XprType>
struct CommaInitializer {
  typedef typename XprType::Scalar Scalar;

  // Starts a fill with a single scalar: coefficient (0,0) is written immediately,
  // and the cursor is placed at column 1 of a 1-row-high block.
  EIGEN_DEVICE_FUNC inline CommaInitializer(XprType& xpr, const Scalar& s)
      : m_xpr(xpr), m_row(0), m_col(1), m_currentBlockRows(1) {
    eigen_assert(m_xpr.rows() > 0 && m_xpr.cols() > 0 && "Cannot comma-initialize a 0x0 matrix (operator<<)");
    m_xpr.coeffRef(0, 0) = s;
  }

  // Starts a fill with a matrix expression: it becomes the first block of the
  // first block-row, whose height is other.rows().
  template <typename OtherDerived>
  EIGEN_DEVICE_FUNC inline CommaInitializer(XprType& xpr, const DenseBase<OtherDerived>& other)
      : m_xpr(xpr), m_row(0), m_col(other.cols()), m_currentBlockRows(other.rows()) {
    eigen_assert(m_xpr.rows() >= other.rows() && m_xpr.cols() >= other.cols() &&
                 "Cannot comma-initialize a 0x0 matrix (operator<<)");
    m_xpr.template block<OtherDerived::RowsAtCompileTime, OtherDerived::ColsAtCompileTime>(0, 0, other.rows(),
                                                                                           other.cols()) = other;
  }

  /* Copy/Move constructor which transfers ownership. This is crucial in
   * absence of return value optimization to avoid assertions during destruction. */
  // FIXME in C++11 mode this could be replaced by a proper RValue constructor
  EIGEN_DEVICE_FUNC inline CommaInitializer(const CommaInitializer& o)
      : m_xpr(o.m_xpr), m_row(o.m_row), m_col(o.m_col), m_currentBlockRows(o.m_currentBlockRows) {
    // Mark original object as finished. In absence of R-value references we need to const_cast:
    const_cast<CommaInitializer&>(o).m_row = m_xpr.rows();
    const_cast<CommaInitializer&>(o).m_col = m_xpr.cols();
    const_cast<CommaInitializer&>(o).m_currentBlockRows = 0;
  }

  /* inserts a scalar value in the target matrix */
  EIGEN_DEVICE_FUNC CommaInitializer &operator,(const Scalar& s) {
    if (m_col == m_xpr.cols()) {
      // Current row is full: wrap to the beginning of the next block-row.
      m_row += m_currentBlockRows;
      m_col = 0;
      m_currentBlockRows = 1;
      eigen_assert(m_row < m_xpr.rows() && "Too many rows passed to comma initializer (operator<<)");
    }
    eigen_assert(m_col < m_xpr.cols() && "Too many coefficients passed to comma initializer (operator<<)");
    eigen_assert(m_currentBlockRows == 1);
    m_xpr.coeffRef(m_row, m_col++) = s;
    return *this;
  }

  /* inserts a matrix expression in the target matrix */
  template <typename OtherDerived>
  EIGEN_DEVICE_FUNC CommaInitializer &operator,(const DenseBase<OtherDerived>& other) {
    // Wrap to a new block-row unless `other` is an empty block of matching height.
    if (m_col == m_xpr.cols() && (other.cols() != 0 || other.rows() != m_currentBlockRows)) {
      m_row += m_currentBlockRows;
      m_col = 0;
      m_currentBlockRows = other.rows();
      eigen_assert(m_row + m_currentBlockRows <= m_xpr.rows() &&
                   "Too many rows passed to comma initializer (operator<<)");
    }
    eigen_assert((m_col + other.cols() <= m_xpr.cols()) &&
                 "Too many coefficients passed to comma initializer (operator<<)");
    eigen_assert(m_currentBlockRows == other.rows());
    m_xpr.template block<OtherDerived::RowsAtCompileTime, OtherDerived::ColsAtCompileTime>(m_row, m_col, other.rows(),
                                                                                           other.cols()) = other;
    m_col += other.cols();
    return *this;
  }

  // The destructor checks completeness via finished(); in assertion-testing
  // builds it must be allowed to throw eigen_assert_exception.
  EIGEN_DEVICE_FUNC inline ~CommaInitializer()
#if defined VERIFY_RAISES_ASSERT && (!defined EIGEN_NO_ASSERTION_CHECKING) && defined EIGEN_EXCEPTIONS
      EIGEN_EXCEPTION_SPEC(Eigen::eigen_assert_exception)
#endif
  {
    finished();
  }

  /** \returns the built matrix once all its coefficients have been set.
   * Calling finished is 100% optional. Its purpose is to write expressions
   * like this:
   * \code
   * quaternion.fromRotationMatrix((Matrix3f() << axis0, axis1, axis2).finished());
   * \endcode
   */
  EIGEN_DEVICE_FUNC inline XprType& finished() {
    eigen_assert(((m_row + m_currentBlockRows) == m_xpr.rows() || m_xpr.cols() == 0) && m_col == m_xpr.cols() &&
                 "Too few coefficients passed to comma initializer (operator<<)");
    return m_xpr;
  }

  XprType& m_xpr;            // target expression
  Index m_row;               // current row id
  Index m_col;               // current col id
  Index m_currentBlockRows;  // current block height
};
119
+
120
+ /** \anchor MatrixBaseCommaInitRef
121
+ * Convenient operator to set the coefficients of a matrix.
122
+ *
123
+ * The coefficients must be provided in a row major order and exactly match
124
+ * the size of the matrix. Otherwise an assertion is raised.
125
+ *
126
+ * Example: \include MatrixBase_set.cpp
127
+ * Output: \verbinclude MatrixBase_set.out
128
+ *
129
+ * \note According the c++ standard, the argument expressions of this comma initializer are evaluated in arbitrary
130
+ * order.
131
+ *
132
+ * \sa CommaInitializer::finished(), class CommaInitializer
133
+ */
134
+ template <typename Derived>
135
+ EIGEN_DEVICE_FUNC inline CommaInitializer<Derived> DenseBase<Derived>::operator<<(const Scalar& s) {
136
+ return CommaInitializer<Derived>(*static_cast<Derived*>(this), s);
137
+ }
138
+
139
+ /** \sa operator<<(const Scalar&) */
140
+ template <typename Derived>
141
+ template <typename OtherDerived>
142
+ EIGEN_DEVICE_FUNC inline CommaInitializer<Derived> DenseBase<Derived>::operator<<(
143
+ const DenseBase<OtherDerived>& other) {
144
+ return CommaInitializer<Derived>(*static_cast<Derived*>(this), other);
145
+ }
146
+
147
+ } // end namespace Eigen
148
+
149
+ #endif // EIGEN_COMMAINITIALIZER_H
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/ConditionEstimator.h ADDED
@@ -0,0 +1,173 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2016 Rasmus Munk Larsen (rmlarsen@google.com)
5
+ //
6
+ // This Source Code Form is subject to the terms of the Mozilla
7
+ // Public License v. 2.0. If a copy of the MPL was not distributed
8
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9
+
10
+ #ifndef EIGEN_CONDITIONESTIMATOR_H
11
+ #define EIGEN_CONDITIONESTIMATOR_H
12
+
13
+ // IWYU pragma: private
14
+ #include "./InternalHeaderCheck.h"
15
+
16
+ namespace Eigen {
17
+
18
+ namespace internal {
19
+
20
// Computes an elementwise "sign" vector: each entry is v_i / |v_i|, with zero
// entries mapped to one, so the result always has unit-modulus coefficients.
template <typename Vector, typename RealVector, bool IsComplex>
struct rcond_compute_sign {
  static inline Vector run(const Vector& v) {
    const RealVector v_abs = v.cwiseAbs();
    return (v_abs.array() == static_cast<typename Vector::RealScalar>(0))
               .select(Vector::Ones(v.size()), v.cwiseQuotient(v_abs));
  }
};

// Partial specialization to avoid elementwise division for real vectors.
template <typename Vector>
struct rcond_compute_sign<Vector, Vector, false> {
  static inline Vector run(const Vector& v) {
    // For real scalars the sign is simply +/-1 (0 maps to +1).
    return (v.array() < static_cast<typename Vector::RealScalar>(0))
        .select(-Vector::Ones(v.size()), Vector::Ones(v.size()));
  }
};
37
+
38
+ /**
39
+ * \returns an estimate of ||inv(matrix)||_1 given a decomposition of
40
+ * \a matrix that implements .solve() and .adjoint().solve() methods.
41
+ *
42
+ * This function implements Algorithms 4.1 and 5.1 from
43
+ * http://www.maths.manchester.ac.uk/~higham/narep/narep135.pdf
44
+ * which also forms the basis for the condition number estimators in
45
+ * LAPACK. Since at most 10 calls to the solve method of dec are
46
+ * performed, the total cost is O(dims^2), as opposed to O(dims^3)
47
+ * needed to compute the inverse matrix explicitly.
48
+ *
49
+ * The most common usage is in estimating the condition number
50
+ * ||matrix||_1 * ||inv(matrix)||_1. The first term ||matrix||_1 can be
51
+ * computed directly in O(n^2) operations.
52
+ *
53
+ * Supports the following decompositions: FullPivLU, PartialPivLU, LDLT, and
54
+ * LLT.
55
+ *
56
+ * \sa FullPivLU, PartialPivLU, LDLT, LLT.
57
+ */
58
template <typename Decomposition>
typename Decomposition::RealScalar rcond_invmatrix_L1_norm_estimate(const Decomposition& dec) {
  typedef typename Decomposition::MatrixType MatrixType;
  typedef typename Decomposition::Scalar Scalar;
  typedef typename Decomposition::RealScalar RealScalar;
  typedef typename internal::plain_col_type<MatrixType>::type Vector;
  typedef typename internal::plain_col_type<MatrixType, RealScalar>::type RealVector;
  const bool is_complex = (NumTraits<Scalar>::IsComplex != 0);

  eigen_assert(dec.rows() == dec.cols());
  const Index n = dec.rows();
  if (n == 0) return 0;

  // Disable Index to float conversion warning
#ifdef __INTEL_COMPILER
#pragma warning push
#pragma warning(disable : 2259)
#endif
  // Initial probe: v = inv(matrix) * (uniform vector of 1/n).
  Vector v = dec.solve(Vector::Ones(n) / Scalar(n));
#ifdef __INTEL_COMPILER
#pragma warning pop
#endif

  // lower_bound is a lower bound on
  // ||inv(matrix)||_1 = sup_v ||inv(matrix) v||_1 / ||v||_1
  // and is the objective maximized by the ("super-") gradient ascent
  // algorithm below.
  RealScalar lower_bound = v.template lpNorm<1>();
  if (n == 1) return lower_bound;

  // Gradient ascent algorithm follows: We know that the optimum is achieved at
  // one of the simplices v = e_i, so in each iteration we follow a
  // super-gradient to move towards the optimal one.
  // The ascent is capped at 4 iterations, as in the references cited above.
  RealScalar old_lower_bound = lower_bound;
  Vector sign_vector(n);
  Vector old_sign_vector;
  Index v_max_abs_index = -1;
  Index old_v_max_abs_index = v_max_abs_index;
  for (int k = 0; k < 4; ++k) {
    sign_vector = internal::rcond_compute_sign<Vector, RealVector, is_complex>::run(v);
    if (k > 0 && !is_complex && sign_vector == old_sign_vector) {
      // Break if the solution stagnated.
      break;
    }
    // v_max_abs_index = argmax |real( inv(matrix)^T * sign_vector )|
    v = dec.adjoint().solve(sign_vector);
    v.real().cwiseAbs().maxCoeff(&v_max_abs_index);
    if (v_max_abs_index == old_v_max_abs_index) {
      // Break if the solution stagnated.
      break;
    }
    // Move to the new simplex e_j, where j = v_max_abs_index.
    v = dec.solve(Vector::Unit(n, v_max_abs_index));  // v = inv(matrix) * e_j.
    lower_bound = v.template lpNorm<1>();
    if (lower_bound <= old_lower_bound) {
      // Break if the gradient step did not increase the lower_bound.
      break;
    }
    if (!is_complex) {
      old_sign_vector = sign_vector;
    }
    old_v_max_abs_index = v_max_abs_index;
    old_lower_bound = lower_bound;
  }
  // The following calculates an independent estimate of ||matrix||_1 by
  // multiplying matrix by a vector with entries of slowly increasing
  // magnitude and alternating sign:
  // v_i = (-1)^{i} (1 + (i / (dim-1))), i = 0,...,dim-1.
  // This improvement to Hager's algorithm above is due to Higham. It was
  // added to make the algorithm more robust in certain corner cases where
  // large elements in the matrix might otherwise escape detection due to
  // exact cancellation (especially when op and op_adjoint correspond to a
  // sequence of backsubstitutions and permutations), which could cause
  // Hager's algorithm to vastly underestimate ||matrix||_1.
  Scalar alternating_sign(RealScalar(1));
  for (Index i = 0; i < n; ++i) {
    // The static_cast is needed when Scalar is a complex and RealScalar implements expression templates
    v[i] = alternating_sign * static_cast<RealScalar>(RealScalar(1) + (RealScalar(i) / (RealScalar(n - 1))));
    alternating_sign = -alternating_sign;
  }
  v = dec.solve(v);
  const RealScalar alternate_lower_bound = (2 * v.template lpNorm<1>()) / (3 * RealScalar(n));
  return numext::maxi(lower_bound, alternate_lower_bound);
}
142
+
143
+ /** \brief Reciprocal condition number estimator.
144
+ *
145
+ * Computing a decomposition of a dense matrix takes O(n^3) operations, while
146
+ * this method estimates the condition number quickly and reliably in O(n^2)
147
+ * operations.
148
+ *
149
+ * \returns an estimate of the reciprocal condition number
150
+ * (1 / (||matrix||_1 * ||inv(matrix)||_1)) of matrix, given ||matrix||_1 and
151
+ * its decomposition. Supports the following decompositions: FullPivLU,
152
+ * PartialPivLU, LDLT, and LLT.
153
+ *
154
+ * \sa FullPivLU, PartialPivLU, LDLT, LLT.
155
+ */
156
+ template <typename Decomposition>
157
+ typename Decomposition::RealScalar rcond_estimate_helper(typename Decomposition::RealScalar matrix_norm,
158
+ const Decomposition& dec) {
159
+ typedef typename Decomposition::RealScalar RealScalar;
160
+ eigen_assert(dec.rows() == dec.cols());
161
+ if (dec.rows() == 0) return NumTraits<RealScalar>::infinity();
162
+ if (numext::is_exactly_zero(matrix_norm)) return RealScalar(0);
163
+ if (dec.rows() == 1) return RealScalar(1);
164
+ const RealScalar inverse_matrix_norm = rcond_invmatrix_L1_norm_estimate(dec);
165
+ return (numext::is_exactly_zero(inverse_matrix_norm) ? RealScalar(0)
166
+ : (RealScalar(1) / inverse_matrix_norm) / matrix_norm);
167
+ }
168
+
169
+ } // namespace internal
170
+
171
+ } // namespace Eigen
172
+
173
+ #endif
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/CwiseBinaryOp.h ADDED
@@ -0,0 +1,166 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
5
+ // Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
6
+ //
7
+ // This Source Code Form is subject to the terms of the Mozilla
8
+ // Public License v. 2.0. If a copy of the MPL was not distributed
9
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
10
+
11
+ #ifndef EIGEN_CWISE_BINARY_OP_H
12
+ #define EIGEN_CWISE_BINARY_OP_H
13
+
14
+ // IWYU pragma: private
15
+ #include "./InternalHeaderCheck.h"
16
+
17
+ namespace Eigen {
18
+
19
+ namespace internal {
20
/** \internal Traits for CwiseBinaryOp: sizes come from the left operand,
 * while scalar type, storage kind/order and index type are promoted from
 * both operands. */
template <typename BinaryOp, typename Lhs, typename Rhs>
struct traits<CwiseBinaryOp<BinaryOp, Lhs, Rhs>> {
  // we must not inherit from traits<Lhs> since it has
  // the potential to cause problems with MSVC
  typedef remove_all_t<Lhs> Ancestor;
  typedef typename traits<Ancestor>::XprKind XprKind;
  enum {
    RowsAtCompileTime = traits<Ancestor>::RowsAtCompileTime,
    ColsAtCompileTime = traits<Ancestor>::ColsAtCompileTime,
    MaxRowsAtCompileTime = traits<Ancestor>::MaxRowsAtCompileTime,
    MaxColsAtCompileTime = traits<Ancestor>::MaxColsAtCompileTime
  };

  // even though we require Lhs and Rhs to have the same scalar type (see CwiseBinaryOp constructor),
  // we still want to handle the case when the result type is different.
  typedef typename result_of<BinaryOp(const typename Lhs::Scalar&, const typename Rhs::Scalar&)>::type Scalar;
  typedef typename cwise_promote_storage_type<typename traits<Lhs>::StorageKind, typename traits<Rhs>::StorageKind,
                                              BinaryOp>::ret StorageKind;
  typedef typename promote_index_type<typename traits<Lhs>::StorageIndex, typename traits<Rhs>::StorageIndex>::type
      StorageIndex;
  typedef typename Lhs::Nested LhsNested;
  typedef typename Rhs::Nested RhsNested;
  typedef std::remove_reference_t<LhsNested> LhsNested_;
  typedef std::remove_reference_t<RhsNested> RhsNested_;
  enum {
    // Resulting storage order (row/column major) is promoted from both operands.
    Flags = cwise_promote_storage_order<typename traits<Lhs>::StorageKind, typename traits<Rhs>::StorageKind,
                                        LhsNested_::Flags & RowMajorBit, RhsNested_::Flags & RowMajorBit>::value
  };
};
49
+ } // end namespace internal
50
+
51
// Forward declaration of the implementation base; the fourth parameter is the
// promoted storage kind of the two operands (see CwiseBinaryOp below).
template <typename BinaryOp, typename Lhs, typename Rhs, typename StorageKind>
class CwiseBinaryOpImpl;
53
+
54
+ /** \class CwiseBinaryOp
55
+ * \ingroup Core_Module
56
+ *
57
+ * \brief Generic expression where a coefficient-wise binary operator is applied to two expressions
58
+ *
59
+ * \tparam BinaryOp template functor implementing the operator
60
+ * \tparam LhsType the type of the left-hand side
61
+ * \tparam RhsType the type of the right-hand side
62
+ *
63
+ * This class represents an expression where a coefficient-wise binary operator is applied to two expressions.
64
+ * It is the return type of binary operators, by which we mean only those binary operators where
65
+ * both the left-hand side and the right-hand side are Eigen expressions.
66
+ * For example, the return type of matrix1+matrix2 is a CwiseBinaryOp.
67
+ *
68
+ * Most of the time, this is the only way that it is used, so you typically don't have to name
69
+ * CwiseBinaryOp types explicitly.
70
+ *
71
+ * \sa MatrixBase::binaryExpr(const MatrixBase<OtherDerived> &,const CustomBinaryOp &) const, class CwiseUnaryOp, class
72
+ * CwiseNullaryOp
73
+ */
74
template <typename BinaryOp, typename LhsType, typename RhsType>
class CwiseBinaryOp : public CwiseBinaryOpImpl<BinaryOp, LhsType, RhsType,
                                               typename internal::cwise_promote_storage_type<
                                                   typename internal::traits<LhsType>::StorageKind,
                                                   typename internal::traits<RhsType>::StorageKind, BinaryOp>::ret>,
                      internal::no_assignment_operator {
 public:
  typedef internal::remove_all_t<BinaryOp> Functor;
  typedef internal::remove_all_t<LhsType> Lhs;
  typedef internal::remove_all_t<RhsType> Rhs;

  typedef typename CwiseBinaryOpImpl<
      BinaryOp, LhsType, RhsType,
      typename internal::cwise_promote_storage_type<typename internal::traits<LhsType>::StorageKind,
                                                    typename internal::traits<Rhs>::StorageKind, BinaryOp>::ret>::Base
      Base;
  EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseBinaryOp)

  EIGEN_CHECK_BINARY_COMPATIBILIY(BinaryOp, typename Lhs::Scalar, typename Rhs::Scalar)
  EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Lhs, Rhs)

  // Operands are nested by value or by reference depending on ref_selector.
  typedef typename internal::ref_selector<LhsType>::type LhsNested;
  typedef typename internal::ref_selector<RhsType>::type RhsNested;
  typedef std::remove_reference_t<LhsNested> LhsNested_;
  typedef std::remove_reference_t<RhsNested> RhsNested_;

#if EIGEN_COMP_MSVC
  // Required for Visual Studio or the Copy constructor will probably not get inlined!
  EIGEN_STRONG_INLINE CwiseBinaryOp(const CwiseBinaryOp<BinaryOp, LhsType, RhsType>&) = default;
#endif

  // The two operands must have matching run-time sizes.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CwiseBinaryOp(const Lhs& aLhs, const Rhs& aRhs,
                                                      const BinaryOp& func = BinaryOp())
      : m_lhs(aLhs), m_rhs(aRhs), m_functor(func) {
    eigen_assert(aLhs.rows() == aRhs.rows() && aLhs.cols() == aRhs.cols());
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Index rows() const EIGEN_NOEXCEPT {
    // return the fixed size type if available to enable compile time optimizations
    return internal::traits<internal::remove_all_t<LhsNested>>::RowsAtCompileTime == Dynamic ? m_rhs.rows()
                                                                                             : m_lhs.rows();
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Index cols() const EIGEN_NOEXCEPT {
    // return the fixed size type if available to enable compile time optimizations
    return internal::traits<internal::remove_all_t<LhsNested>>::ColsAtCompileTime == Dynamic ? m_rhs.cols()
                                                                                             : m_lhs.cols();
  }

  /** \returns the left hand side nested expression */
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const LhsNested_& lhs() const { return m_lhs; }
  /** \returns the right hand side nested expression */
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const RhsNested_& rhs() const { return m_rhs; }
  /** \returns the functor representing the binary operation */
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const BinaryOp& functor() const { return m_functor; }

 protected:
  LhsNested m_lhs;
  RhsNested m_rhs;
  const BinaryOp m_functor;
};
134
+
135
// Generic API dispatcher: selects the appropriate expression base class
// (matrix vs array API) for CwiseBinaryOp via generic_xpr_base.
template <typename BinaryOp, typename Lhs, typename Rhs, typename StorageKind>
class CwiseBinaryOpImpl : public internal::generic_xpr_base<CwiseBinaryOp<BinaryOp, Lhs, Rhs>>::type {
 public:
  typedef typename internal::generic_xpr_base<CwiseBinaryOp<BinaryOp, Lhs, Rhs>>::type Base;
};
141
+
142
/** replaces \c *this by \c *this - \a other.
 *
 * \returns a reference to \c *this
 */
template <typename Derived>
template <typename OtherDerived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::operator-=(const MatrixBase<OtherDerived>& other) {
  // Evaluate `other` directly into *this through the compound subtract-assign functor.
  call_assignment(derived(), other.derived(), internal::sub_assign_op<Scalar, typename OtherDerived::Scalar>());
  return derived();
}
152
+
153
/** replaces \c *this by \c *this + \a other.
 *
 * \returns a reference to \c *this
 */
template <typename Derived>
template <typename OtherDerived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::operator+=(const MatrixBase<OtherDerived>& other) {
  // Evaluate `other` directly into *this through the compound add-assign functor.
  call_assignment(derived(), other.derived(), internal::add_assign_op<Scalar, typename OtherDerived::Scalar>());
  return derived();
}
163
+
164
+ } // end namespace Eigen
165
+
166
+ #endif // EIGEN_CWISE_BINARY_OP_H
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/CwiseNullaryOp.h ADDED
@@ -0,0 +1,971 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
5
+ //
6
+ // This Source Code Form is subject to the terms of the Mozilla
7
+ // Public License v. 2.0. If a copy of the MPL was not distributed
8
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9
+
10
+ #ifndef EIGEN_CWISE_NULLARY_OP_H
11
+ #define EIGEN_CWISE_NULLARY_OP_H
12
+
13
+ // IWYU pragma: private
14
+ #include "./InternalHeaderCheck.h"
15
+
16
+ namespace Eigen {
17
+
18
+ namespace internal {
19
+ template <typename NullaryOp, typename PlainObjectType>
20
+ struct traits<CwiseNullaryOp<NullaryOp, PlainObjectType> > : traits<PlainObjectType> {
21
+ enum { Flags = traits<PlainObjectType>::Flags & RowMajorBit };
22
+ };
23
+
24
+ } // namespace internal
25
+
26
+ /** \class CwiseNullaryOp
27
+ * \ingroup Core_Module
28
+ *
29
+ * \brief Generic expression of a matrix where all coefficients are defined by a functor
30
+ *
31
+ * \tparam NullaryOp template functor implementing the operator
32
+ * \tparam PlainObjectType the underlying plain matrix/array type
33
+ *
34
+ * This class represents an expression of a generic nullary operator.
35
+ * It is the return type of the Ones(), Zero(), Constant(), Identity() and Random() methods,
36
+ * and most of the time this is the only way it is used.
37
+ *
38
+ * However, if you want to write a function returning such an expression, you
39
+ * will need to use this class.
40
+ *
41
+ * The functor NullaryOp must expose one of the following method:
42
+ <table class="manual">
43
+ <tr ><td>\c operator()() </td><td>if the procedural generation does not depend on the coefficient entries
44
+ (e.g., random numbers)</td></tr> <tr class="alt"><td>\c operator()(Index i)</td><td>if the procedural generation makes
45
+ sense for vectors only and that it depends on the coefficient index \c i (e.g., linspace) </td></tr> <tr ><td>\c
46
+ operator()(Index i,Index j)</td><td>if the procedural generation depends on the matrix coordinates \c i, \c j (e.g.,
47
+ to generate a checkerboard with 0 and 1)</td></tr>
48
+ </table>
49
+ * It is also possible to expose the last two operators if the generation makes sense for matrices but can be optimized
50
+ for vectors.
51
+ *
52
+ * See DenseBase::NullaryExpr(Index,const CustomNullaryOp&) for an example binding
53
+ * C++11 random number generators.
54
+ *
55
+ * A nullary expression can also be used to implement custom sophisticated matrix manipulations
56
+ * that cannot be covered by the existing set of natively supported matrix manipulations.
57
+ * See this \ref TopicCustomizing_NullaryExpr "page" for some examples and additional explanations
58
+ * on the behavior of CwiseNullaryOp.
59
+ *
60
+ * \sa class CwiseUnaryOp, class CwiseBinaryOp, DenseBase::NullaryExpr
61
+ */
62
+ template <typename NullaryOp, typename PlainObjectType>
63
+ class CwiseNullaryOp : public internal::dense_xpr_base<CwiseNullaryOp<NullaryOp, PlainObjectType> >::type,
64
+ internal::no_assignment_operator {
65
+ public:
66
+ typedef typename internal::dense_xpr_base<CwiseNullaryOp>::type Base;
67
+ EIGEN_DENSE_PUBLIC_INTERFACE(CwiseNullaryOp)
68
+
69
+ EIGEN_DEVICE_FUNC CwiseNullaryOp(Index rows, Index cols, const NullaryOp& func = NullaryOp())
70
+ : m_rows(rows), m_cols(cols), m_functor(func) {
71
+ eigen_assert(rows >= 0 && (RowsAtCompileTime == Dynamic || RowsAtCompileTime == rows) && cols >= 0 &&
72
+ (ColsAtCompileTime == Dynamic || ColsAtCompileTime == cols));
73
+ }
74
+
75
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Index rows() const { return m_rows.value(); }
76
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Index cols() const { return m_cols.value(); }
77
+
78
+ /** \returns the functor representing the nullary operation */
79
+ EIGEN_DEVICE_FUNC const NullaryOp& functor() const { return m_functor; }
80
+
81
+ protected:
82
+ const internal::variable_if_dynamic<Index, RowsAtCompileTime> m_rows;
83
+ const internal::variable_if_dynamic<Index, ColsAtCompileTime> m_cols;
84
+ const NullaryOp m_functor;
85
+ };
86
+
87
+ /** \returns an expression of a matrix defined by a custom functor \a func
88
+ *
89
+ * The parameters \a rows and \a cols are the number of rows and of columns of
90
+ * the returned matrix. Must be compatible with this MatrixBase type.
91
+ *
92
+ * This variant is meant to be used for dynamic-size matrix types. For fixed-size types,
93
+ * it is redundant to pass \a rows and \a cols as arguments, so Zero() should be used
94
+ * instead.
95
+ *
96
+ * The template parameter \a CustomNullaryOp is the type of the functor.
97
+ *
98
+ * \sa class CwiseNullaryOp
99
+ */
100
+ template <typename Derived>
101
+ template <typename CustomNullaryOp>
102
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
103
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
104
+ const CwiseNullaryOp<CustomNullaryOp, typename DenseBase<Derived>::PlainObject>
105
+ #else
106
+ const CwiseNullaryOp<CustomNullaryOp, PlainObject>
107
+ #endif
108
+ DenseBase<Derived>::NullaryExpr(Index rows, Index cols, const CustomNullaryOp& func) {
109
+ return CwiseNullaryOp<CustomNullaryOp, PlainObject>(rows, cols, func);
110
+ }
111
+
112
+ /** \returns an expression of a matrix defined by a custom functor \a func
113
+ *
114
+ * The parameter \a size is the size of the returned vector.
115
+ * Must be compatible with this MatrixBase type.
116
+ *
117
+ * \only_for_vectors
118
+ *
119
+ * This variant is meant to be used for dynamic-size vector types. For fixed-size types,
120
+ * it is redundant to pass \a size as argument, so Zero() should be used
121
+ * instead.
122
+ *
123
+ * The template parameter \a CustomNullaryOp is the type of the functor.
124
+ *
125
+ * Here is an example with C++11 random generators: \include random_cpp11.cpp
126
+ * Output: \verbinclude random_cpp11.out
127
+ *
128
+ * \sa class CwiseNullaryOp
129
+ */
130
+ template <typename Derived>
131
+ template <typename CustomNullaryOp>
132
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
133
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
134
+ const CwiseNullaryOp<CustomNullaryOp, typename DenseBase<Derived>::PlainObject>
135
+ #else
136
+ const CwiseNullaryOp<CustomNullaryOp, PlainObject>
137
+ #endif
138
+ DenseBase<Derived>::NullaryExpr(Index size, const CustomNullaryOp& func) {
139
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
140
+ if (RowsAtCompileTime == 1)
141
+ return CwiseNullaryOp<CustomNullaryOp, PlainObject>(1, size, func);
142
+ else
143
+ return CwiseNullaryOp<CustomNullaryOp, PlainObject>(size, 1, func);
144
+ }
145
+
146
+ /** \returns an expression of a matrix defined by a custom functor \a func
147
+ *
148
+ * This variant is only for fixed-size DenseBase types. For dynamic-size types, you
149
+ * need to use the variants taking size arguments.
150
+ *
151
+ * The template parameter \a CustomNullaryOp is the type of the functor.
152
+ *
153
+ * \sa class CwiseNullaryOp
154
+ */
155
+ template <typename Derived>
156
+ template <typename CustomNullaryOp>
157
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
158
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
159
+ const CwiseNullaryOp<CustomNullaryOp, typename DenseBase<Derived>::PlainObject>
160
+ #else
161
+ const CwiseNullaryOp<CustomNullaryOp, PlainObject>
162
+ #endif
163
+ DenseBase<Derived>::NullaryExpr(const CustomNullaryOp& func) {
164
+ return CwiseNullaryOp<CustomNullaryOp, PlainObject>(RowsAtCompileTime, ColsAtCompileTime, func);
165
+ }
166
+
167
+ /** \returns an expression of a constant matrix of value \a value
168
+ *
169
+ * The parameters \a rows and \a cols are the number of rows and of columns of
170
+ * the returned matrix. Must be compatible with this DenseBase type.
171
+ *
172
+ * This variant is meant to be used for dynamic-size matrix types. For fixed-size types,
173
+ * it is redundant to pass \a rows and \a cols as arguments, so Zero() should be used
174
+ * instead.
175
+ *
176
+ * The template parameter \a CustomNullaryOp is the type of the functor.
177
+ *
178
+ * \sa class CwiseNullaryOp
179
+ */
180
+ template <typename Derived>
181
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
182
+ DenseBase<Derived>::Constant(Index rows, Index cols, const Scalar& value) {
183
+ return DenseBase<Derived>::NullaryExpr(rows, cols, internal::scalar_constant_op<Scalar>(value));
184
+ }
185
+
186
+ /** \returns an expression of a constant matrix of value \a value
187
+ *
188
+ * The parameter \a size is the size of the returned vector.
189
+ * Must be compatible with this DenseBase type.
190
+ *
191
+ * \only_for_vectors
192
+ *
193
+ * This variant is meant to be used for dynamic-size vector types. For fixed-size types,
194
+ * it is redundant to pass \a size as argument, so Zero() should be used
195
+ * instead.
196
+ *
197
+ * The template parameter \a CustomNullaryOp is the type of the functor.
198
+ *
199
+ * \sa class CwiseNullaryOp
200
+ */
201
+ template <typename Derived>
202
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
203
+ DenseBase<Derived>::Constant(Index size, const Scalar& value) {
204
+ return DenseBase<Derived>::NullaryExpr(size, internal::scalar_constant_op<Scalar>(value));
205
+ }
206
+
207
+ /** \returns an expression of a constant matrix of value \a value
208
+ *
209
+ * This variant is only for fixed-size DenseBase types. For dynamic-size types, you
210
+ * need to use the variants taking size arguments.
211
+ *
212
+ * The template parameter \a CustomNullaryOp is the type of the functor.
213
+ *
214
+ * \sa class CwiseNullaryOp
215
+ */
216
+ template <typename Derived>
217
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
218
+ DenseBase<Derived>::Constant(const Scalar& value) {
219
+ EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived)
220
+ return DenseBase<Derived>::NullaryExpr(RowsAtCompileTime, ColsAtCompileTime,
221
+ internal::scalar_constant_op<Scalar>(value));
222
+ }
223
+
224
+ /** \deprecated because of accuracy loss. In Eigen 3.3, it is an alias for LinSpaced(Index,const Scalar&,const Scalar&)
225
+ *
226
+ * \only_for_vectors
227
+ *
228
+ * Example: \include DenseBase_LinSpaced_seq_deprecated.cpp
229
+ * Output: \verbinclude DenseBase_LinSpaced_seq_deprecated.out
230
+ *
231
+ * \sa LinSpaced(Index,const Scalar&, const Scalar&), setLinSpaced(Index,const Scalar&,const Scalar&)
232
+ */
233
+ template <typename Derived>
234
+ EIGEN_DEPRECATED EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<
235
+ Derived>::RandomAccessLinSpacedReturnType
236
+ DenseBase<Derived>::LinSpaced(Sequential_t, Index size, const Scalar& low, const Scalar& high) {
237
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
238
+ return DenseBase<Derived>::NullaryExpr(size, internal::linspaced_op<Scalar>(low, high, size));
239
+ }
240
+
241
+ /** \deprecated because of accuracy loss. In Eigen 3.3, it is an alias for LinSpaced(const Scalar&,const Scalar&)
242
+ *
243
+ * \sa LinSpaced(const Scalar&, const Scalar&)
244
+ */
245
+ template <typename Derived>
246
+ EIGEN_DEPRECATED EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<
247
+ Derived>::RandomAccessLinSpacedReturnType
248
+ DenseBase<Derived>::LinSpaced(Sequential_t, const Scalar& low, const Scalar& high) {
249
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
250
+ EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived)
251
+ return DenseBase<Derived>::NullaryExpr(Derived::SizeAtCompileTime,
252
+ internal::linspaced_op<Scalar>(low, high, Derived::SizeAtCompileTime));
253
+ }
254
+
255
+ /**
256
+ * \brief Sets a linearly spaced vector.
257
+ *
258
+ * The function generates 'size' equally spaced values in the closed interval [low,high].
259
+ * When size is set to 1, a vector of length 1 containing 'high' is returned.
260
+ *
261
+ * \only_for_vectors
262
+ *
263
+ * Example: \include DenseBase_LinSpaced.cpp
264
+ * Output: \verbinclude DenseBase_LinSpaced.out
265
+ *
266
+ * For integer scalar types, an even spacing is possible if and only if the length of the range,
267
+ * i.e., \c high-low is a scalar multiple of \c size-1, or if \c size is a scalar multiple of the
268
+ * number of values \c high-low+1 (meaning each value can be repeated the same number of time).
269
+ * If one of these two considions is not satisfied, then \c high is lowered to the largest value
270
+ * satisfying one of this constraint.
271
+ * Here are some examples:
272
+ *
273
+ * Example: \include DenseBase_LinSpacedInt.cpp
274
+ * Output: \verbinclude DenseBase_LinSpacedInt.out
275
+ *
276
+ * \sa setLinSpaced(Index,const Scalar&,const Scalar&), CwiseNullaryOp
277
+ */
278
+ template <typename Derived>
279
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::RandomAccessLinSpacedReturnType
280
+ DenseBase<Derived>::LinSpaced(Index size, const Scalar& low, const Scalar& high) {
281
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
282
+ return DenseBase<Derived>::NullaryExpr(size, internal::linspaced_op<Scalar>(low, high, size));
283
+ }
284
+
285
+ /**
286
+ * \copydoc DenseBase::LinSpaced(Index, const Scalar&, const Scalar&)
287
+ * Special version for fixed size types which does not require the size parameter.
288
+ */
289
+ template <typename Derived>
290
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::RandomAccessLinSpacedReturnType
291
+ DenseBase<Derived>::LinSpaced(const Scalar& low, const Scalar& high) {
292
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
293
+ EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived)
294
+ return DenseBase<Derived>::NullaryExpr(Derived::SizeAtCompileTime,
295
+ internal::linspaced_op<Scalar>(low, high, Derived::SizeAtCompileTime));
296
+ }
297
+
298
+ template <typename Derived>
299
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::RandomAccessEqualSpacedReturnType
300
+ DenseBase<Derived>::EqualSpaced(Index size, const Scalar& low, const Scalar& step) {
301
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
302
+ return DenseBase<Derived>::NullaryExpr(size, internal::equalspaced_op<Scalar>(low, step));
303
+ }
304
+
305
+ template <typename Derived>
306
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::RandomAccessEqualSpacedReturnType
307
+ DenseBase<Derived>::EqualSpaced(const Scalar& low, const Scalar& step) {
308
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
309
+ return DenseBase<Derived>::NullaryExpr(Derived::SizeAtCompileTime, internal::equalspaced_op<Scalar>(low, step));
310
+ }
311
+
312
+ /** \returns true if all coefficients in this matrix are approximately equal to \a val, to within precision \a prec */
313
+ template <typename Derived>
314
+ EIGEN_DEVICE_FUNC bool DenseBase<Derived>::isApproxToConstant(const Scalar& val, const RealScalar& prec) const {
315
+ typename internal::nested_eval<Derived, 1>::type self(derived());
316
+ for (Index j = 0; j < cols(); ++j)
317
+ for (Index i = 0; i < rows(); ++i)
318
+ if (!internal::isApprox(self.coeff(i, j), val, prec)) return false;
319
+ return true;
320
+ }
321
+
322
+ /** This is just an alias for isApproxToConstant().
323
+ *
324
+ * \returns true if all coefficients in this matrix are approximately equal to \a value, to within precision \a prec */
325
+ template <typename Derived>
326
+ EIGEN_DEVICE_FUNC bool DenseBase<Derived>::isConstant(const Scalar& val, const RealScalar& prec) const {
327
+ return isApproxToConstant(val, prec);
328
+ }
329
+
330
+ /** Alias for setConstant(): sets all coefficients in this expression to \a val.
331
+ *
332
+ * \sa setConstant(), Constant(), class CwiseNullaryOp
333
+ */
334
+ template <typename Derived>
335
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void DenseBase<Derived>::fill(const Scalar& val) {
336
+ setConstant(val);
337
+ }
338
+
339
+ /** Sets all coefficients in this expression to value \a val.
340
+ *
341
+ * \sa fill(), setConstant(Index,const Scalar&), setConstant(Index,Index,const Scalar&), setZero(), setOnes(),
342
+ * Constant(), class CwiseNullaryOp, setZero(), setOnes()
343
+ */
344
+ template <typename Derived>
345
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setConstant(const Scalar& val) {
346
+ return derived() = Constant(rows(), cols(), val);
347
+ }
348
+
349
+ /** Resizes to the given \a size, and sets all coefficients in this expression to the given value \a val.
350
+ *
351
+ * \only_for_vectors
352
+ *
353
+ * Example: \include Matrix_setConstant_int.cpp
354
+ * Output: \verbinclude Matrix_setConstant_int.out
355
+ *
356
+ * \sa MatrixBase::setConstant(const Scalar&), setConstant(Index,Index,const Scalar&), class CwiseNullaryOp,
357
+ * MatrixBase::Constant(const Scalar&)
358
+ */
359
+ template <typename Derived>
360
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& PlainObjectBase<Derived>::setConstant(Index size, const Scalar& val) {
361
+ resize(size);
362
+ return setConstant(val);
363
+ }
364
+
365
+ /** Resizes to the given size, and sets all coefficients in this expression to the given value \a val.
366
+ *
367
+ * \param rows the new number of rows
368
+ * \param cols the new number of columns
369
+ * \param val the value to which all coefficients are set
370
+ *
371
+ * Example: \include Matrix_setConstant_int_int.cpp
372
+ * Output: \verbinclude Matrix_setConstant_int_int.out
373
+ *
374
+ * \sa MatrixBase::setConstant(const Scalar&), setConstant(Index,const Scalar&), class CwiseNullaryOp,
375
+ * MatrixBase::Constant(const Scalar&)
376
+ */
377
+ template <typename Derived>
378
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& PlainObjectBase<Derived>::setConstant(Index rows, Index cols,
379
+ const Scalar& val) {
380
+ resize(rows, cols);
381
+ return setConstant(val);
382
+ }
383
+
384
+ /** Resizes to the given size, changing only the number of columns, and sets all
385
+ * coefficients in this expression to the given value \a val. For the parameter
386
+ * of type NoChange_t, just pass the special value \c NoChange.
387
+ *
388
+ * \sa MatrixBase::setConstant(const Scalar&), setConstant(Index,const Scalar&), class CwiseNullaryOp,
389
+ * MatrixBase::Constant(const Scalar&)
390
+ */
391
+ template <typename Derived>
392
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& PlainObjectBase<Derived>::setConstant(NoChange_t, Index cols,
393
+ const Scalar& val) {
394
+ return setConstant(rows(), cols, val);
395
+ }
396
+
397
+ /** Resizes to the given size, changing only the number of rows, and sets all
398
+ * coefficients in this expression to the given value \a val. For the parameter
399
+ * of type NoChange_t, just pass the special value \c NoChange.
400
+ *
401
+ * \sa MatrixBase::setConstant(const Scalar&), setConstant(Index,const Scalar&), class CwiseNullaryOp,
402
+ * MatrixBase::Constant(const Scalar&)
403
+ */
404
+ template <typename Derived>
405
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& PlainObjectBase<Derived>::setConstant(Index rows, NoChange_t,
406
+ const Scalar& val) {
407
+ return setConstant(rows, cols(), val);
408
+ }
409
+
410
+ /**
411
+ * \brief Sets a linearly spaced vector.
412
+ *
413
+ * The function generates 'size' equally spaced values in the closed interval [low,high].
414
+ * When size is set to 1, a vector of length 1 containing 'high' is returned.
415
+ *
416
+ * \only_for_vectors
417
+ *
418
+ * Example: \include DenseBase_setLinSpaced.cpp
419
+ * Output: \verbinclude DenseBase_setLinSpaced.out
420
+ *
421
+ * For integer scalar types, do not miss the explanations on the definition
422
+ * of \link LinSpaced(Index,const Scalar&,const Scalar&) even spacing \endlink.
423
+ *
424
+ * \sa LinSpaced(Index,const Scalar&,const Scalar&), CwiseNullaryOp
425
+ */
426
+ template <typename Derived>
427
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setLinSpaced(Index newSize, const Scalar& low,
428
+ const Scalar& high) {
429
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
430
+ return derived() = Derived::NullaryExpr(newSize, internal::linspaced_op<Scalar>(low, high, newSize));
431
+ }
432
+
433
+ /**
434
+ * \brief Sets a linearly spaced vector.
435
+ *
436
+ * The function fills \c *this with equally spaced values in the closed interval [low,high].
437
+ * When size is set to 1, a vector of length 1 containing 'high' is returned.
438
+ *
439
+ * \only_for_vectors
440
+ *
441
+ * For integer scalar types, do not miss the explanations on the definition
442
+ * of \link LinSpaced(Index,const Scalar&,const Scalar&) even spacing \endlink.
443
+ *
444
+ * \sa LinSpaced(Index,const Scalar&,const Scalar&), setLinSpaced(Index, const Scalar&, const Scalar&), CwiseNullaryOp
445
+ */
446
+ template <typename Derived>
447
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setLinSpaced(const Scalar& low, const Scalar& high) {
448
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
449
+ return setLinSpaced(size(), low, high);
450
+ }
451
+
452
+ template <typename Derived>
453
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setEqualSpaced(Index newSize, const Scalar& low,
454
+ const Scalar& step) {
455
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
456
+ return derived() = Derived::NullaryExpr(newSize, internal::equalspaced_op<Scalar>(low, step));
457
+ }
458
+ template <typename Derived>
459
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setEqualSpaced(const Scalar& low,
460
+ const Scalar& step) {
461
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
462
+ return setEqualSpaced(size(), low, step);
463
+ }
464
+
465
+ // zero:
466
+
467
+ /** \returns an expression of a zero matrix.
468
+ *
469
+ * The parameters \a rows and \a cols are the number of rows and of columns of
470
+ * the returned matrix. Must be compatible with this MatrixBase type.
471
+ *
472
+ * This variant is meant to be used for dynamic-size matrix types. For fixed-size types,
473
+ * it is redundant to pass \a rows and \a cols as arguments, so Zero() should be used
474
+ * instead.
475
+ *
476
+ * Example: \include MatrixBase_zero_int_int.cpp
477
+ * Output: \verbinclude MatrixBase_zero_int_int.out
478
+ *
479
+ * \sa Zero(), Zero(Index)
480
+ */
481
+ template <typename Derived>
482
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType DenseBase<Derived>::Zero(
483
+ Index rows, Index cols) {
484
+ return Constant(rows, cols, Scalar(0));
485
+ }
486
+
487
+ /** \returns an expression of a zero vector.
488
+ *
489
+ * The parameter \a size is the size of the returned vector.
490
+ * Must be compatible with this MatrixBase type.
491
+ *
492
+ * \only_for_vectors
493
+ *
494
+ * This variant is meant to be used for dynamic-size vector types. For fixed-size types,
495
+ * it is redundant to pass \a size as argument, so Zero() should be used
496
+ * instead.
497
+ *
498
+ * Example: \include MatrixBase_zero_int.cpp
499
+ * Output: \verbinclude MatrixBase_zero_int.out
500
+ *
501
+ * \sa Zero(), Zero(Index,Index)
502
+ */
503
+ template <typename Derived>
504
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType DenseBase<Derived>::Zero(
505
+ Index size) {
506
+ return Constant(size, Scalar(0));
507
+ }
508
+
509
+ /** \returns an expression of a fixed-size zero matrix or vector.
510
+ *
511
+ * This variant is only for fixed-size MatrixBase types. For dynamic-size types, you
512
+ * need to use the variants taking size arguments.
513
+ *
514
+ * Example: \include MatrixBase_zero.cpp
515
+ * Output: \verbinclude MatrixBase_zero.out
516
+ *
517
+ * \sa Zero(Index), Zero(Index,Index)
518
+ */
519
+ template <typename Derived>
520
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType DenseBase<Derived>::Zero() {
521
+ return Constant(Scalar(0));
522
+ }
523
+
524
+ /** \returns true if *this is approximately equal to the zero matrix,
525
+ * within the precision given by \a prec.
526
+ *
527
+ * Example: \include MatrixBase_isZero.cpp
528
+ * Output: \verbinclude MatrixBase_isZero.out
529
+ *
530
+ * \sa class CwiseNullaryOp, Zero()
531
+ */
532
+ template <typename Derived>
533
+ EIGEN_DEVICE_FUNC bool DenseBase<Derived>::isZero(const RealScalar& prec) const {
534
+ typename internal::nested_eval<Derived, 1>::type self(derived());
535
+ for (Index j = 0; j < cols(); ++j)
536
+ for (Index i = 0; i < rows(); ++i)
537
+ if (!internal::isMuchSmallerThan(self.coeff(i, j), static_cast<Scalar>(1), prec)) return false;
538
+ return true;
539
+ }
540
+
541
+ /** Sets all coefficients in this expression to zero.
542
+ *
543
+ * Example: \include MatrixBase_setZero.cpp
544
+ * Output: \verbinclude MatrixBase_setZero.out
545
+ *
546
+ * \sa class CwiseNullaryOp, Zero()
547
+ */
548
+ template <typename Derived>
549
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setZero() {
550
+ return setConstant(Scalar(0));
551
+ }
552
+
553
+ /** Resizes to the given \a size, and sets all coefficients in this expression to zero.
554
+ *
555
+ * \only_for_vectors
556
+ *
557
+ * Example: \include Matrix_setZero_int.cpp
558
+ * Output: \verbinclude Matrix_setZero_int.out
559
+ *
560
+ * \sa DenseBase::setZero(), setZero(Index,Index), class CwiseNullaryOp, DenseBase::Zero()
561
+ */
562
+ template <typename Derived>
563
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& PlainObjectBase<Derived>::setZero(Index newSize) {
564
+ resize(newSize);
565
+ return setConstant(Scalar(0));
566
+ }
567
+
568
+ /** Resizes to the given size, and sets all coefficients in this expression to zero.
569
+ *
570
+ * \param rows the new number of rows
571
+ * \param cols the new number of columns
572
+ *
573
+ * Example: \include Matrix_setZero_int_int.cpp
574
+ * Output: \verbinclude Matrix_setZero_int_int.out
575
+ *
576
+ * \sa DenseBase::setZero(), setZero(Index), class CwiseNullaryOp, DenseBase::Zero()
577
+ */
578
+ template <typename Derived>
579
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& PlainObjectBase<Derived>::setZero(Index rows, Index cols) {
580
+ resize(rows, cols);
581
+ return setConstant(Scalar(0));
582
+ }
583
+
584
+ /** Resizes to the given size, changing only the number of columns, and sets all
585
+ * coefficients in this expression to zero. For the parameter of type NoChange_t,
586
+ * just pass the special value \c NoChange.
587
+ *
588
+ * \sa DenseBase::setZero(), setZero(Index), setZero(Index, Index), setZero(Index, NoChange_t), class CwiseNullaryOp,
589
+ * DenseBase::Zero()
590
+ */
591
+ template <typename Derived>
592
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& PlainObjectBase<Derived>::setZero(NoChange_t, Index cols) {
593
+ return setZero(rows(), cols);
594
+ }
595
+
596
+ /** Resizes to the given size, changing only the number of rows, and sets all
597
+ * coefficients in this expression to zero. For the parameter of type NoChange_t,
598
+ * just pass the special value \c NoChange.
599
+ *
600
+ * \sa DenseBase::setZero(), setZero(Index), setZero(Index, Index), setZero(NoChange_t, Index), class CwiseNullaryOp,
601
+ * DenseBase::Zero()
602
+ */
603
+ template <typename Derived>
604
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& PlainObjectBase<Derived>::setZero(Index rows, NoChange_t) {
605
+ return setZero(rows, cols());
606
+ }
607
+
608
+ // ones:
609
+
610
+ /** \returns an expression of a matrix where all coefficients equal one.
611
+ *
612
+ * The parameters \a rows and \a cols are the number of rows and of columns of
613
+ * the returned matrix. Must be compatible with this MatrixBase type.
614
+ *
615
+ * This variant is meant to be used for dynamic-size matrix types. For fixed-size types,
616
+ * it is redundant to pass \a rows and \a cols as arguments, so Ones() should be used
617
+ * instead.
618
+ *
619
+ * Example: \include MatrixBase_ones_int_int.cpp
620
+ * Output: \verbinclude MatrixBase_ones_int_int.out
621
+ *
622
+ * \sa Ones(), Ones(Index), isOnes(), class Ones
623
+ */
624
+ template <typename Derived>
625
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType DenseBase<Derived>::Ones(
626
+ Index rows, Index cols) {
627
+ return Constant(rows, cols, Scalar(1));
628
+ }
629
+
630
+ /** \returns an expression of a vector where all coefficients equal one.
631
+ *
632
+ * The parameter \a newSize is the size of the returned vector.
633
+ * Must be compatible with this MatrixBase type.
634
+ *
635
+ * \only_for_vectors
636
+ *
637
+ * This variant is meant to be used for dynamic-size vector types. For fixed-size types,
638
+ * it is redundant to pass \a size as argument, so Ones() should be used
639
+ * instead.
640
+ *
641
+ * Example: \include MatrixBase_ones_int.cpp
642
+ * Output: \verbinclude MatrixBase_ones_int.out
643
+ *
644
+ * \sa Ones(), Ones(Index,Index), isOnes(), class Ones
645
+ */
646
+ template <typename Derived>
647
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType DenseBase<Derived>::Ones(
648
+ Index newSize) {
649
+ return Constant(newSize, Scalar(1));
650
+ }
651
+
652
+ /** \returns an expression of a fixed-size matrix or vector where all coefficients equal one.
653
+ *
654
+ * This variant is only for fixed-size MatrixBase types. For dynamic-size types, you
655
+ * need to use the variants taking size arguments.
656
+ *
657
+ * Example: \include MatrixBase_ones.cpp
658
+ * Output: \verbinclude MatrixBase_ones.out
659
+ *
660
+ * \sa Ones(Index), Ones(Index,Index), isOnes(), class Ones
661
+ */
662
+ template <typename Derived>
663
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType DenseBase<Derived>::Ones() {
664
+ return Constant(Scalar(1));
665
+ }
666
+
667
+ /** \returns true if *this is approximately equal to the matrix where all coefficients
668
+ * are equal to 1, within the precision given by \a prec.
669
+ *
670
+ * Example: \include MatrixBase_isOnes.cpp
671
+ * Output: \verbinclude MatrixBase_isOnes.out
672
+ *
673
+ * \sa class CwiseNullaryOp, Ones()
674
+ */
675
+ template <typename Derived>
676
+ EIGEN_DEVICE_FUNC bool DenseBase<Derived>::isOnes(const RealScalar& prec) const {
677
+ return isApproxToConstant(Scalar(1), prec);
678
+ }
679
+
680
+ /** Sets all coefficients in this expression to one.
681
+ *
682
+ * Example: \include MatrixBase_setOnes.cpp
683
+ * Output: \verbinclude MatrixBase_setOnes.out
684
+ *
685
+ * \sa class CwiseNullaryOp, Ones()
686
+ */
687
+ template <typename Derived>
688
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setOnes() {
689
+ return setConstant(Scalar(1));
690
+ }
691
+
692
+ /** Resizes to the given \a newSize, and sets all coefficients in this expression to one.
693
+ *
694
+ * \only_for_vectors
695
+ *
696
+ * Example: \include Matrix_setOnes_int.cpp
697
+ * Output: \verbinclude Matrix_setOnes_int.out
698
+ *
699
+ * \sa MatrixBase::setOnes(), setOnes(Index,Index), class CwiseNullaryOp, MatrixBase::Ones()
700
+ */
701
+ template <typename Derived>
702
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& PlainObjectBase<Derived>::setOnes(Index newSize) {
703
+ resize(newSize);
704
+ return setConstant(Scalar(1));
705
+ }
706
+
707
+ /** Resizes to the given size, and sets all coefficients in this expression to one.
708
+ *
709
+ * \param rows the new number of rows
710
+ * \param cols the new number of columns
711
+ *
712
+ * Example: \include Matrix_setOnes_int_int.cpp
713
+ * Output: \verbinclude Matrix_setOnes_int_int.out
714
+ *
715
+ * \sa MatrixBase::setOnes(), setOnes(Index), class CwiseNullaryOp, MatrixBase::Ones()
716
+ */
717
+ template <typename Derived>
718
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& PlainObjectBase<Derived>::setOnes(Index rows, Index cols) {
719
+ resize(rows, cols);
720
+ return setConstant(Scalar(1));
721
+ }
722
+
723
+ /** Resizes to the given size, changing only the number of rows, and sets all
724
+ * coefficients in this expression to one. For the parameter of type NoChange_t,
725
+ * just pass the special value \c NoChange.
726
+ *
727
+ * \sa MatrixBase::setOnes(), setOnes(Index), setOnes(Index, Index), setOnes(NoChange_t, Index), class CwiseNullaryOp,
728
+ * MatrixBase::Ones()
729
+ */
730
+ template <typename Derived>
731
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& PlainObjectBase<Derived>::setOnes(Index rows, NoChange_t) {
732
+ return setOnes(rows, cols());
733
+ }
734
+
735
+ /** Resizes to the given size, changing only the number of columns, and sets all
736
+ * coefficients in this expression to one. For the parameter of type NoChange_t,
737
+ * just pass the special value \c NoChange.
738
+ *
739
+ * \sa MatrixBase::setOnes(), setOnes(Index), setOnes(Index, Index), setOnes(Index, NoChange_t) class CwiseNullaryOp,
740
+ * MatrixBase::Ones()
741
+ */
742
+ template <typename Derived>
743
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& PlainObjectBase<Derived>::setOnes(NoChange_t, Index cols) {
744
+ return setOnes(rows(), cols);
745
+ }
746
+
747
+ // Identity:
748
+
749
+ /** \returns an expression of the identity matrix (not necessarily square).
750
+ *
751
+ * The parameters \a rows and \a cols are the number of rows and of columns of
752
+ * the returned matrix. Must be compatible with this MatrixBase type.
753
+ *
754
+ * This variant is meant to be used for dynamic-size matrix types. For fixed-size types,
755
+ * it is redundant to pass \a rows and \a cols as arguments, so Identity() should be used
756
+ * instead.
757
+ *
758
+ * Example: \include MatrixBase_identity_int_int.cpp
759
+ * Output: \verbinclude MatrixBase_identity_int_int.out
760
+ *
761
+ * \sa Identity(), setIdentity(), isIdentity()
762
+ */
763
+ template <typename Derived>
764
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::IdentityReturnType
765
+ MatrixBase<Derived>::Identity(Index rows, Index cols) {
766
+ return DenseBase<Derived>::NullaryExpr(rows, cols, internal::scalar_identity_op<Scalar>());
767
+ }
768
+
769
+ /** \returns an expression of the identity matrix (not necessarily square).
770
+ *
771
+ * This variant is only for fixed-size MatrixBase types. For dynamic-size types, you
772
+ * need to use the variant taking size arguments.
773
+ *
774
+ * Example: \include MatrixBase_identity.cpp
775
+ * Output: \verbinclude MatrixBase_identity.out
776
+ *
777
+ * \sa Identity(Index,Index), setIdentity(), isIdentity()
778
+ */
779
+ template <typename Derived>
780
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::IdentityReturnType
781
+ MatrixBase<Derived>::Identity() {
782
+ EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived)
783
+ return MatrixBase<Derived>::NullaryExpr(RowsAtCompileTime, ColsAtCompileTime, internal::scalar_identity_op<Scalar>());
784
+ }
785
+
786
+ /** \returns true if *this is approximately equal to the identity matrix
787
+ * (not necessarily square),
788
+ * within the precision given by \a prec.
789
+ *
790
+ * Example: \include MatrixBase_isIdentity.cpp
791
+ * Output: \verbinclude MatrixBase_isIdentity.out
792
+ *
793
+ * \sa class CwiseNullaryOp, Identity(), Identity(Index,Index), setIdentity()
794
+ */
795
+ template <typename Derived>
796
+ bool MatrixBase<Derived>::isIdentity(const RealScalar& prec) const {
797
+ typename internal::nested_eval<Derived, 1>::type self(derived());
798
+ for (Index j = 0; j < cols(); ++j) {
799
+ for (Index i = 0; i < rows(); ++i) {
800
+ if (i == j) {
801
+ if (!internal::isApprox(self.coeff(i, j), static_cast<Scalar>(1), prec)) return false;
802
+ } else {
803
+ if (!internal::isMuchSmallerThan(self.coeff(i, j), static_cast<RealScalar>(1), prec)) return false;
804
+ }
805
+ }
806
+ }
807
+ return true;
808
+ }
809
+
810
namespace internal {

// Dispatch helper for MatrixBase::setIdentity(). `Big` is true only for
// fixed sizes of at least 16 coefficients; note Dynamic is -1, so
// dynamic-size matrices always take this generic path.
template <typename Derived, bool Big = (Derived::SizeAtCompileTime >= 16)>
struct setIdentity_impl {
  // Generic path: assign the identity nullary expression.
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Derived& run(Derived& m) {
    return m = Derived::Identity(m.rows(), m.cols());
  }
};

// Large fixed-size path: zero everything, then write the diagonal directly.
// For big fixed sizes this avoids evaluating the per-coefficient identity
// functor over the whole matrix (heuristic threshold above).
template <typename Derived>
struct setIdentity_impl<Derived, true> {
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Derived& run(Derived& m) {
    m.setZero();
    // Diagonal length of a possibly non-square matrix.
    const Index size = numext::mini(m.rows(), m.cols());
    for (Index i = 0; i < size; ++i) m.coeffRef(i, i) = typename Derived::Scalar(1);
    return m;
  }
};

}  // end namespace internal
830
+
831
+ /** Writes the identity expression (not necessarily square) into *this.
832
+ *
833
+ * Example: \include MatrixBase_setIdentity.cpp
834
+ * Output: \verbinclude MatrixBase_setIdentity.out
835
+ *
836
+ * \sa class CwiseNullaryOp, Identity(), Identity(Index,Index), isIdentity()
837
+ */
838
+ template <typename Derived>
839
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::setIdentity() {
840
+ return internal::setIdentity_impl<Derived>::run(derived());
841
+ }
842
+
843
+ /** \brief Resizes to the given size, and writes the identity expression (not necessarily square) into *this.
844
+ *
845
+ * \param rows the new number of rows
846
+ * \param cols the new number of columns
847
+ *
848
+ * Example: \include Matrix_setIdentity_int_int.cpp
849
+ * Output: \verbinclude Matrix_setIdentity_int_int.out
850
+ *
851
+ * \sa MatrixBase::setIdentity(), class CwiseNullaryOp, MatrixBase::Identity()
852
+ */
853
+ template <typename Derived>
854
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::setIdentity(Index rows, Index cols) {
855
+ derived().resize(rows, cols);
856
+ return setIdentity();
857
+ }
858
+
859
+ /** \returns an expression of the i-th unit (basis) vector.
860
+ *
861
+ * \only_for_vectors
862
+ *
863
+ * \sa MatrixBase::Unit(Index), MatrixBase::UnitX(), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW()
864
+ */
865
+ template <typename Derived>
866
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::Unit(
867
+ Index newSize, Index i) {
868
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
869
+ return BasisReturnType(SquareMatrixType::Identity(newSize, newSize), i);
870
+ }
871
+
872
+ /** \returns an expression of the i-th unit (basis) vector.
873
+ *
874
+ * \only_for_vectors
875
+ *
876
+ * This variant is for fixed-size vector only.
877
+ *
878
+ * \sa MatrixBase::Unit(Index,Index), MatrixBase::UnitX(), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW()
879
+ */
880
+ template <typename Derived>
881
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::Unit(
882
+ Index i) {
883
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
884
+ return BasisReturnType(SquareMatrixType::Identity(), i);
885
+ }
886
+
887
+ /** \returns an expression of the X axis unit vector (1{,0}^*)
888
+ *
889
+ * \only_for_vectors
890
+ *
891
+ * \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitY(), MatrixBase::UnitZ(),
892
+ * MatrixBase::UnitW()
893
+ */
894
+ template <typename Derived>
895
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::UnitX() {
896
+ return Derived::Unit(0);
897
+ }
898
+
899
+ /** \returns an expression of the Y axis unit vector (0,1{,0}^*)
900
+ *
901
+ * \only_for_vectors
902
+ *
903
+ * \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitY(), MatrixBase::UnitZ(),
904
+ * MatrixBase::UnitW()
905
+ */
906
+ template <typename Derived>
907
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::UnitY() {
908
+ return Derived::Unit(1);
909
+ }
910
+
911
+ /** \returns an expression of the Z axis unit vector (0,0,1{,0}^*)
912
+ *
913
+ * \only_for_vectors
914
+ *
915
+ * \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitY(), MatrixBase::UnitZ(),
916
+ * MatrixBase::UnitW()
917
+ */
918
+ template <typename Derived>
919
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::UnitZ() {
920
+ return Derived::Unit(2);
921
+ }
922
+
923
+ /** \returns an expression of the W axis unit vector (0,0,0,1)
924
+ *
925
+ * \only_for_vectors
926
+ *
927
+ * \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitY(), MatrixBase::UnitZ(),
928
+ * MatrixBase::UnitW()
929
+ */
930
+ template <typename Derived>
931
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::UnitW() {
932
+ return Derived::Unit(3);
933
+ }
934
+
935
+ /** \brief Set the coefficients of \c *this to the i-th unit (basis) vector
936
+ *
937
+ * \param i index of the unique coefficient to be set to 1
938
+ *
939
+ * \only_for_vectors
940
+ *
941
+ * \sa MatrixBase::setIdentity(), class CwiseNullaryOp, MatrixBase::Unit(Index,Index)
942
+ */
943
+ template <typename Derived>
944
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::setUnit(Index i) {
945
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);
946
+ eigen_assert(i < size());
947
+ derived().setZero();
948
+ derived().coeffRef(i) = Scalar(1);
949
+ return derived();
950
+ }
951
+
952
+ /** \brief Resizes to the given \a newSize, and writes the i-th unit (basis) vector into *this.
953
+ *
954
+ * \param newSize the new size of the vector
955
+ * \param i index of the unique coefficient to be set to 1
956
+ *
957
+ * \only_for_vectors
958
+ *
959
+ * \sa MatrixBase::setIdentity(), class CwiseNullaryOp, MatrixBase::Unit(Index,Index)
960
+ */
961
+ template <typename Derived>
962
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::setUnit(Index newSize, Index i) {
963
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);
964
+ eigen_assert(i < newSize);
965
+ derived().resize(newSize);
966
+ return setUnit(i);
967
+ }
968
+
969
+ } // end namespace Eigen
970
+
971
+ #endif // EIGEN_CWISE_NULLARY_OP_H
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/CwiseTernaryOp.h ADDED
@@ -0,0 +1,171 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
5
+ // Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
6
+ // Copyright (C) 2016 Eugene Brevdo <ebrevdo@gmail.com>
7
+ //
8
+ // This Source Code Form is subject to the terms of the Mozilla
9
+ // Public License v. 2.0. If a copy of the MPL was not distributed
10
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
11
+
12
+ #ifndef EIGEN_CWISE_TERNARY_OP_H
13
+ #define EIGEN_CWISE_TERNARY_OP_H
14
+
15
+ // IWYU pragma: private
16
+ #include "./InternalHeaderCheck.h"
17
+
18
+ namespace Eigen {
19
+
20
+ namespace internal {
21
// Expression traits for CwiseTernaryOp: sizes/storage come from the first
// argument, the result Scalar from the functor's return type.
template <typename TernaryOp, typename Arg1, typename Arg2, typename Arg3>
struct traits<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3>> {
  // we must not inherit from traits<Arg1> since it has
  // the potential to cause problems with MSVC
  typedef remove_all_t<Arg1> Ancestor;
  typedef typename traits<Ancestor>::XprKind XprKind;
  enum {
    RowsAtCompileTime = traits<Ancestor>::RowsAtCompileTime,
    ColsAtCompileTime = traits<Ancestor>::ColsAtCompileTime,
    MaxRowsAtCompileTime = traits<Ancestor>::MaxRowsAtCompileTime,
    MaxColsAtCompileTime = traits<Ancestor>::MaxColsAtCompileTime
  };

  // even though we require Arg1, Arg2, and Arg3 to have the same scalar type
  // (see CwiseTernaryOp constructor),
  // we still want to handle the case when the result type is different.
  typedef typename result_of<TernaryOp(const typename Arg1::Scalar&, const typename Arg2::Scalar&,
                                       const typename Arg3::Scalar&)>::type Scalar;

  typedef typename internal::traits<Arg1>::StorageKind StorageKind;
  typedef typename internal::traits<Arg1>::StorageIndex StorageIndex;

  typedef typename Arg1::Nested Arg1Nested;
  typedef typename Arg2::Nested Arg2Nested;
  typedef typename Arg3::Nested Arg3Nested;
  typedef std::remove_reference_t<Arg1Nested> Arg1Nested_;
  typedef std::remove_reference_t<Arg2Nested> Arg2Nested_;
  typedef std::remove_reference_t<Arg3Nested> Arg3Nested_;
  // Only the storage-order bit is propagated from the first argument.
  enum { Flags = Arg1Nested_::Flags & RowMajorBit };
};
51
+ } // end namespace internal
52
+
53
+ template <typename TernaryOp, typename Arg1, typename Arg2, typename Arg3, typename StorageKind>
54
+ class CwiseTernaryOpImpl;
55
+
56
/** \class CwiseTernaryOp
  * \ingroup Core_Module
  *
  * \brief Generic expression where a coefficient-wise ternary operator is
  * applied to two expressions
  *
  * \tparam TernaryOp template functor implementing the operator
  * \tparam Arg1Type the type of the first argument
  * \tparam Arg2Type the type of the second argument
  * \tparam Arg3Type the type of the third argument
  *
  * This class represents an expression where a coefficient-wise ternary
  * operator is applied to three expressions.
  * It is the return type of ternary operators, by which we mean only those
  * ternary operators where
  * all three arguments are Eigen expressions.
  * For example, the return type of betainc(matrix1, matrix2, matrix3) is a
  * CwiseTernaryOp.
  *
  * Most of the time, this is the only way that it is used, so you typically
  * don't have to name
  * CwiseTernaryOp types explicitly.
  *
  * \sa MatrixBase::ternaryExpr(const MatrixBase<Argument2> &, const
  * MatrixBase<Argument3> &, const CustomTernaryOp &) const, class CwiseBinaryOp,
  * class CwiseUnaryOp, class CwiseNullaryOp
  */
template <typename TernaryOp, typename Arg1Type, typename Arg2Type, typename Arg3Type>
class CwiseTernaryOp : public CwiseTernaryOpImpl<TernaryOp, Arg1Type, Arg2Type, Arg3Type,
                                                 typename internal::traits<Arg1Type>::StorageKind>,
                       internal::no_assignment_operator {
 public:
  typedef internal::remove_all_t<Arg1Type> Arg1;
  typedef internal::remove_all_t<Arg2Type> Arg2;
  typedef internal::remove_all_t<Arg3Type> Arg3;

  // require the sizes to match
  EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Arg1, Arg2)
  EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Arg1, Arg3)

  // The index types should match
  EIGEN_STATIC_ASSERT((internal::is_same<typename internal::traits<Arg1Type>::StorageKind,
                                         typename internal::traits<Arg2Type>::StorageKind>::value),
                      STORAGE_KIND_MUST_MATCH)
  EIGEN_STATIC_ASSERT((internal::is_same<typename internal::traits<Arg1Type>::StorageKind,
                                         typename internal::traits<Arg3Type>::StorageKind>::value),
                      STORAGE_KIND_MUST_MATCH)

  typedef typename CwiseTernaryOpImpl<TernaryOp, Arg1Type, Arg2Type, Arg3Type,
                                      typename internal::traits<Arg1Type>::StorageKind>::Base Base;
  EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseTernaryOp)

  typedef typename internal::ref_selector<Arg1Type>::type Arg1Nested;
  typedef typename internal::ref_selector<Arg2Type>::type Arg2Nested;
  typedef typename internal::ref_selector<Arg3Type>::type Arg3Nested;
  typedef std::remove_reference_t<Arg1Nested> Arg1Nested_;
  typedef std::remove_reference_t<Arg2Nested> Arg2Nested_;
  typedef std::remove_reference_t<Arg3Nested> Arg3Nested_;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CwiseTernaryOp(const Arg1& a1, const Arg2& a2, const Arg3& a3,
                                                       const TernaryOp& func = TernaryOp())
      : m_arg1(a1), m_arg2(a2), m_arg3(a3), m_functor(func) {
    // Runtime complement of the compile-time size checks above, needed for
    // dynamic-size arguments.
    eigen_assert(a1.rows() == a2.rows() && a1.cols() == a2.cols() && a1.rows() == a3.rows() && a1.cols() == a3.cols());
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rows() const {
    // return the fixed size type if available to enable compile time
    // optimizations
    if (internal::traits<internal::remove_all_t<Arg1Nested>>::RowsAtCompileTime == Dynamic &&
        internal::traits<internal::remove_all_t<Arg2Nested>>::RowsAtCompileTime == Dynamic)
      return m_arg3.rows();
    else if (internal::traits<internal::remove_all_t<Arg1Nested>>::RowsAtCompileTime == Dynamic &&
             internal::traits<internal::remove_all_t<Arg3Nested>>::RowsAtCompileTime == Dynamic)
      return m_arg2.rows();
    else
      return m_arg1.rows();
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index cols() const {
    // return the fixed size type if available to enable compile time
    // optimizations
    if (internal::traits<internal::remove_all_t<Arg1Nested>>::ColsAtCompileTime == Dynamic &&
        internal::traits<internal::remove_all_t<Arg2Nested>>::ColsAtCompileTime == Dynamic)
      return m_arg3.cols();
    else if (internal::traits<internal::remove_all_t<Arg1Nested>>::ColsAtCompileTime == Dynamic &&
             internal::traits<internal::remove_all_t<Arg3Nested>>::ColsAtCompileTime == Dynamic)
      return m_arg2.cols();
    else
      return m_arg1.cols();
  }

  /** \returns the first argument nested expression */
  EIGEN_DEVICE_FUNC const Arg1Nested_& arg1() const { return m_arg1; }
  /** \returns the second argument nested expression */
  EIGEN_DEVICE_FUNC const Arg2Nested_& arg2() const { return m_arg2; }
  /** \returns the third argument nested expression */
  EIGEN_DEVICE_FUNC const Arg3Nested_& arg3() const { return m_arg3; }
  /** \returns the functor representing the ternary operation */
  EIGEN_DEVICE_FUNC const TernaryOp& functor() const { return m_functor; }

 protected:
  Arg1Nested m_arg1;
  Arg2Nested m_arg2;
  Arg3Nested m_arg3;
  const TernaryOp m_functor;
};
161
+
162
// Generic API dispatcher: selects the proper expression base class for the
// given storage kind (dense, sparse, ...). Specializations may override it.
template <typename TernaryOp, typename Arg1, typename Arg2, typename Arg3, typename StorageKind>
class CwiseTernaryOpImpl : public internal::generic_xpr_base<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3>>::type {
 public:
  typedef typename internal::generic_xpr_base<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3>>::type Base;
};
168
+
169
+ } // end namespace Eigen
170
+
171
+ #endif // EIGEN_CWISE_TERNARY_OP_H
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/CwiseUnaryOp.h ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
5
+ // Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
6
+ //
7
+ // This Source Code Form is subject to the terms of the Mozilla
8
+ // Public License v. 2.0. If a copy of the MPL was not distributed
9
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
10
+
11
+ #ifndef EIGEN_CWISE_UNARY_OP_H
12
+ #define EIGEN_CWISE_UNARY_OP_H
13
+
14
+ // IWYU pragma: private
15
+ #include "./InternalHeaderCheck.h"
16
+
17
+ namespace Eigen {
18
+
19
+ namespace internal {
20
// Expression traits for CwiseUnaryOp: everything is inherited from the nested
// expression except the result Scalar (taken from the functor's return type)
// and the flags, of which only the storage-order bit is kept.
template <typename UnaryOp, typename XprType>
struct traits<CwiseUnaryOp<UnaryOp, XprType> > : traits<XprType> {
  typedef typename result_of<UnaryOp(const typename XprType::Scalar&)>::type Scalar;
  typedef typename XprType::Nested XprTypeNested;
  typedef std::remove_reference_t<XprTypeNested> XprTypeNested_;
  enum { Flags = XprTypeNested_::Flags & RowMajorBit };
};
27
+ } // namespace internal
28
+
29
+ template <typename UnaryOp, typename XprType, typename StorageKind>
30
+ class CwiseUnaryOpImpl;
31
+
32
/** \class CwiseUnaryOp
  * \ingroup Core_Module
  *
  * \brief Generic expression where a coefficient-wise unary operator is applied to an expression
  *
  * \tparam UnaryOp template functor implementing the operator
  * \tparam XprType the type of the expression to which we are applying the unary operator
  *
  * This class represents an expression where a unary operator is applied to an expression.
  * It is the return type of all operations taking exactly 1 input expression, regardless of the
  * presence of other inputs such as scalars. For example, the operator* in the expression 3*matrix
  * is considered unary, because only the right-hand side is an expression, and its
  * return type is a specialization of CwiseUnaryOp.
  *
  * Most of the time, this is the only way that it is used, so you typically don't have to name
  * CwiseUnaryOp types explicitly.
  *
  * \sa MatrixBase::unaryExpr(const CustomUnaryOp &) const, class CwiseBinaryOp, class CwiseNullaryOp
  */
template <typename UnaryOp, typename XprType>
class CwiseUnaryOp : public CwiseUnaryOpImpl<UnaryOp, XprType, typename internal::traits<XprType>::StorageKind>,
                     internal::no_assignment_operator {
 public:
  typedef typename CwiseUnaryOpImpl<UnaryOp, XprType, typename internal::traits<XprType>::StorageKind>::Base Base;
  EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseUnaryOp)
  typedef typename internal::ref_selector<XprType>::type XprTypeNested;
  typedef internal::remove_all_t<XprType> NestedExpression;

  // explicit: a unary expression should never be created by implicit conversion.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit CwiseUnaryOp(const XprType& xpr, const UnaryOp& func = UnaryOp())
      : m_xpr(xpr), m_functor(func) {}

  // The expression has exactly the dimensions of its operand.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Index rows() const EIGEN_NOEXCEPT { return m_xpr.rows(); }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Index cols() const EIGEN_NOEXCEPT { return m_xpr.cols(); }

  /** \returns the functor representing the unary operation */
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const UnaryOp& functor() const { return m_functor; }

  /** \returns the nested expression */
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const internal::remove_all_t<XprTypeNested>& nestedExpression() const {
    return m_xpr;
  }

  /** \returns the nested expression */
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE internal::remove_all_t<XprTypeNested>& nestedExpression() { return m_xpr; }

 protected:
  // Nested-by-value or by-reference depending on ref_selector.
  XprTypeNested m_xpr;
  const UnaryOp m_functor;
};
81
+
82
// Generic API dispatcher: selects the proper expression base class for the
// given storage kind (dense, sparse, ...). Specializations may override it.
template <typename UnaryOp, typename XprType, typename StorageKind>
class CwiseUnaryOpImpl : public internal::generic_xpr_base<CwiseUnaryOp<UnaryOp, XprType> >::type {
 public:
  typedef typename internal::generic_xpr_base<CwiseUnaryOp<UnaryOp, XprType> >::type Base;
};
88
+
89
+ } // end namespace Eigen
90
+
91
+ #endif // EIGEN_CWISE_UNARY_OP_H
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/CwiseUnaryView.h ADDED
@@ -0,0 +1,167 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
5
+ //
6
+ // This Source Code Form is subject to the terms of the Mozilla
7
+ // Public License v. 2.0. If a copy of the MPL was not distributed
8
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9
+
10
+ #ifndef EIGEN_CWISE_UNARY_VIEW_H
11
+ #define EIGEN_CWISE_UNARY_VIEW_H
12
+
13
+ // IWYU pragma: private
14
+ #include "./InternalHeaderCheck.h"
15
+
16
+ namespace Eigen {
17
+
18
+ namespace internal {
19
+ template <typename ViewOp, typename MatrixType, typename StrideType>
20
+ struct traits<CwiseUnaryView<ViewOp, MatrixType, StrideType> > : traits<MatrixType> {
21
+ typedef typename result_of<ViewOp(typename traits<MatrixType>::Scalar&)>::type1 ScalarRef;
22
+ static_assert(std::is_reference<ScalarRef>::value, "Views must return a reference type.");
23
+ typedef remove_all_t<ScalarRef> Scalar;
24
+ typedef typename MatrixType::Nested MatrixTypeNested;
25
+ typedef remove_all_t<MatrixTypeNested> MatrixTypeNested_;
26
+ enum {
27
+ FlagsLvalueBit = is_lvalue<MatrixType>::value ? LvalueBit : 0,
28
+ Flags =
29
+ traits<MatrixTypeNested_>::Flags &
30
+ (RowMajorBit | FlagsLvalueBit | DirectAccessBit), // FIXME DirectAccessBit should not be handled by expressions
31
+ MatrixTypeInnerStride = inner_stride_at_compile_time<MatrixType>::ret,
32
+ // need to cast the sizeof's from size_t to int explicitly, otherwise:
33
+ // "error: no integral type can represent all of the enumerator values
34
+ InnerStrideAtCompileTime =
35
+ StrideType::InnerStrideAtCompileTime == 0
36
+ ? (MatrixTypeInnerStride == Dynamic
37
+ ? int(Dynamic)
38
+ : int(MatrixTypeInnerStride) * int(sizeof(typename traits<MatrixType>::Scalar) / sizeof(Scalar)))
39
+ : int(StrideType::InnerStrideAtCompileTime),
40
+
41
+ OuterStrideAtCompileTime = StrideType::OuterStrideAtCompileTime == 0
42
+ ? (outer_stride_at_compile_time<MatrixType>::ret == Dynamic
43
+ ? int(Dynamic)
44
+ : outer_stride_at_compile_time<MatrixType>::ret *
45
+ int(sizeof(typename traits<MatrixType>::Scalar) / sizeof(Scalar)))
46
+ : int(StrideType::OuterStrideAtCompileTime)
47
+ };
48
+ };
49
+
50
// Generic API dispatcher: selects the proper expression base class for the
// given storage kind; `Mutable` distinguishes writable views from read-only
// ones (const XprType).
template <typename ViewOp, typename XprType, typename StrideType, typename StorageKind,
          bool Mutable = !std::is_const<XprType>::value>
class CwiseUnaryViewImpl : public generic_xpr_base<CwiseUnaryView<ViewOp, XprType, StrideType> >::type {
 public:
  typedef typename generic_xpr_base<CwiseUnaryView<ViewOp, XprType, StrideType> >::type Base;
};

// Dense, read-only (Mutable == false) implementation: exposes const data()
// and stride information, rescaled from the nested expression's scalar size
// to the view's scalar size.
template <typename ViewOp, typename MatrixType, typename StrideType>
class CwiseUnaryViewImpl<ViewOp, MatrixType, StrideType, Dense, false>
    : public dense_xpr_base<CwiseUnaryView<ViewOp, MatrixType, StrideType> >::type {
 public:
  typedef CwiseUnaryView<ViewOp, MatrixType, StrideType> Derived;
  typedef typename dense_xpr_base<CwiseUnaryView<ViewOp, MatrixType, StrideType> >::type Base;
  EIGEN_DENSE_PUBLIC_INTERFACE(Derived)
  EIGEN_INHERIT_ASSIGNMENT_OPERATORS(CwiseUnaryViewImpl)

  // Pointer to the first viewed coefficient (direct access).
  EIGEN_DEVICE_FUNC inline const Scalar* data() const { return &(this->coeffRef(0)); }

  // Stride overrides from StrideType win (non-zero); otherwise rescale the
  // nested expression's stride by the scalar-size ratio.
  EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index innerStride() const {
    return StrideType::InnerStrideAtCompileTime != 0 ? int(StrideType::InnerStrideAtCompileTime)
                                                     : derived().nestedExpression().innerStride() *
                                                           sizeof(typename traits<MatrixType>::Scalar) / sizeof(Scalar);
  }

  EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index outerStride() const {
    return StrideType::OuterStrideAtCompileTime != 0 ? int(StrideType::OuterStrideAtCompileTime)
                                                     : derived().nestedExpression().outerStride() *
                                                           sizeof(typename traits<MatrixType>::Scalar) / sizeof(Scalar);
  }

 protected:
  EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(CwiseUnaryViewImpl)

  // Allow const access to coeffRef for the case of direct access being enabled.
  EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index index) const {
    return internal::evaluator<Derived>(derived()).coeffRef(index);
  }

  EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index row, Index col) const {
    return internal::evaluator<Derived>(derived()).coeffRef(row, col);
  }
};

// Dense, writable (Mutable == true) implementation: adds non-const data()
// and coeffRef() on top of the read-only base.
template <typename ViewOp, typename MatrixType, typename StrideType>
class CwiseUnaryViewImpl<ViewOp, MatrixType, StrideType, Dense, true>
    : public CwiseUnaryViewImpl<ViewOp, MatrixType, StrideType, Dense, false> {
 public:
  typedef CwiseUnaryViewImpl<ViewOp, MatrixType, StrideType, Dense, false> Base;
  typedef CwiseUnaryView<ViewOp, MatrixType, StrideType> Derived;
  EIGEN_DENSE_PUBLIC_INTERFACE(Derived)
  EIGEN_INHERIT_ASSIGNMENT_OPERATORS(CwiseUnaryViewImpl)

  using Base::data;
  EIGEN_DEVICE_FUNC inline Scalar* data() { return &(this->coeffRef(0)); }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col) {
    return internal::evaluator<Derived>(derived()).coeffRef(row, col);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) {
    return internal::evaluator<Derived>(derived()).coeffRef(index);
  }

 protected:
  EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(CwiseUnaryViewImpl)
};
117
+
118
+ } // namespace internal
119
+
120
/** \class CwiseUnaryView
  * \ingroup Core_Module
  *
  * \brief Generic lvalue expression of a coefficient-wise unary operator of a matrix or a vector
  *
  * \tparam ViewOp template functor implementing the view
  * \tparam MatrixType the type of the matrix we are applying the unary operator
  *
  * This class represents a lvalue expression of a generic unary view operator of a matrix or a vector.
  * It is the return type of real() and imag(), and most of the time this is the only way it is used.
  *
  * \sa MatrixBase::unaryViewExpr(const CustomUnaryOp &) const, class CwiseUnaryOp
  */
template <typename ViewOp, typename MatrixType, typename StrideType>
class CwiseUnaryView : public internal::CwiseUnaryViewImpl<ViewOp, MatrixType, StrideType,
                                                           typename internal::traits<MatrixType>::StorageKind> {
 public:
  typedef typename internal::CwiseUnaryViewImpl<ViewOp, MatrixType, StrideType,
                                                typename internal::traits<MatrixType>::StorageKind>::Base Base;
  EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseUnaryView)
  // non_const_type: the view must be able to write through to the matrix.
  typedef typename internal::ref_selector<MatrixType>::non_const_type MatrixTypeNested;
  typedef internal::remove_all_t<MatrixType> NestedExpression;

  explicit EIGEN_DEVICE_FUNC inline CwiseUnaryView(MatrixType& mat, const ViewOp& func = ViewOp())
      : m_matrix(mat), m_functor(func) {}

  EIGEN_INHERIT_ASSIGNMENT_OPERATORS(CwiseUnaryView)

  // The view has exactly the dimensions of the viewed matrix.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Index rows() const EIGEN_NOEXCEPT { return m_matrix.rows(); }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Index cols() const EIGEN_NOEXCEPT { return m_matrix.cols(); }

  /** \returns the functor representing unary operation */
  EIGEN_DEVICE_FUNC const ViewOp& functor() const { return m_functor; }

  /** \returns the nested expression */
  EIGEN_DEVICE_FUNC const internal::remove_all_t<MatrixTypeNested>& nestedExpression() const { return m_matrix; }

  /** \returns the nested expression */
  EIGEN_DEVICE_FUNC std::remove_reference_t<MatrixTypeNested>& nestedExpression() { return m_matrix; }

 protected:
  MatrixTypeNested m_matrix;
  ViewOp m_functor;
};
164
+
165
+ } // namespace Eigen
166
+
167
+ #endif // EIGEN_CWISE_UNARY_VIEW_H
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/DenseBase.h ADDED
@@ -0,0 +1,647 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2007-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
5
+ // Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
6
+ //
7
+ // This Source Code Form is subject to the terms of the Mozilla
8
+ // Public License v. 2.0. If a copy of the MPL was not distributed
9
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
10
+
11
+ #ifndef EIGEN_DENSEBASE_H
12
+ #define EIGEN_DENSEBASE_H
13
+
14
+ // IWYU pragma: private
15
+ #include "./InternalHeaderCheck.h"
16
+
17
+ namespace Eigen {
18
+
19
+ // The index type defined by EIGEN_DEFAULT_DENSE_INDEX_TYPE must be a signed type.
20
+ EIGEN_STATIC_ASSERT(NumTraits<DenseIndex>::IsSigned, THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE)
21
+
22
+ /** \class DenseBase
23
+ * \ingroup Core_Module
24
+ *
25
+ * \brief Base class for all dense matrices, vectors, and arrays
26
+ *
27
+ * This class is the base that is inherited by all dense objects (matrix, vector, arrays,
28
+ * and related expression types). The common Eigen API for dense objects is contained in this class.
29
+ *
30
+ * \tparam Derived is the derived type, e.g., a matrix type or an expression.
31
+ *
32
+ * This class can be extended with the help of the plugin mechanism described on the page
33
+ * \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_DENSEBASE_PLUGIN.
34
+ *
35
+ * \sa \blank \ref TopicClassHierarchy
36
+ */
37
+ template <typename Derived>
38
+ class DenseBase
39
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
40
+ : public DenseCoeffsBase<Derived, internal::accessors_level<Derived>::value>
41
+ #else
42
+ : public DenseCoeffsBase<Derived, DirectWriteAccessors>
43
+ #endif // not EIGEN_PARSED_BY_DOXYGEN
44
+ {
45
+ public:
46
+ /** Inner iterator type to iterate over the coefficients of a row or column.
47
+ * \sa class InnerIterator
48
+ */
49
+ typedef Eigen::InnerIterator<Derived> InnerIterator;
50
+
51
+ typedef typename internal::traits<Derived>::StorageKind StorageKind;
52
+
53
+ /**
54
+ * \brief The type used to store indices
55
+ * \details This typedef is relevant for types that store multiple indices such as
56
+ * PermutationMatrix or Transpositions, otherwise it defaults to Eigen::Index
57
+ * \sa \blank \ref TopicPreprocessorDirectives, Eigen::Index, SparseMatrixBase.
58
+ */
59
+ typedef typename internal::traits<Derived>::StorageIndex StorageIndex;
60
+
61
+ /** The numeric type of the expression' coefficients, e.g. float, double, int or std::complex<float>, etc. */
62
+ typedef typename internal::traits<Derived>::Scalar Scalar;
63
+
64
+ /** The numeric type of the expression' coefficients, e.g. float, double, int or std::complex<float>, etc.
65
+ *
66
+ * It is an alias for the Scalar type */
67
+ typedef Scalar value_type;
68
+
69
+ typedef typename NumTraits<Scalar>::Real RealScalar;
70
+ typedef DenseCoeffsBase<Derived, internal::accessors_level<Derived>::value> Base;
71
+
72
+ using Base::coeff;
73
+ using Base::coeffByOuterInner;
74
+ using Base::colIndexByOuterInner;
75
+ using Base::cols;
76
+ using Base::const_cast_derived;
77
+ using Base::derived;
78
+ using Base::rowIndexByOuterInner;
79
+ using Base::rows;
80
+ using Base::size;
81
+ using Base::operator();
82
+ using Base::operator[];
83
+ using Base::colStride;
84
+ using Base::innerStride;
85
+ using Base::outerStride;
86
+ using Base::rowStride;
87
+ using Base::stride;
88
+ using Base::w;
89
+ using Base::x;
90
+ using Base::y;
91
+ using Base::z;
92
+ typedef typename Base::CoeffReturnType CoeffReturnType;
93
+
94
+ enum {
95
+
96
+ RowsAtCompileTime = internal::traits<Derived>::RowsAtCompileTime,
97
+ /**< The number of rows at compile-time. This is just a copy of the value provided
98
+ * by the \a Derived type. If a value is not known at compile-time,
99
+ * it is set to the \a Dynamic constant.
100
+ * \sa MatrixBase::rows(), MatrixBase::cols(), ColsAtCompileTime, SizeAtCompileTime */
101
+
102
+ ColsAtCompileTime = internal::traits<Derived>::ColsAtCompileTime,
103
+ /**< The number of columns at compile-time. This is just a copy of the value provided
104
+ * by the \a Derived type. If a value is not known at compile-time,
105
+ * it is set to the \a Dynamic constant.
106
+ * \sa MatrixBase::rows(), MatrixBase::cols(), RowsAtCompileTime, SizeAtCompileTime */
107
+
108
+ SizeAtCompileTime = (internal::size_of_xpr_at_compile_time<Derived>::ret),
109
+ /**< This is equal to the number of coefficients, i.e. the number of
110
+ * rows times the number of columns, or to \a Dynamic if this is not
111
+ * known at compile-time. \sa RowsAtCompileTime, ColsAtCompileTime */
112
+
113
+ MaxRowsAtCompileTime = internal::traits<Derived>::MaxRowsAtCompileTime,
114
+ /**< This value is equal to the maximum possible number of rows that this expression
115
+ * might have. If this expression might have an arbitrarily high number of rows,
116
+ * this value is set to \a Dynamic.
117
+ *
118
+ * This value is useful to know when evaluating an expression, in order to determine
119
+ * whether it is possible to avoid doing a dynamic memory allocation.
120
+ *
121
+ * \sa RowsAtCompileTime, MaxColsAtCompileTime, MaxSizeAtCompileTime
122
+ */
123
+
124
+ MaxColsAtCompileTime = internal::traits<Derived>::MaxColsAtCompileTime,
125
+ /**< This value is equal to the maximum possible number of columns that this expression
126
+ * might have. If this expression might have an arbitrarily high number of columns,
127
+ * this value is set to \a Dynamic.
128
+ *
129
+ * This value is useful to know when evaluating an expression, in order to determine
130
+ * whether it is possible to avoid doing a dynamic memory allocation.
131
+ *
132
+ * \sa ColsAtCompileTime, MaxRowsAtCompileTime, MaxSizeAtCompileTime
133
+ */
134
+
135
+ MaxSizeAtCompileTime = internal::size_at_compile_time(internal::traits<Derived>::MaxRowsAtCompileTime,
136
+ internal::traits<Derived>::MaxColsAtCompileTime),
137
+ /**< This value is equal to the maximum possible number of coefficients that this expression
138
+ * might have. If this expression might have an arbitrarily high number of coefficients,
139
+ * this value is set to \a Dynamic.
140
+ *
141
+ * This value is useful to know when evaluating an expression, in order to determine
142
+ * whether it is possible to avoid doing a dynamic memory allocation.
143
+ *
144
+ * \sa SizeAtCompileTime, MaxRowsAtCompileTime, MaxColsAtCompileTime
145
+ */
146
+
147
+ IsVectorAtCompileTime =
148
+ internal::traits<Derived>::RowsAtCompileTime == 1 || internal::traits<Derived>::ColsAtCompileTime == 1,
149
+ /**< This is set to true if either the number of rows or the number of
150
+ * columns is known at compile-time to be equal to 1. Indeed, in that case,
151
+ * we are dealing with a column-vector (if there is only one column) or with
152
+ * a row-vector (if there is only one row). */
153
+
154
+ NumDimensions = int(MaxSizeAtCompileTime) == 1 ? 0
155
+ : bool(IsVectorAtCompileTime) ? 1
156
+ : 2,
157
+ /**< This value is equal to Tensor::NumDimensions, i.e. 0 for scalars, 1 for vectors,
158
+ * and 2 for matrices.
159
+ */
160
+
161
+ Flags = internal::traits<Derived>::Flags,
162
+ /**< This stores expression \ref flags flags which may or may not be inherited by new expressions
163
+ * constructed from this one. See the \ref flags "list of flags".
164
+ */
165
+
166
+ IsRowMajor = int(Flags) & RowMajorBit, /**< True if this expression has row-major storage order. */
167
+
168
+ InnerSizeAtCompileTime = int(IsVectorAtCompileTime) ? int(SizeAtCompileTime)
169
+ : int(IsRowMajor) ? int(ColsAtCompileTime)
170
+ : int(RowsAtCompileTime),
171
+
172
+ InnerStrideAtCompileTime = internal::inner_stride_at_compile_time<Derived>::ret,
173
+ OuterStrideAtCompileTime = internal::outer_stride_at_compile_time<Derived>::ret
174
+ };
175
+
176
+ typedef typename internal::find_best_packet<Scalar, SizeAtCompileTime>::type PacketScalar;
177
+
178
+ enum { IsPlainObjectBase = 0 };
179
+
180
+ /** The plain matrix type corresponding to this expression.
181
+ * \sa PlainObject */
182
+ typedef Matrix<typename internal::traits<Derived>::Scalar, internal::traits<Derived>::RowsAtCompileTime,
183
+ internal::traits<Derived>::ColsAtCompileTime,
184
+ AutoAlign | (internal::traits<Derived>::Flags & RowMajorBit ? RowMajor : ColMajor),
185
+ internal::traits<Derived>::MaxRowsAtCompileTime, internal::traits<Derived>::MaxColsAtCompileTime>
186
+ PlainMatrix;
187
+
188
+ /** The plain array type corresponding to this expression.
189
+ * \sa PlainObject */
190
+ typedef Array<typename internal::traits<Derived>::Scalar, internal::traits<Derived>::RowsAtCompileTime,
191
+ internal::traits<Derived>::ColsAtCompileTime,
192
+ AutoAlign | (internal::traits<Derived>::Flags & RowMajorBit ? RowMajor : ColMajor),
193
+ internal::traits<Derived>::MaxRowsAtCompileTime, internal::traits<Derived>::MaxColsAtCompileTime>
194
+ PlainArray;
195
+
196
+ /** \brief The plain matrix or array type corresponding to this expression.
197
+ *
198
+ * This is not necessarily exactly the return type of eval(). In the case of plain matrices,
199
+ * the return type of eval() is a const reference to a matrix, not a matrix! It is however guaranteed
200
+ * that the return type of eval() is either PlainObject or const PlainObject&.
201
+ */
202
+ typedef std::conditional_t<internal::is_same<typename internal::traits<Derived>::XprKind, MatrixXpr>::value,
203
+ PlainMatrix, PlainArray>
204
+ PlainObject;
205
+
206
+ /** \returns the outer size.
207
+ *
208
+ * \note For a vector, this returns just 1. For a matrix (non-vector), this is the major dimension
209
+ * with respect to the \ref TopicStorageOrders "storage order", i.e., the number of columns for a
210
+ * column-major matrix, and the number of rows for a row-major matrix. */
211
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index outerSize() const {
212
+ return IsVectorAtCompileTime ? 1 : int(IsRowMajor) ? this->rows() : this->cols();
213
+ }
214
+
215
+ /** \returns the inner size.
216
+ *
217
+ * \note For a vector, this is just the size. For a matrix (non-vector), this is the minor dimension
218
+ * with respect to the \ref TopicStorageOrders "storage order", i.e., the number of rows for a
219
+ * column-major matrix, and the number of columns for a row-major matrix. */
220
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index innerSize() const {
221
+ return IsVectorAtCompileTime ? this->size() : int(IsRowMajor) ? this->cols() : this->rows();
222
+ }
223
+
224
+ /** Only plain matrices/arrays, not expressions, may be resized; therefore the only useful resize methods are
225
+ * Matrix::resize() and Array::resize(). The present method only asserts that the new size equals the old size, and
226
+ * does nothing else.
227
+ */
228
+ EIGEN_DEVICE_FUNC void resize(Index newSize) {
229
+ EIGEN_ONLY_USED_FOR_DEBUG(newSize);
230
+ eigen_assert(newSize == this->size() && "DenseBase::resize() does not actually allow to resize.");
231
+ }
232
+ /** Only plain matrices/arrays, not expressions, may be resized; therefore the only useful resize methods are
233
+ * Matrix::resize() and Array::resize(). The present method only asserts that the new size equals the old size, and
234
+ * does nothing else.
235
+ */
236
+ EIGEN_DEVICE_FUNC void resize(Index rows, Index cols) {
237
+ EIGEN_ONLY_USED_FOR_DEBUG(rows);
238
+ EIGEN_ONLY_USED_FOR_DEBUG(cols);
239
+ eigen_assert(rows == this->rows() && cols == this->cols() &&
240
+ "DenseBase::resize() does not actually allow to resize.");
241
+ }
242
+
243
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
244
+ /** \internal Represents a matrix with all coefficients equal to one another*/
245
+ typedef CwiseNullaryOp<internal::scalar_constant_op<Scalar>, PlainObject> ConstantReturnType;
246
+ /** \internal \deprecated Represents a vector with linearly spaced coefficients that allows sequential access only. */
247
+ EIGEN_DEPRECATED typedef CwiseNullaryOp<internal::linspaced_op<Scalar>, PlainObject> SequentialLinSpacedReturnType;
248
+ /** \internal Represents a vector with linearly spaced coefficients that allows random access. */
249
+ typedef CwiseNullaryOp<internal::linspaced_op<Scalar>, PlainObject> RandomAccessLinSpacedReturnType;
250
+ /** \internal Represents a vector with equally spaced coefficients that allows random access. */
251
+ typedef CwiseNullaryOp<internal::equalspaced_op<Scalar>, PlainObject> RandomAccessEqualSpacedReturnType;
252
+ /** \internal the return type of MatrixBase::eigenvalues() */
253
+ typedef Matrix<typename NumTraits<typename internal::traits<Derived>::Scalar>::Real,
254
+ internal::traits<Derived>::ColsAtCompileTime, 1>
255
+ EigenvaluesReturnType;
256
+
257
+ #endif // not EIGEN_PARSED_BY_DOXYGEN
258
+
259
+ /** Copies \a other into *this. \returns a reference to *this. */
260
+ template <typename OtherDerived>
261
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const DenseBase<OtherDerived>& other);
262
+
263
+ /** Special case of the template operator=, in order to prevent the compiler
264
+ * from generating a default operator= (issue hit with g++ 4.1)
265
+ */
266
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const DenseBase& other);
267
+
268
+ template <typename OtherDerived>
269
+ EIGEN_DEVICE_FUNC Derived& operator=(const EigenBase<OtherDerived>& other);
270
+
271
+ template <typename OtherDerived>
272
+ EIGEN_DEVICE_FUNC Derived& operator+=(const EigenBase<OtherDerived>& other);
273
+
274
+ template <typename OtherDerived>
275
+ EIGEN_DEVICE_FUNC Derived& operator-=(const EigenBase<OtherDerived>& other);
276
+
277
+ template <typename OtherDerived>
278
+ EIGEN_DEVICE_FUNC Derived& operator=(const ReturnByValue<OtherDerived>& func);
279
+
280
+ /** \internal
281
+ * Copies \a other into *this without evaluating other. \returns a reference to *this. */
282
+ template <typename OtherDerived>
283
+ /** \deprecated */
284
+ EIGEN_DEPRECATED EIGEN_DEVICE_FUNC Derived& lazyAssign(const DenseBase<OtherDerived>& other);
285
+
286
+ EIGEN_DEVICE_FUNC CommaInitializer<Derived> operator<<(const Scalar& s);
287
+
288
+ template <unsigned int Added, unsigned int Removed>
289
+ /** \deprecated it now returns \c *this */
290
+ EIGEN_DEPRECATED const Derived& flagged() const {
291
+ return derived();
292
+ }
293
+
294
+ template <typename OtherDerived>
295
+ EIGEN_DEVICE_FUNC CommaInitializer<Derived> operator<<(const DenseBase<OtherDerived>& other);
296
+
297
+ typedef Transpose<Derived> TransposeReturnType;
298
+ EIGEN_DEVICE_FUNC TransposeReturnType transpose();
299
+ typedef Transpose<const Derived> ConstTransposeReturnType;
300
+ EIGEN_DEVICE_FUNC const ConstTransposeReturnType transpose() const;
301
+ EIGEN_DEVICE_FUNC void transposeInPlace();
302
+
303
+ EIGEN_DEVICE_FUNC static const ConstantReturnType Constant(Index rows, Index cols, const Scalar& value);
304
+ EIGEN_DEVICE_FUNC static const ConstantReturnType Constant(Index size, const Scalar& value);
305
+ EIGEN_DEVICE_FUNC static const ConstantReturnType Constant(const Scalar& value);
306
+
307
+ EIGEN_DEPRECATED EIGEN_DEVICE_FUNC static const RandomAccessLinSpacedReturnType LinSpaced(Sequential_t, Index size,
308
+ const Scalar& low,
309
+ const Scalar& high);
310
+ EIGEN_DEPRECATED EIGEN_DEVICE_FUNC static const RandomAccessLinSpacedReturnType LinSpaced(Sequential_t,
311
+ const Scalar& low,
312
+ const Scalar& high);
313
+
314
+ EIGEN_DEVICE_FUNC static const RandomAccessLinSpacedReturnType LinSpaced(Index size, const Scalar& low,
315
+ const Scalar& high);
316
+ EIGEN_DEVICE_FUNC static const RandomAccessLinSpacedReturnType LinSpaced(const Scalar& low, const Scalar& high);
317
+
318
+ EIGEN_DEVICE_FUNC static const RandomAccessEqualSpacedReturnType EqualSpaced(Index size, const Scalar& low,
319
+ const Scalar& step);
320
+ EIGEN_DEVICE_FUNC static const RandomAccessEqualSpacedReturnType EqualSpaced(const Scalar& low, const Scalar& step);
321
+
322
+ template <typename CustomNullaryOp>
323
+ EIGEN_DEVICE_FUNC static const CwiseNullaryOp<CustomNullaryOp, PlainObject> NullaryExpr(Index rows, Index cols,
324
+ const CustomNullaryOp& func);
325
+ template <typename CustomNullaryOp>
326
+ EIGEN_DEVICE_FUNC static const CwiseNullaryOp<CustomNullaryOp, PlainObject> NullaryExpr(Index size,
327
+ const CustomNullaryOp& func);
328
+ template <typename CustomNullaryOp>
329
+ EIGEN_DEVICE_FUNC static const CwiseNullaryOp<CustomNullaryOp, PlainObject> NullaryExpr(const CustomNullaryOp& func);
330
+
331
+ EIGEN_DEVICE_FUNC static const ConstantReturnType Zero(Index rows, Index cols);
332
+ EIGEN_DEVICE_FUNC static const ConstantReturnType Zero(Index size);
333
+ EIGEN_DEVICE_FUNC static const ConstantReturnType Zero();
334
+ EIGEN_DEVICE_FUNC static const ConstantReturnType Ones(Index rows, Index cols);
335
+ EIGEN_DEVICE_FUNC static const ConstantReturnType Ones(Index size);
336
+ EIGEN_DEVICE_FUNC static const ConstantReturnType Ones();
337
+
338
+ EIGEN_DEVICE_FUNC void fill(const Scalar& value);
339
+ EIGEN_DEVICE_FUNC Derived& setConstant(const Scalar& value);
340
+ EIGEN_DEVICE_FUNC Derived& setLinSpaced(Index size, const Scalar& low, const Scalar& high);
341
+ EIGEN_DEVICE_FUNC Derived& setLinSpaced(const Scalar& low, const Scalar& high);
342
+ EIGEN_DEVICE_FUNC Derived& setEqualSpaced(Index size, const Scalar& low, const Scalar& step);
343
+ EIGEN_DEVICE_FUNC Derived& setEqualSpaced(const Scalar& low, const Scalar& step);
344
+ EIGEN_DEVICE_FUNC Derived& setZero();
345
+ EIGEN_DEVICE_FUNC Derived& setOnes();
346
+ EIGEN_DEVICE_FUNC Derived& setRandom();
347
+
348
+ template <typename OtherDerived>
349
+ EIGEN_DEVICE_FUNC bool isApprox(const DenseBase<OtherDerived>& other,
350
+ const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
351
+ EIGEN_DEVICE_FUNC bool isMuchSmallerThan(const RealScalar& other,
352
+ const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
353
+ template <typename OtherDerived>
354
+ EIGEN_DEVICE_FUNC bool isMuchSmallerThan(const DenseBase<OtherDerived>& other,
355
+ const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
356
+
357
+ EIGEN_DEVICE_FUNC bool isApproxToConstant(const Scalar& value,
358
+ const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
359
+ EIGEN_DEVICE_FUNC bool isConstant(const Scalar& value,
360
+ const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
361
+ EIGEN_DEVICE_FUNC bool isZero(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
362
+ EIGEN_DEVICE_FUNC bool isOnes(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
363
+
364
+ EIGEN_DEVICE_FUNC inline bool hasNaN() const;
365
+ EIGEN_DEVICE_FUNC inline bool allFinite() const;
366
+
367
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator*=(const Scalar& other);
368
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator/=(const Scalar& other);
369
+
370
+ typedef internal::add_const_on_value_type_t<typename internal::eval<Derived>::type> EvalReturnType;
371
+ /** \returns the matrix or vector obtained by evaluating this expression.
372
+ *
373
+ * Notice that in the case of a plain matrix or vector (not an expression) this function just returns
374
+ * a const reference, in order to avoid a useless copy.
375
+ *
376
+ * \warning Be careful with eval() and the auto C++ keyword, as detailed in this \link TopicPitfalls_auto_keyword page
377
+ * \endlink.
378
+ */
379
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EvalReturnType eval() const {
380
+ // Even though MSVC does not honor strong inlining when the return type
381
+ // is a dynamic matrix, we desperately need strong inlining for fixed
382
+ // size types on MSVC.
383
+ return typename internal::eval<Derived>::type(derived());
384
+ }
385
+
386
+ /** swaps *this with the expression \a other.
387
+ *
388
+ */
389
+ template <typename OtherDerived>
390
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void swap(const DenseBase<OtherDerived>& other) {
391
+ EIGEN_STATIC_ASSERT(!OtherDerived::IsPlainObjectBase, THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY);
392
+ eigen_assert(rows() == other.rows() && cols() == other.cols());
393
+ call_assignment(derived(), other.const_cast_derived(), internal::swap_assign_op<Scalar>());
394
+ }
395
+
396
+ /** swaps *this with the matrix or array \a other.
397
+ *
398
+ */
399
+ template <typename OtherDerived>
400
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void swap(PlainObjectBase<OtherDerived>& other) {
401
+ eigen_assert(rows() == other.rows() && cols() == other.cols());
402
+ call_assignment(derived(), other.derived(), internal::swap_assign_op<Scalar>());
403
+ }
404
+
405
+ EIGEN_DEVICE_FUNC inline const NestByValue<Derived> nestByValue() const;
406
+ EIGEN_DEVICE_FUNC inline const ForceAlignedAccess<Derived> forceAlignedAccess() const;
407
+ EIGEN_DEVICE_FUNC inline ForceAlignedAccess<Derived> forceAlignedAccess();
408
+ template <bool Enable>
409
+ EIGEN_DEVICE_FUNC inline const std::conditional_t<Enable, ForceAlignedAccess<Derived>, Derived&>
410
+ forceAlignedAccessIf() const;
411
+ template <bool Enable>
412
+ EIGEN_DEVICE_FUNC inline std::conditional_t<Enable, ForceAlignedAccess<Derived>, Derived&> forceAlignedAccessIf();
413
+
414
+ EIGEN_DEVICE_FUNC Scalar sum() const;
415
+ EIGEN_DEVICE_FUNC Scalar mean() const;
416
+ EIGEN_DEVICE_FUNC Scalar trace() const;
417
+
418
+ EIGEN_DEVICE_FUNC Scalar prod() const;
419
+
420
+ template <int NaNPropagation>
421
+ EIGEN_DEVICE_FUNC typename internal::traits<Derived>::Scalar minCoeff() const;
422
+ template <int NaNPropagation>
423
+ EIGEN_DEVICE_FUNC typename internal::traits<Derived>::Scalar maxCoeff() const;
424
+
425
+ // By default, the fastest version with undefined NaN propagation semantics is
426
+ // used.
427
+ // TODO(rmlarsen): Replace with default template argument when we move to
428
+ // c++11 or beyond.
429
+ EIGEN_DEVICE_FUNC inline typename internal::traits<Derived>::Scalar minCoeff() const {
430
+ return minCoeff<PropagateFast>();
431
+ }
432
+ EIGEN_DEVICE_FUNC inline typename internal::traits<Derived>::Scalar maxCoeff() const {
433
+ return maxCoeff<PropagateFast>();
434
+ }
435
+
436
+ template <int NaNPropagation, typename IndexType>
437
+ EIGEN_DEVICE_FUNC typename internal::traits<Derived>::Scalar minCoeff(IndexType* row, IndexType* col) const;
438
+ template <int NaNPropagation, typename IndexType>
439
+ EIGEN_DEVICE_FUNC typename internal::traits<Derived>::Scalar maxCoeff(IndexType* row, IndexType* col) const;
440
+ template <int NaNPropagation, typename IndexType>
441
+ EIGEN_DEVICE_FUNC typename internal::traits<Derived>::Scalar minCoeff(IndexType* index) const;
442
+ template <int NaNPropagation, typename IndexType>
443
+ EIGEN_DEVICE_FUNC typename internal::traits<Derived>::Scalar maxCoeff(IndexType* index) const;
444
+
445
+ // TODO(rmlarsen): Replace these methods with a default template argument.
446
+ template <typename IndexType>
447
+ EIGEN_DEVICE_FUNC inline typename internal::traits<Derived>::Scalar minCoeff(IndexType* row, IndexType* col) const {
448
+ return minCoeff<PropagateFast>(row, col);
449
+ }
450
+ template <typename IndexType>
451
+ EIGEN_DEVICE_FUNC inline typename internal::traits<Derived>::Scalar maxCoeff(IndexType* row, IndexType* col) const {
452
+ return maxCoeff<PropagateFast>(row, col);
453
+ }
454
+ template <typename IndexType>
455
+ EIGEN_DEVICE_FUNC inline typename internal::traits<Derived>::Scalar minCoeff(IndexType* index) const {
456
+ return minCoeff<PropagateFast>(index);
457
+ }
458
+ template <typename IndexType>
459
+ EIGEN_DEVICE_FUNC inline typename internal::traits<Derived>::Scalar maxCoeff(IndexType* index) const {
460
+ return maxCoeff<PropagateFast>(index);
461
+ }
462
+
463
+ template <typename BinaryOp>
464
+ EIGEN_DEVICE_FUNC Scalar redux(const BinaryOp& func) const;
465
+
466
+ template <typename Visitor>
467
+ EIGEN_DEVICE_FUNC void visit(Visitor& func) const;
468
+
469
+ /** \returns a WithFormat proxy object allowing to print a matrix the with given
470
+ * format \a fmt.
471
+ *
472
+ * See class IOFormat for some examples.
473
+ *
474
+ * \sa class IOFormat, class WithFormat
475
+ */
476
+ inline const WithFormat<Derived> format(const IOFormat& fmt) const { return WithFormat<Derived>(derived(), fmt); }
477
+
478
+ /** \returns the unique coefficient of a 1x1 expression */
479
+ EIGEN_DEVICE_FUNC CoeffReturnType value() const {
480
+ EIGEN_STATIC_ASSERT_SIZE_1x1(Derived) eigen_assert(this->rows() == 1 && this->cols() == 1);
481
+ return derived().coeff(0, 0);
482
+ }
483
+
484
+ EIGEN_DEVICE_FUNC bool all() const;
485
+ EIGEN_DEVICE_FUNC bool any() const;
486
+ EIGEN_DEVICE_FUNC Index count() const;
487
+
488
+ typedef VectorwiseOp<Derived, Horizontal> RowwiseReturnType;
489
+ typedef const VectorwiseOp<const Derived, Horizontal> ConstRowwiseReturnType;
490
+ typedef VectorwiseOp<Derived, Vertical> ColwiseReturnType;
491
+ typedef const VectorwiseOp<const Derived, Vertical> ConstColwiseReturnType;
492
+
493
+ /** \returns a VectorwiseOp wrapper of *this for broadcasting and partial reductions
494
+ *
495
+ * Example: \include MatrixBase_rowwise.cpp
496
+ * Output: \verbinclude MatrixBase_rowwise.out
497
+ *
498
+ * \sa colwise(), class VectorwiseOp, \ref TutorialReductionsVisitorsBroadcasting
499
+ */
500
+ // Code moved here due to a CUDA compiler bug
501
+ EIGEN_DEVICE_FUNC inline ConstRowwiseReturnType rowwise() const { return ConstRowwiseReturnType(derived()); }
502
+ EIGEN_DEVICE_FUNC RowwiseReturnType rowwise();
503
+
504
+ /** \returns a VectorwiseOp wrapper of *this broadcasting and partial reductions
505
+ *
506
+ * Example: \include MatrixBase_colwise.cpp
507
+ * Output: \verbinclude MatrixBase_colwise.out
508
+ *
509
+ * \sa rowwise(), class VectorwiseOp, \ref TutorialReductionsVisitorsBroadcasting
510
+ */
511
+ EIGEN_DEVICE_FUNC inline ConstColwiseReturnType colwise() const { return ConstColwiseReturnType(derived()); }
512
+ EIGEN_DEVICE_FUNC ColwiseReturnType colwise();
513
+
514
+ typedef CwiseNullaryOp<internal::scalar_random_op<Scalar>, PlainObject> RandomReturnType;
515
+ static const RandomReturnType Random(Index rows, Index cols);
516
+ static const RandomReturnType Random(Index size);
517
+ static const RandomReturnType Random();
518
+
519
+ template <typename ThenDerived, typename ElseDerived>
520
+ inline EIGEN_DEVICE_FUNC
521
+ CwiseTernaryOp<internal::scalar_boolean_select_op<typename DenseBase<ThenDerived>::Scalar,
522
+ typename DenseBase<ElseDerived>::Scalar, Scalar>,
523
+ ThenDerived, ElseDerived, Derived>
524
+ select(const DenseBase<ThenDerived>& thenMatrix, const DenseBase<ElseDerived>& elseMatrix) const;
525
+
526
+ template <typename ThenDerived>
527
+ inline EIGEN_DEVICE_FUNC
528
+ CwiseTernaryOp<internal::scalar_boolean_select_op<typename DenseBase<ThenDerived>::Scalar,
529
+ typename DenseBase<ThenDerived>::Scalar, Scalar>,
530
+ ThenDerived, typename DenseBase<ThenDerived>::ConstantReturnType, Derived>
531
+ select(const DenseBase<ThenDerived>& thenMatrix, const typename DenseBase<ThenDerived>::Scalar& elseScalar) const;
532
+
533
+ template <typename ElseDerived>
534
+ inline EIGEN_DEVICE_FUNC
535
+ CwiseTernaryOp<internal::scalar_boolean_select_op<typename DenseBase<ElseDerived>::Scalar,
536
+ typename DenseBase<ElseDerived>::Scalar, Scalar>,
537
+ typename DenseBase<ElseDerived>::ConstantReturnType, ElseDerived, Derived>
538
+ select(const typename DenseBase<ElseDerived>::Scalar& thenScalar, const DenseBase<ElseDerived>& elseMatrix) const;
539
+
540
+ template <int p>
541
+ RealScalar lpNorm() const;
542
+
543
+ template <int RowFactor, int ColFactor>
544
+ EIGEN_DEVICE_FUNC const Replicate<Derived, RowFactor, ColFactor> replicate() const;
545
+ /**
546
+ * \return an expression of the replication of \c *this
547
+ *
548
+ * Example: \include MatrixBase_replicate_int_int.cpp
549
+ * Output: \verbinclude MatrixBase_replicate_int_int.out
550
+ *
551
+ * \sa VectorwiseOp::replicate(), DenseBase::replicate<int,int>(), class Replicate
552
+ */
553
+ // Code moved here due to a CUDA compiler bug
554
+ EIGEN_DEVICE_FUNC const Replicate<Derived, Dynamic, Dynamic> replicate(Index rowFactor, Index colFactor) const {
555
+ return Replicate<Derived, Dynamic, Dynamic>(derived(), rowFactor, colFactor);
556
+ }
557
+
558
+ typedef Reverse<Derived, BothDirections> ReverseReturnType;
559
+ typedef const Reverse<const Derived, BothDirections> ConstReverseReturnType;
560
+ EIGEN_DEVICE_FUNC ReverseReturnType reverse();
561
+ /** This is the const version of reverse(). */
562
+ // Code moved here due to a CUDA compiler bug
563
+ EIGEN_DEVICE_FUNC ConstReverseReturnType reverse() const { return ConstReverseReturnType(derived()); }
564
+ EIGEN_DEVICE_FUNC void reverseInPlace();
565
+
566
+ #ifdef EIGEN_PARSED_BY_DOXYGEN
567
+ /** STL-like <a href="https://en.cppreference.com/w/cpp/named_req/RandomAccessIterator">RandomAccessIterator</a>
568
+ * iterator type as returned by the begin() and end() methods.
569
+ */
570
+ typedef random_access_iterator_type iterator;
571
+ /** This is the const version of iterator (aka read-only) */
572
+ typedef random_access_iterator_type const_iterator;
573
+ #else
574
+ typedef std::conditional_t<(Flags & DirectAccessBit) == DirectAccessBit,
575
+ internal::pointer_based_stl_iterator<Derived>,
576
+ internal::generic_randaccess_stl_iterator<Derived> >
577
+ iterator_type;
578
+
579
+ typedef std::conditional_t<(Flags & DirectAccessBit) == DirectAccessBit,
580
+ internal::pointer_based_stl_iterator<const Derived>,
581
+ internal::generic_randaccess_stl_iterator<const Derived> >
582
+ const_iterator_type;
583
+
584
+ // Stl-style iterators are supported only for vectors.
585
+
586
+ typedef std::conditional_t<IsVectorAtCompileTime, iterator_type, void> iterator;
587
+
588
+ typedef std::conditional_t<IsVectorAtCompileTime, const_iterator_type, void> const_iterator;
589
+ #endif
590
+
591
+ inline iterator begin();
592
+ inline const_iterator begin() const;
593
+ inline const_iterator cbegin() const;
594
+ inline iterator end();
595
+ inline const_iterator end() const;
596
+ inline const_iterator cend() const;
597
+
598
+ #define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::DenseBase
599
+ #define EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
600
+ #define EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(COND)
601
+ #define EIGEN_DOC_UNARY_ADDONS(X, Y)
602
+ #include "../plugins/CommonCwiseUnaryOps.inc"
603
+ #include "../plugins/BlockMethods.inc"
604
+ #include "../plugins/IndexedViewMethods.inc"
605
+ #include "../plugins/ReshapedMethods.inc"
606
+ #ifdef EIGEN_DENSEBASE_PLUGIN
607
+ #include EIGEN_DENSEBASE_PLUGIN
608
+ #endif
609
+ #undef EIGEN_CURRENT_STORAGE_BASE_CLASS
610
+ #undef EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
611
+ #undef EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF
612
+ #undef EIGEN_DOC_UNARY_ADDONS
613
+
614
+ // disable the use of evalTo for dense objects with a nice compilation error
615
+ template <typename Dest>
616
+ EIGEN_DEVICE_FUNC inline void evalTo(Dest&) const {
617
+ EIGEN_STATIC_ASSERT((internal::is_same<Dest, void>::value),
618
+ THE_EVAL_EVALTO_FUNCTION_SHOULD_NEVER_BE_CALLED_FOR_DENSE_OBJECTS);
619
+ }
620
+
621
+ protected:
622
+ EIGEN_DEFAULT_COPY_CONSTRUCTOR(DenseBase)
623
+ /** Default constructor. Do nothing. */
624
+ #ifdef EIGEN_INTERNAL_DEBUGGING
625
+ EIGEN_DEVICE_FUNC constexpr DenseBase() {
626
+ /* Just checks for self-consistency of the flags.
627
+ * Only do it when debugging Eigen, as this borders on paranoia and could slow compilation down
628
+ */
629
+ EIGEN_STATIC_ASSERT(
630
+ (internal::check_implication(MaxRowsAtCompileTime == 1 && MaxColsAtCompileTime != 1, int(IsRowMajor)) &&
631
+ internal::check_implication(MaxColsAtCompileTime == 1 && MaxRowsAtCompileTime != 1, int(!IsRowMajor))),
632
+ INVALID_STORAGE_ORDER_FOR_THIS_VECTOR_EXPRESSION)
633
+ }
634
+ #else
635
+ EIGEN_DEVICE_FUNC constexpr DenseBase() = default;
636
+ #endif
637
+
638
+ private:
639
+ EIGEN_DEVICE_FUNC explicit DenseBase(int);
640
+ EIGEN_DEVICE_FUNC DenseBase(int, int);
641
+ template <typename OtherDerived>
642
+ EIGEN_DEVICE_FUNC explicit DenseBase(const DenseBase<OtherDerived>&);
643
+ };
644
+
645
+ } // end namespace Eigen
646
+
647
+ #endif // EIGEN_DENSEBASE_H
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/DenseCoeffsBase.h ADDED
@@ -0,0 +1,569 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2006-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
5
+ //
6
+ // This Source Code Form is subject to the terms of the Mozilla
7
+ // Public License v. 2.0. If a copy of the MPL was not distributed
8
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9
+
10
+ #ifndef EIGEN_DENSECOEFFSBASE_H
11
+ #define EIGEN_DENSECOEFFSBASE_H
12
+
13
+ // IWYU pragma: private
14
+ #include "./InternalHeaderCheck.h"
15
+
16
+ namespace Eigen {
17
+
18
+ namespace internal {
19
// Helper trait: arithmetic types (int, double, ...) are kept as plain values
// (adding const to them would only trigger warnings on some compilers), while
// non-arithmetic types get a const-qualified value type. Used below to form
// PacketReturnType.
template <typename T>
struct add_const_on_value_type_if_arithmetic {
  typedef std::conditional_t<is_arithmetic<T>::value, T, add_const_on_value_type_t<T>> type;
};
23
+ } // namespace internal
24
+
25
+ /** \brief Base class providing read-only coefficient access to matrices and arrays.
26
+ * \ingroup Core_Module
27
+ * \tparam Derived Type of the derived class
28
+ *
29
+ * \note #ReadOnlyAccessors Constant indicating read-only access
30
+ *
31
+ * This class defines the \c operator() \c const function and friends, which can be used to read specific
32
+ * entries of a matrix or array.
33
+ *
34
+ * \sa DenseCoeffsBase<Derived, WriteAccessors>, DenseCoeffsBase<Derived, DirectAccessors>,
35
+ * \ref TopicClassHierarchy
36
+ */
37
template <typename Derived>
class DenseCoeffsBase<Derived, ReadOnlyAccessors> : public EigenBase<Derived> {
 public:
  typedef typename internal::traits<Derived>::StorageKind StorageKind;
  typedef typename internal::traits<Derived>::Scalar Scalar;
  typedef typename internal::packet_traits<Scalar>::type PacketScalar;

  // Explanation for this CoeffReturnType typedef.
  // - This is the return type of the coeff() method.
  // - The LvalueBit means exactly that we can offer a coeffRef() method, which means exactly that we can get references
  // to coeffs, which means exactly that we can have coeff() return a const reference (as opposed to returning a value).
  // - The is_arithmetic check is required since "const int", "const double", etc. will cause warnings on some systems
  // while the declaration of "const T", where T is a non arithmetic type does not. Always returning "const Scalar&" is
  // not possible, since the underlying expressions might not offer a valid address the reference could be referring to.
  typedef std::conditional_t<bool(internal::traits<Derived>::Flags& LvalueBit), const Scalar&,
                             std::conditional_t<internal::is_arithmetic<Scalar>::value, Scalar, const Scalar>>
      CoeffReturnType;

  // Packets of arithmetic scalars are returned by value; see
  // internal::add_const_on_value_type_if_arithmetic above.
  typedef typename internal::add_const_on_value_type_if_arithmetic<typename internal::packet_traits<Scalar>::type>::type
      PacketReturnType;

  typedef EigenBase<Derived> Base;
  using Base::cols;
  using Base::derived;
  using Base::rows;
  using Base::size;

  // Maps (outer, inner) storage-order coordinates to a row index, resolving the
  // compile-time vector and storage-order cases first.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rowIndexByOuterInner(Index outer, Index inner) const {
    return int(Derived::RowsAtCompileTime) == 1   ? 0
           : int(Derived::ColsAtCompileTime) == 1 ? inner
           : int(Derived::Flags) & RowMajorBit    ? outer
                                                  : inner;
  }

  // Maps (outer, inner) storage-order coordinates to a column index (dual of
  // rowIndexByOuterInner).
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index colIndexByOuterInner(Index outer, Index inner) const {
    return int(Derived::ColsAtCompileTime) == 1   ? 0
           : int(Derived::RowsAtCompileTime) == 1 ? inner
           : int(Derived::Flags) & RowMajorBit    ? inner
                                                  : outer;
  }

  /** Short version: don't use this function, use
   * \link operator()(Index,Index) const \endlink instead.
   *
   * Long version: this function is similar to
   * \link operator()(Index,Index) const \endlink, but without the assertion.
   * Use this for limiting the performance cost of debugging code when doing
   * repeated coefficient access. Only use this when it is guaranteed that the
   * parameters \a row and \a col are in range.
   *
   * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this
   * function equivalent to \link operator()(Index,Index) const \endlink.
   *
   * \sa operator()(Index,Index) const, coeffRef(Index,Index), coeff(Index) const
   */
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR CoeffReturnType coeff(Index row, Index col) const {
    eigen_internal_assert(row >= 0 && row < rows() && col >= 0 && col < cols());
    return internal::evaluator<Derived>(derived()).coeff(row, col);
  }

  // coeff() addressed by (outer, inner) storage-order coordinates.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR CoeffReturnType coeffByOuterInner(Index outer,
                                                                                          Index inner) const {
    return coeff(rowIndexByOuterInner(outer, inner), colIndexByOuterInner(outer, inner));
  }

  /** \returns the coefficient at given the given row and column.
   *
   * \sa operator()(Index,Index), operator[](Index)
   */
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR CoeffReturnType operator()(Index row, Index col) const {
    eigen_assert(row >= 0 && row < rows() && col >= 0 && col < cols());
    return coeff(row, col);
  }

  /** Short version: don't use this function, use
   * \link operator[](Index) const \endlink instead.
   *
   * Long version: this function is similar to
   * \link operator[](Index) const \endlink, but without the assertion.
   * Use this for limiting the performance cost of debugging code when doing
   * repeated coefficient access. Only use this when it is guaranteed that the
   * parameter \a index is in range.
   *
   * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this
   * function equivalent to \link operator[](Index) const \endlink.
   *
   * \sa operator[](Index) const, coeffRef(Index), coeff(Index,Index) const
   */

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR CoeffReturnType coeff(Index index) const {
    EIGEN_STATIC_ASSERT(internal::evaluator<Derived>::Flags & LinearAccessBit,
                        THIS_COEFFICIENT_ACCESSOR_TAKING_ONE_ACCESS_IS_ONLY_FOR_EXPRESSIONS_ALLOWING_LINEAR_ACCESS)
    eigen_internal_assert(index >= 0 && index < size());
    return internal::evaluator<Derived>(derived()).coeff(index);
  }

  /** \returns the coefficient at given index.
   *
   * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit.
   *
   * \sa operator[](Index), operator()(Index,Index) const, x() const, y() const,
   * z() const, w() const
   */

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR CoeffReturnType operator[](Index index) const {
    EIGEN_STATIC_ASSERT(Derived::IsVectorAtCompileTime,
                        THE_BRACKET_OPERATOR_IS_ONLY_FOR_VECTORS__USE_THE_PARENTHESIS_OPERATOR_INSTEAD)
    eigen_assert(index >= 0 && index < size());
    return coeff(index);
  }

  /** \returns the coefficient at given index.
   *
   * This is synonymous to operator[](Index) const.
   *
   * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit.
   *
   * \sa operator[](Index), operator()(Index,Index) const, x() const, y() const,
   * z() const, w() const
   */

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR CoeffReturnType operator()(Index index) const {
    eigen_assert(index >= 0 && index < size());
    return coeff(index);
  }

  /** equivalent to operator[](0). */

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR CoeffReturnType x() const { return (*this)[0]; }

  /** equivalent to operator[](1). */

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR CoeffReturnType y() const {
    // Reject fixed-size vectors that are statically known to be too small.
    EIGEN_STATIC_ASSERT(Derived::SizeAtCompileTime == -1 || Derived::SizeAtCompileTime >= 2, OUT_OF_RANGE_ACCESS);
    return (*this)[1];
  }

  /** equivalent to operator[](2). */

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR CoeffReturnType z() const {
    EIGEN_STATIC_ASSERT(Derived::SizeAtCompileTime == -1 || Derived::SizeAtCompileTime >= 3, OUT_OF_RANGE_ACCESS);
    return (*this)[2];
  }

  /** equivalent to operator[](3). */

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR CoeffReturnType w() const {
    EIGEN_STATIC_ASSERT(Derived::SizeAtCompileTime == -1 || Derived::SizeAtCompileTime >= 4, OUT_OF_RANGE_ACCESS);
    return (*this)[3];
  }

  /** \internal
   * \returns the packet of coefficients starting at the given row and column. It is your responsibility
   * to ensure that a packet really starts there. This method is only available on expressions having the
   * PacketAccessBit.
   *
   * The \a LoadMode parameter may have the value \a #Aligned or \a #Unaligned. Its effect is to select
   * the appropriate vectorization instruction. Aligned access is faster, but is only possible for packets
   * starting at an address which is a multiple of the packet size.
   */

  template <int LoadMode>
  EIGEN_STRONG_INLINE PacketReturnType packet(Index row, Index col) const {
    typedef typename internal::packet_traits<Scalar>::type DefaultPacketType;
    eigen_internal_assert(row >= 0 && row < rows() && col >= 0 && col < cols());
    return internal::evaluator<Derived>(derived()).template packet<LoadMode, DefaultPacketType>(row, col);
  }

  /** \internal packet() addressed by (outer, inner) storage-order coordinates. */
  template <int LoadMode>
  EIGEN_STRONG_INLINE PacketReturnType packetByOuterInner(Index outer, Index inner) const {
    return packet<LoadMode>(rowIndexByOuterInner(outer, inner), colIndexByOuterInner(outer, inner));
  }

  /** \internal
   * \returns the packet of coefficients starting at the given index. It is your responsibility
   * to ensure that a packet really starts there. This method is only available on expressions having the
   * PacketAccessBit and the LinearAccessBit.
   *
   * The \a LoadMode parameter may have the value \a #Aligned or \a #Unaligned. Its effect is to select
   * the appropriate vectorization instruction. Aligned access is faster, but is only possible for packets
   * starting at an address which is a multiple of the packet size.
   */

  template <int LoadMode>
  EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const {
    EIGEN_STATIC_ASSERT(internal::evaluator<Derived>::Flags & LinearAccessBit,
                        THIS_COEFFICIENT_ACCESSOR_TAKING_ONE_ACCESS_IS_ONLY_FOR_EXPRESSIONS_ALLOWING_LINEAR_ACCESS)
    typedef typename internal::packet_traits<Scalar>::type DefaultPacketType;
    eigen_internal_assert(index >= 0 && index < size());
    return internal::evaluator<Derived>(derived()).template packet<LoadMode, DefaultPacketType>(index);
  }

 protected:
  // explanation: DenseBase is doing "using ..." on the methods from DenseCoeffsBase.
  // But some methods are only available in the DirectAccess case.
  // So we add dummy methods here with these names, so that "using... " doesn't fail.
  // It's not private so that the child class DenseBase can access them, and it's not public
  // either since it's an implementation detail, so has to be protected.
  // These declarations are intentionally left undefined; they only exist to
  // make the corresponding "using" declarations in DenseBase well-formed.
  void coeffRef();
  void coeffRefByOuterInner();
  void writePacket();
  void writePacketByOuterInner();
  void copyCoeff();
  void copyCoeffByOuterInner();
  void copyPacket();
  void copyPacketByOuterInner();
  void stride();
  void innerStride();
  void outerStride();
  void rowStride();
  void colStride();
};
250
+
251
+ /** \brief Base class providing read/write coefficient access to matrices and arrays.
252
+ * \ingroup Core_Module
253
+ * \tparam Derived Type of the derived class
254
+ *
255
+ * \note #WriteAccessors Constant indicating read/write access
256
+ *
257
+ * This class defines the non-const \c operator() function and friends, which can be used to write specific
258
+ * entries of a matrix or array. This class inherits DenseCoeffsBase<Derived, ReadOnlyAccessors> which
259
+ * defines the const variant for reading specific entries.
260
+ *
261
+ * \sa DenseCoeffsBase<Derived, DirectAccessors>, \ref TopicClassHierarchy
262
+ */
263
template <typename Derived>
class DenseCoeffsBase<Derived, WriteAccessors> : public DenseCoeffsBase<Derived, ReadOnlyAccessors> {
 public:
  typedef DenseCoeffsBase<Derived, ReadOnlyAccessors> Base;

  typedef typename internal::traits<Derived>::StorageKind StorageKind;
  typedef typename internal::traits<Derived>::Scalar Scalar;
  typedef typename internal::packet_traits<Scalar>::type PacketScalar;
  typedef typename NumTraits<Scalar>::Real RealScalar;

  // Re-export the const accessors from the read-only base so that the const and
  // non-const overloads participate in overload resolution together.
  using Base::coeff;
  using Base::colIndexByOuterInner;
  using Base::cols;
  using Base::derived;
  using Base::rowIndexByOuterInner;
  using Base::rows;
  using Base::size;
  using Base::operator[];
  using Base::operator();
  using Base::w;
  using Base::x;
  using Base::y;
  using Base::z;

  /** Short version: don't use this function, use
   * \link operator()(Index,Index) \endlink instead.
   *
   * Long version: this function is similar to
   * \link operator()(Index,Index) \endlink, but without the assertion.
   * Use this for limiting the performance cost of debugging code when doing
   * repeated coefficient access. Only use this when it is guaranteed that the
   * parameters \a row and \a col are in range.
   *
   * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this
   * function equivalent to \link operator()(Index,Index) \endlink.
   *
   * \sa operator()(Index,Index), coeff(Index, Index) const, coeffRef(Index)
   */
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col) {
    eigen_internal_assert(row >= 0 && row < rows() && col >= 0 && col < cols());
    return internal::evaluator<Derived>(derived()).coeffRef(row, col);
  }

  // coeffRef() addressed by (outer, inner) storage-order coordinates.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRefByOuterInner(Index outer, Index inner) {
    return coeffRef(rowIndexByOuterInner(outer, inner), colIndexByOuterInner(outer, inner));
  }

  /** \returns a reference to the coefficient at given the given row and column.
   *
   * \sa operator[](Index)
   */

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()(Index row, Index col) {
    eigen_assert(row >= 0 && row < rows() && col >= 0 && col < cols());
    return coeffRef(row, col);
  }

  /** Short version: don't use this function, use
   * \link operator[](Index) \endlink instead.
   *
   * Long version: this function is similar to
   * \link operator[](Index) \endlink, but without the assertion.
   * Use this for limiting the performance cost of debugging code when doing
   * repeated coefficient access. Only use this when it is guaranteed that the
   * parameters \a row and \a col are in range.
   *
   * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this
   * function equivalent to \link operator[](Index) \endlink.
   *
   * \sa operator[](Index), coeff(Index) const, coeffRef(Index,Index)
   */

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) {
    EIGEN_STATIC_ASSERT(internal::evaluator<Derived>::Flags & LinearAccessBit,
                        THIS_COEFFICIENT_ACCESSOR_TAKING_ONE_ACCESS_IS_ONLY_FOR_EXPRESSIONS_ALLOWING_LINEAR_ACCESS)
    eigen_internal_assert(index >= 0 && index < size());
    return internal::evaluator<Derived>(derived()).coeffRef(index);
  }

  /** \returns a reference to the coefficient at given index.
   *
   * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit.
   *
   * \sa operator[](Index) const, operator()(Index,Index), x(), y(), z(), w()
   */

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator[](Index index) {
    EIGEN_STATIC_ASSERT(Derived::IsVectorAtCompileTime,
                        THE_BRACKET_OPERATOR_IS_ONLY_FOR_VECTORS__USE_THE_PARENTHESIS_OPERATOR_INSTEAD)
    eigen_assert(index >= 0 && index < size());
    return coeffRef(index);
  }

  /** \returns a reference to the coefficient at given index.
   *
   * This is synonymous to operator[](Index).
   *
   * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit.
   *
   * \sa operator[](Index) const, operator()(Index,Index), x(), y(), z(), w()
   */

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Scalar& operator()(Index index) {
    eigen_assert(index >= 0 && index < size());
    return coeffRef(index);
  }

  /** equivalent to operator[](0). */

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Scalar& x() { return (*this)[0]; }

  /** equivalent to operator[](1). */

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Scalar& y() {
    // Reject fixed-size vectors that are statically known to be too small.
    EIGEN_STATIC_ASSERT(Derived::SizeAtCompileTime == -1 || Derived::SizeAtCompileTime >= 2, OUT_OF_RANGE_ACCESS);
    return (*this)[1];
  }

  /** equivalent to operator[](2). */

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Scalar& z() {
    EIGEN_STATIC_ASSERT(Derived::SizeAtCompileTime == -1 || Derived::SizeAtCompileTime >= 3, OUT_OF_RANGE_ACCESS);
    return (*this)[2];
  }

  /** equivalent to operator[](3). */

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Scalar& w() {
    EIGEN_STATIC_ASSERT(Derived::SizeAtCompileTime == -1 || Derived::SizeAtCompileTime >= 4, OUT_OF_RANGE_ACCESS);
    return (*this)[3];
  }
};
395
+
396
+ /** \brief Base class providing direct read-only coefficient access to matrices and arrays.
397
+ * \ingroup Core_Module
398
+ * \tparam Derived Type of the derived class
399
+ *
400
+ * \note #DirectAccessors Constant indicating direct access
401
+ *
402
+ * This class defines functions to work with strides which can be used to access entries directly. This class
403
+ * inherits DenseCoeffsBase<Derived, ReadOnlyAccessors> which defines functions to access entries read-only using
404
+ * \c operator() .
405
+ *
406
+ * \sa \blank \ref TopicClassHierarchy
407
+ */
408
template <typename Derived>
class DenseCoeffsBase<Derived, DirectAccessors> : public DenseCoeffsBase<Derived, ReadOnlyAccessors> {
 public:
  typedef DenseCoeffsBase<Derived, ReadOnlyAccessors> Base;
  typedef typename internal::traits<Derived>::Scalar Scalar;
  typedef typename NumTraits<Scalar>::Real RealScalar;

  using Base::cols;
  using Base::derived;
  using Base::rows;
  using Base::size;

  /** \returns the pointer increment between two consecutive elements within a slice in the inner direction.
   *
   * \sa outerStride(), rowStride(), colStride()
   */
  // Simply forwards to the derived expression, which knows its actual storage layout.
  EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index innerStride() const { return derived().innerStride(); }

  /** \returns the pointer increment between two consecutive inner slices (for example, between two consecutive columns
   * in a column-major matrix).
   *
   * \sa innerStride(), rowStride(), colStride()
   */
  EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index outerStride() const { return derived().outerStride(); }

  // FIXME shall we remove it ?
  EIGEN_CONSTEXPR inline Index stride() const { return Derived::IsVectorAtCompileTime ? innerStride() : outerStride(); }

  /** \returns the pointer increment between two consecutive rows.
   *
   * \sa innerStride(), outerStride(), colStride()
   */
  EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index rowStride() const {
    return Derived::IsRowMajor ? outerStride() : innerStride();
  }

  /** \returns the pointer increment between two consecutive columns.
   *
   * \sa innerStride(), outerStride(), rowStride()
   */
  EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index colStride() const {
    return Derived::IsRowMajor ? innerStride() : outerStride();
  }
};
452
+
453
+ /** \brief Base class providing direct read/write coefficient access to matrices and arrays.
454
+ * \ingroup Core_Module
455
+ * \tparam Derived Type of the derived class
456
+ *
457
+ * \note #DirectWriteAccessors Constant indicating direct access
458
+ *
459
+ * This class defines functions to work with strides which can be used to access entries directly. This class
460
+ * inherits DenseCoeffsBase<Derived, WriteAccessors> which defines functions to access entries read/write using
461
+ * \c operator().
462
+ *
463
+ * \sa \blank \ref TopicClassHierarchy
464
+ */
465
// Same stride interface as the DirectAccessors specialization, but layered on
// top of the read/write base class; these overloads additionally carry
// EIGEN_NOEXCEPT.
template <typename Derived>
class DenseCoeffsBase<Derived, DirectWriteAccessors> : public DenseCoeffsBase<Derived, WriteAccessors> {
 public:
  typedef DenseCoeffsBase<Derived, WriteAccessors> Base;
  typedef typename internal::traits<Derived>::Scalar Scalar;
  typedef typename NumTraits<Scalar>::Real RealScalar;

  using Base::cols;
  using Base::derived;
  using Base::rows;
  using Base::size;

  /** \returns the pointer increment between two consecutive elements within a slice in the inner direction.
   *
   * \sa outerStride(), rowStride(), colStride()
   */
  EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index innerStride() const EIGEN_NOEXCEPT { return derived().innerStride(); }

  /** \returns the pointer increment between two consecutive inner slices (for example, between two consecutive columns
   * in a column-major matrix).
   *
   * \sa innerStride(), rowStride(), colStride()
   */
  EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index outerStride() const EIGEN_NOEXCEPT { return derived().outerStride(); }

  // FIXME shall we remove it ?
  EIGEN_CONSTEXPR inline Index stride() const EIGEN_NOEXCEPT {
    return Derived::IsVectorAtCompileTime ? innerStride() : outerStride();
  }

  /** \returns the pointer increment between two consecutive rows.
   *
   * \sa innerStride(), outerStride(), colStride()
   */
  EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index rowStride() const EIGEN_NOEXCEPT {
    return Derived::IsRowMajor ? outerStride() : innerStride();
  }

  /** \returns the pointer increment between two consecutive columns.
   *
   * \sa innerStride(), outerStride(), rowStride()
   */
  EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index colStride() const EIGEN_NOEXCEPT {
    return Derived::IsRowMajor ? innerStride() : outerStride();
  }
};
511
+
512
+ namespace internal {
513
+
514
// When the answer is statically known to be zero (already sufficiently aligned,
// or no direct access), skip the runtime pointer computation entirely.
template <int Alignment, typename Derived, bool JustReturnZero>
struct first_aligned_impl {
  static EIGEN_CONSTEXPR inline Index run(const Derived&) EIGEN_NOEXCEPT { return 0; }
};

// Otherwise defer to the pointer-based overload using the expression's data().
template <int Alignment, typename Derived>
struct first_aligned_impl<Alignment, Derived, false> {
  static inline Index run(const Derived& m) { return internal::first_aligned<Alignment>(m.data(), m.size()); }
};

/** \internal \returns the index of the first element of the array stored by \a m that is properly aligned with respect
 * to \a Alignment for vectorization.
 *
 * \tparam Alignment requested alignment in Bytes.
 *
 * There is also the variant first_aligned(const Scalar*, Integer) defined in Memory.h. See it for more
 * documentation.
 */
template <int Alignment, typename Derived>
static inline Index first_aligned(const DenseBase<Derived>& m) {
  enum { ReturnZero = (int(evaluator<Derived>::Alignment) >= Alignment) || !(Derived::Flags & DirectAccessBit) };
  return first_aligned_impl<Alignment, Derived, ReturnZero>::run(m.derived());
}

// Convenience wrapper: uses the alignment of the default packet type for the
// expression's scalar type.
template <typename Derived>
static inline Index first_default_aligned(const DenseBase<Derived>& m) {
  typedef typename Derived::Scalar Scalar;
  typedef typename packet_traits<Scalar>::type DefaultPacketType;
  return internal::first_aligned<int(unpacket_traits<DefaultPacketType>::alignment), Derived>(m);
}
544
+
545
// Compile-time inner stride of an expression; only meaningful when the
// expression offers direct access, otherwise it is defined as 0.
template <typename Derived, bool HasDirectAccess = has_direct_access<Derived>::ret>
struct inner_stride_at_compile_time {
  enum { ret = traits<Derived>::InnerStrideAtCompileTime };
};

template <typename Derived>
struct inner_stride_at_compile_time<Derived, false> {
  enum { ret = 0 };
};

// Compile-time outer stride of an expression; only meaningful when the
// expression offers direct access, otherwise it is defined as 0.
template <typename Derived, bool HasDirectAccess = has_direct_access<Derived>::ret>
struct outer_stride_at_compile_time {
  enum { ret = traits<Derived>::OuterStrideAtCompileTime };
};

template <typename Derived>
struct outer_stride_at_compile_time<Derived, false> {
  enum { ret = 0 };
};
564
+
565
+ } // end namespace internal
566
+
567
+ } // end namespace Eigen
568
+
569
+ #endif // EIGEN_DENSECOEFFSBASE_H
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/DenseStorage.h ADDED
@@ -0,0 +1,650 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
5
+ // Copyright (C) 2006-2009 Benoit Jacob <jacob.benoit.1@gmail.com>
6
+ // Copyright (C) 2010-2013 Hauke Heibel <hauke.heibel@gmail.com>
7
+ //
8
+ // This Source Code Form is subject to the terms of the Mozilla
9
+ // Public License v. 2.0. If a copy of the MPL was not distributed
10
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
11
+
12
+ #ifndef EIGEN_MATRIXSTORAGE_H
13
+ #define EIGEN_MATRIXSTORAGE_H
14
+
15
+ #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN
16
+ #define EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(X) \
17
+ X; \
18
+ EIGEN_DENSE_STORAGE_CTOR_PLUGIN;
19
+ #else
20
+ #define EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(X)
21
+ #endif
22
+
23
+ // IWYU pragma: private
24
+ #include "./InternalHeaderCheck.h"
25
+
26
+ namespace Eigen {
27
+
28
+ namespace internal {
29
+
30
+ struct constructor_without_unaligned_array_assert {};
31
+
32
// Compile-time guard: refuses to instantiate a fixed-size array whose byte size
// exceeds EIGEN_STACK_ALLOCATION_LIMIT.
template <typename T, int Size>
EIGEN_DEVICE_FUNC constexpr void check_static_allocation_size() {
  // if EIGEN_STACK_ALLOCATION_LIMIT is defined to 0, then no limit
#if EIGEN_STACK_ALLOCATION_LIMIT
  EIGEN_STATIC_ASSERT(Size * sizeof(T) <= EIGEN_STACK_ALLOCATION_LIMIT, OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG);
#endif
}
39
+
40
+ /** \internal
41
+ * Static array. If the MatrixOrArrayOptions require auto-alignment, the array will be automatically aligned:
42
+ * to 16 bytes boundary if the total size is a multiple of 16 bytes.
43
+ */
44
// Primary template: no over-alignment requested (Alignment computed as 0 via
// DontAlign, or compute_default_alignment yields no special boundary).
template <typename T, int Size, int MatrixOrArrayOptions,
          int Alignment = (MatrixOrArrayOptions & DontAlign) ? 0 : compute_default_alignment<T, Size>::value>
struct plain_array {
  T array[Size];

  EIGEN_DEVICE_FUNC constexpr plain_array() { check_static_allocation_size<T, Size>(); }

  // Variant used internally when the unaligned-array assertion must be skipped
  // (e.g. when constructing from already-validated storage).
  EIGEN_DEVICE_FUNC constexpr plain_array(constructor_without_unaligned_array_assert) {
    check_static_allocation_size<T, Size>();
  }
};
55
+
56
+ #if defined(EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT)
57
+ #define EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(sizemask)
58
+ #else
59
+ #define EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(sizemask) \
60
+ eigen_assert((internal::is_constant_evaluated() || (std::uintptr_t(array) & (sizemask)) == 0) && \
61
+ "this assertion is explained here: " \
62
+ "http://eigen.tuxfamily.org/dox-devel/group__TopicUnalignedArrayAssert.html" \
63
+ " **** READ THIS WEB PAGE !!! ****");
64
+ #endif
65
+
66
// Specializations for explicit alignment boundaries (8/16/32/64 bytes). Each
// one aligns the storage and, in the default constructor, verifies at runtime
// that the object really ended up on a correctly aligned address (the
// well-known Eigen "unaligned array assert").
template <typename T, int Size, int MatrixOrArrayOptions>
struct plain_array<T, Size, MatrixOrArrayOptions, 8> {
  EIGEN_ALIGN_TO_BOUNDARY(8) T array[Size];

  EIGEN_DEVICE_FUNC constexpr plain_array() {
    EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(7);
    check_static_allocation_size<T, Size>();
  }

  EIGEN_DEVICE_FUNC constexpr plain_array(constructor_without_unaligned_array_assert) {
    check_static_allocation_size<T, Size>();
  }
};

template <typename T, int Size, int MatrixOrArrayOptions>
struct plain_array<T, Size, MatrixOrArrayOptions, 16> {
  EIGEN_ALIGN_TO_BOUNDARY(16) T array[Size];

  EIGEN_DEVICE_FUNC constexpr plain_array() {
    EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(15);
    check_static_allocation_size<T, Size>();
  }

  EIGEN_DEVICE_FUNC constexpr plain_array(constructor_without_unaligned_array_assert) {
    check_static_allocation_size<T, Size>();
  }
};

template <typename T, int Size, int MatrixOrArrayOptions>
struct plain_array<T, Size, MatrixOrArrayOptions, 32> {
  EIGEN_ALIGN_TO_BOUNDARY(32) T array[Size];

  EIGEN_DEVICE_FUNC constexpr plain_array() {
    EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(31);
    check_static_allocation_size<T, Size>();
  }

  EIGEN_DEVICE_FUNC constexpr plain_array(constructor_without_unaligned_array_assert) {
    check_static_allocation_size<T, Size>();
  }
};

template <typename T, int Size, int MatrixOrArrayOptions>
struct plain_array<T, Size, MatrixOrArrayOptions, 64> {
  EIGEN_ALIGN_TO_BOUNDARY(64) T array[Size];

  EIGEN_DEVICE_FUNC constexpr plain_array() {
    EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(63);
    check_static_allocation_size<T, Size>();
  }

  EIGEN_DEVICE_FUNC constexpr plain_array(constructor_without_unaligned_array_assert) {
    check_static_allocation_size<T, Size>();
  }
};

// Zero-size specialization: C++ forbids zero-length arrays, so keep a single
// (unused) element; no alignment or size checks needed.
template <typename T, int MatrixOrArrayOptions, int Alignment>
struct plain_array<T, 0, MatrixOrArrayOptions, Alignment> {
  T array[1];
  EIGEN_DEVICE_FUNC constexpr plain_array() {}
  EIGEN_DEVICE_FUNC constexpr plain_array(constructor_without_unaligned_array_assert) {}
};
128
+
129
+ struct plain_array_helper {
130
+ template <typename T, int Size, int MatrixOrArrayOptions, int Alignment>
131
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static void copy(
132
+ const plain_array<T, Size, MatrixOrArrayOptions, Alignment>& src, const Eigen::Index size,
133
+ plain_array<T, Size, MatrixOrArrayOptions, Alignment>& dst) {
134
+ smart_copy(src.array, src.array + size, dst.array);
135
+ }
136
+
137
+ template <typename T, int Size, int MatrixOrArrayOptions, int Alignment>
138
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static void swap(plain_array<T, Size, MatrixOrArrayOptions, Alignment>& a,
139
+ const Eigen::Index a_size,
140
+ plain_array<T, Size, MatrixOrArrayOptions, Alignment>& b,
141
+ const Eigen::Index b_size) {
142
+ if (a_size < b_size) {
143
+ std::swap_ranges(b.array, b.array + a_size, a.array);
144
+ smart_move(b.array + a_size, b.array + b_size, a.array + a_size);
145
+ } else if (a_size > b_size) {
146
+ std::swap_ranges(a.array, a.array + b_size, b.array);
147
+ smart_move(a.array + b_size, a.array + a_size, b.array + b_size);
148
+ } else {
149
+ std::swap_ranges(a.array, a.array + a_size, b.array);
150
+ }
151
+ }
152
+ };
153
+
154
+ } // end namespace internal
155
+
156
+ /** \internal
157
+ *
158
+ * \class DenseStorage
159
+ * \ingroup Core_Module
160
+ *
161
+ * \brief Stores the data of a matrix
162
+ *
163
+ * This class stores the data of fixed-size, dynamic-size or mixed matrices
164
+ * in a way as compact as possible.
165
+ *
166
+ * \sa Matrix
167
+ */
168
+ template <typename T, int Size, int Rows_, int Cols_, int Options_>
169
+ class DenseStorage;
170
+
171
+ // purely fixed-size matrix
172
+ template <typename T, int Size, int Rows_, int Cols_, int Options_>
173
+ class DenseStorage {
174
+ internal::plain_array<T, Size, Options_> m_data;
175
+
176
+ public:
177
+ constexpr EIGEN_DEVICE_FUNC DenseStorage(){EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(
178
+ Index size =
179
+ Size)} EIGEN_DEVICE_FUNC explicit constexpr DenseStorage(internal::constructor_without_unaligned_array_assert)
180
+ : m_data(internal::constructor_without_unaligned_array_assert()) {}
181
+ #if defined(EIGEN_DENSE_STORAGE_CTOR_PLUGIN)
182
+ EIGEN_DEVICE_FUNC constexpr DenseStorage(const DenseStorage& other)
183
+ : m_data(other.m_data){EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(Index size = Size)}
184
+ #else
185
+ EIGEN_DEVICE_FUNC constexpr DenseStorage(const DenseStorage&) = default;
186
+ #endif
187
+ EIGEN_DEVICE_FUNC constexpr DenseStorage
188
+ &
189
+ operator=(const DenseStorage&) = default;
190
+ EIGEN_DEVICE_FUNC constexpr DenseStorage(DenseStorage&&) = default;
191
+ EIGEN_DEVICE_FUNC constexpr DenseStorage& operator=(DenseStorage&&) = default;
192
+ EIGEN_DEVICE_FUNC constexpr DenseStorage(Index size, Index rows, Index cols) {
193
+ EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({})
194
+ eigen_internal_assert(size == rows * cols && rows == Rows_ && cols == Cols_);
195
+ EIGEN_UNUSED_VARIABLE(size);
196
+ EIGEN_UNUSED_VARIABLE(rows);
197
+ EIGEN_UNUSED_VARIABLE(cols);
198
+ }
199
+ EIGEN_DEVICE_FUNC void swap(DenseStorage& other) { numext::swap(m_data, other.m_data); }
200
+ EIGEN_DEVICE_FUNC static constexpr Index rows(void) EIGEN_NOEXCEPT { return Rows_; }
201
+ EIGEN_DEVICE_FUNC static constexpr Index cols(void) EIGEN_NOEXCEPT { return Cols_; }
202
+ EIGEN_DEVICE_FUNC constexpr void conservativeResize(Index, Index, Index) {}
203
+ EIGEN_DEVICE_FUNC constexpr void resize(Index, Index, Index) {}
204
+ EIGEN_DEVICE_FUNC constexpr const T* data() const { return m_data.array; }
205
+ EIGEN_DEVICE_FUNC constexpr T* data() { return m_data.array; }
206
+ };
207
+
208
+ // null matrix
209
+ template <typename T, int Rows_, int Cols_, int Options_>
210
+ class DenseStorage<T, 0, Rows_, Cols_, Options_> {
211
+ public:
212
+ static_assert(Rows_ * Cols_ == 0, "The fixed number of rows times columns must equal the storage size.");
213
+ EIGEN_DEVICE_FUNC constexpr DenseStorage() {}
214
+ EIGEN_DEVICE_FUNC explicit constexpr DenseStorage(internal::constructor_without_unaligned_array_assert) {}
215
+ EIGEN_DEVICE_FUNC constexpr DenseStorage(const DenseStorage&) {}
216
+ EIGEN_DEVICE_FUNC constexpr DenseStorage& operator=(const DenseStorage&) { return *this; }
217
+ EIGEN_DEVICE_FUNC constexpr DenseStorage(Index, Index, Index) {}
218
+ EIGEN_DEVICE_FUNC constexpr void swap(DenseStorage&) {}
219
+ EIGEN_DEVICE_FUNC static constexpr Index rows(void) EIGEN_NOEXCEPT { return Rows_; }
220
+ EIGEN_DEVICE_FUNC static constexpr Index cols(void) EIGEN_NOEXCEPT { return Cols_; }
221
+ EIGEN_DEVICE_FUNC constexpr void conservativeResize(Index, Index, Index) {}
222
+ EIGEN_DEVICE_FUNC constexpr void resize(Index, Index, Index) {}
223
+ EIGEN_DEVICE_FUNC constexpr const T* data() const { return 0; }
224
+ EIGEN_DEVICE_FUNC constexpr T* data() { return 0; }
225
+ };
226
+
227
+ // more specializations for null matrices; these are necessary to resolve ambiguities
228
+ template <typename T, int Options_>
229
+ class DenseStorage<T, 0, Dynamic, Dynamic, Options_> {
230
+ Index m_rows;
231
+ Index m_cols;
232
+
233
+ public:
234
+ EIGEN_DEVICE_FUNC DenseStorage() : m_rows(0), m_cols(0) {}
235
+ EIGEN_DEVICE_FUNC explicit DenseStorage(internal::constructor_without_unaligned_array_assert) : DenseStorage() {}
236
+ EIGEN_DEVICE_FUNC DenseStorage(const DenseStorage& other) : m_rows(other.m_rows), m_cols(other.m_cols) {}
237
+ EIGEN_DEVICE_FUNC DenseStorage& operator=(const DenseStorage& other) {
238
+ m_rows = other.m_rows;
239
+ m_cols = other.m_cols;
240
+ return *this;
241
+ }
242
+ EIGEN_DEVICE_FUNC DenseStorage(Index, Index rows, Index cols) : m_rows(rows), m_cols(cols) {
243
+ eigen_assert(m_rows * m_cols == 0 && "The number of rows times columns must equal the storage size.");
244
+ }
245
+ EIGEN_DEVICE_FUNC void swap(DenseStorage& other) {
246
+ numext::swap(m_rows, other.m_rows);
247
+ numext::swap(m_cols, other.m_cols);
248
+ }
249
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index rows() const EIGEN_NOEXCEPT { return m_rows; }
250
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index cols() const EIGEN_NOEXCEPT { return m_cols; }
251
+ EIGEN_DEVICE_FUNC void conservativeResize(Index, Index rows, Index cols) {
252
+ m_rows = rows;
253
+ m_cols = cols;
254
+ eigen_assert(m_rows * m_cols == 0 && "The number of rows times columns must equal the storage size.");
255
+ }
256
+ EIGEN_DEVICE_FUNC void resize(Index, Index rows, Index cols) {
257
+ m_rows = rows;
258
+ m_cols = cols;
259
+ eigen_assert(m_rows * m_cols == 0 && "The number of rows times columns must equal the storage size.");
260
+ }
261
+ EIGEN_DEVICE_FUNC const T* data() const { return nullptr; }
262
+ EIGEN_DEVICE_FUNC T* data() { return nullptr; }
263
+ };
264
+
265
+ template <typename T, int Rows_, int Options_>
266
+ class DenseStorage<T, 0, Rows_, Dynamic, Options_> {
267
+ Index m_cols;
268
+
269
+ public:
270
+ EIGEN_DEVICE_FUNC DenseStorage() : m_cols(0) {}
271
+ EIGEN_DEVICE_FUNC explicit DenseStorage(internal::constructor_without_unaligned_array_assert) : DenseStorage() {}
272
+ EIGEN_DEVICE_FUNC DenseStorage(const DenseStorage& other) : m_cols(other.m_cols) {}
273
+ EIGEN_DEVICE_FUNC DenseStorage& operator=(const DenseStorage& other) {
274
+ m_cols = other.m_cols;
275
+ return *this;
276
+ }
277
+ EIGEN_DEVICE_FUNC DenseStorage(Index, Index, Index cols) : m_cols(cols) {
278
+ eigen_assert(Rows_ * m_cols == 0 && "The number of rows times columns must equal the storage size.");
279
+ }
280
+ EIGEN_DEVICE_FUNC void swap(DenseStorage& other) { numext::swap(m_cols, other.m_cols); }
281
+ EIGEN_DEVICE_FUNC static EIGEN_CONSTEXPR Index rows(void) EIGEN_NOEXCEPT { return Rows_; }
282
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index cols(void) const EIGEN_NOEXCEPT { return m_cols; }
283
+ EIGEN_DEVICE_FUNC void conservativeResize(Index, Index, Index cols) {
284
+ m_cols = cols;
285
+ eigen_assert(Rows_ * m_cols == 0 && "The number of rows times columns must equal the storage size.");
286
+ }
287
+ EIGEN_DEVICE_FUNC void resize(Index, Index, Index cols) {
288
+ m_cols = cols;
289
+ eigen_assert(Rows_ * m_cols == 0 && "The number of rows times columns must equal the storage size.");
290
+ }
291
+ EIGEN_DEVICE_FUNC const T* data() const { return nullptr; }
292
+ EIGEN_DEVICE_FUNC T* data() { return nullptr; }
293
+ };
294
+
295
+ template <typename T, int Cols_, int Options_>
296
+ class DenseStorage<T, 0, Dynamic, Cols_, Options_> {
297
+ Index m_rows;
298
+
299
+ public:
300
+ EIGEN_DEVICE_FUNC DenseStorage() : m_rows(0) {}
301
+ EIGEN_DEVICE_FUNC explicit DenseStorage(internal::constructor_without_unaligned_array_assert) : DenseStorage() {}
302
+ EIGEN_DEVICE_FUNC DenseStorage(const DenseStorage& other) : m_rows(other.m_rows) {}
303
+ EIGEN_DEVICE_FUNC DenseStorage& operator=(const DenseStorage& other) {
304
+ m_rows = other.m_rows;
305
+ return *this;
306
+ }
307
+ EIGEN_DEVICE_FUNC DenseStorage(Index, Index rows, Index) : m_rows(rows) {
308
+ eigen_assert(m_rows * Cols_ == 0 && "The number of rows times columns must equal the storage size.");
309
+ }
310
+ EIGEN_DEVICE_FUNC void swap(DenseStorage& other) { numext::swap(m_rows, other.m_rows); }
311
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index rows(void) const EIGEN_NOEXCEPT { return m_rows; }
312
+ EIGEN_DEVICE_FUNC static EIGEN_CONSTEXPR Index cols(void) EIGEN_NOEXCEPT { return Cols_; }
313
+ EIGEN_DEVICE_FUNC void conservativeResize(Index, Index rows, Index) {
314
+ m_rows = rows;
315
+ eigen_assert(m_rows * Cols_ == 0 && "The number of rows times columns must equal the storage size.");
316
+ }
317
+ EIGEN_DEVICE_FUNC void resize(Index, Index rows, Index) {
318
+ m_rows = rows;
319
+ eigen_assert(m_rows * Cols_ == 0 && "The number of rows times columns must equal the storage size.");
320
+ }
321
+ EIGEN_DEVICE_FUNC const T* data() const { return nullptr; }
322
+ EIGEN_DEVICE_FUNC T* data() { return nullptr; }
323
+ };
324
+
325
+ // dynamic-size matrix with fixed-size storage
326
+ template <typename T, int Size, int Options_>
327
+ class DenseStorage<T, Size, Dynamic, Dynamic, Options_> {
328
+ internal::plain_array<T, Size, Options_> m_data;
329
+ Index m_rows;
330
+ Index m_cols;
331
+
332
+ public:
333
+ EIGEN_DEVICE_FUNC constexpr DenseStorage() : m_data(), m_rows(0), m_cols(0) {}
334
+ EIGEN_DEVICE_FUNC explicit constexpr DenseStorage(internal::constructor_without_unaligned_array_assert)
335
+ : m_data(internal::constructor_without_unaligned_array_assert()), m_rows(0), m_cols(0) {}
336
+ EIGEN_DEVICE_FUNC constexpr DenseStorage(const DenseStorage& other)
337
+ : m_data(internal::constructor_without_unaligned_array_assert()), m_rows(other.m_rows), m_cols(other.m_cols) {
338
+ internal::plain_array_helper::copy(other.m_data, m_rows * m_cols, m_data);
339
+ }
340
+ EIGEN_DEVICE_FUNC DenseStorage& operator=(const DenseStorage& other) {
341
+ if (this != &other) {
342
+ m_rows = other.m_rows;
343
+ m_cols = other.m_cols;
344
+ internal::plain_array_helper::copy(other.m_data, m_rows * m_cols, m_data);
345
+ }
346
+ return *this;
347
+ }
348
+ EIGEN_DEVICE_FUNC constexpr DenseStorage(Index, Index rows, Index cols) : m_rows(rows), m_cols(cols) {}
349
+ EIGEN_DEVICE_FUNC void swap(DenseStorage& other) {
350
+ internal::plain_array_helper::swap(m_data, m_rows * m_cols, other.m_data, other.m_rows * other.m_cols);
351
+ numext::swap(m_rows, other.m_rows);
352
+ numext::swap(m_cols, other.m_cols);
353
+ }
354
+ EIGEN_DEVICE_FUNC constexpr Index rows() const { return m_rows; }
355
+ EIGEN_DEVICE_FUNC constexpr Index cols() const { return m_cols; }
356
+ EIGEN_DEVICE_FUNC constexpr void conservativeResize(Index, Index rows, Index cols) {
357
+ m_rows = rows;
358
+ m_cols = cols;
359
+ }
360
+ EIGEN_DEVICE_FUNC constexpr void resize(Index, Index rows, Index cols) {
361
+ m_rows = rows;
362
+ m_cols = cols;
363
+ }
364
+ EIGEN_DEVICE_FUNC constexpr const T* data() const { return m_data.array; }
365
+ EIGEN_DEVICE_FUNC constexpr T* data() { return m_data.array; }
366
+ };
367
+
368
+ // dynamic-size matrix with fixed-size storage and fixed width
369
+ template <typename T, int Size, int Cols_, int Options_>
370
+ class DenseStorage<T, Size, Dynamic, Cols_, Options_> {
371
+ internal::plain_array<T, Size, Options_> m_data;
372
+ Index m_rows;
373
+
374
+ public:
375
+ EIGEN_DEVICE_FUNC constexpr DenseStorage() : m_rows(0) {}
376
+ EIGEN_DEVICE_FUNC explicit constexpr DenseStorage(internal::constructor_without_unaligned_array_assert)
377
+ : m_data(internal::constructor_without_unaligned_array_assert()), m_rows(0) {}
378
+ EIGEN_DEVICE_FUNC constexpr DenseStorage(const DenseStorage& other)
379
+ : m_data(internal::constructor_without_unaligned_array_assert()), m_rows(other.m_rows) {
380
+ internal::plain_array_helper::copy(other.m_data, m_rows * Cols_, m_data);
381
+ }
382
+
383
+ EIGEN_DEVICE_FUNC DenseStorage& operator=(const DenseStorage& other) {
384
+ if (this != &other) {
385
+ m_rows = other.m_rows;
386
+ internal::plain_array_helper::copy(other.m_data, m_rows * Cols_, m_data);
387
+ }
388
+ return *this;
389
+ }
390
+ EIGEN_DEVICE_FUNC constexpr DenseStorage(Index, Index rows, Index) : m_rows(rows) {}
391
+ EIGEN_DEVICE_FUNC void swap(DenseStorage& other) {
392
+ internal::plain_array_helper::swap(m_data, m_rows * Cols_, other.m_data, other.m_rows * Cols_);
393
+ numext::swap(m_rows, other.m_rows);
394
+ }
395
+ EIGEN_DEVICE_FUNC constexpr Index rows(void) const EIGEN_NOEXCEPT { return m_rows; }
396
+ EIGEN_DEVICE_FUNC constexpr Index cols(void) const EIGEN_NOEXCEPT { return Cols_; }
397
+ EIGEN_DEVICE_FUNC constexpr void conservativeResize(Index, Index rows, Index) { m_rows = rows; }
398
+ EIGEN_DEVICE_FUNC constexpr void resize(Index, Index rows, Index) { m_rows = rows; }
399
+ EIGEN_DEVICE_FUNC constexpr const T* data() const { return m_data.array; }
400
+ EIGEN_DEVICE_FUNC constexpr T* data() { return m_data.array; }
401
+ };
402
+
403
+ // dynamic-size matrix with fixed-size storage and fixed height
404
+ template <typename T, int Size, int Rows_, int Options_>
405
+ class DenseStorage<T, Size, Rows_, Dynamic, Options_> {
406
+ internal::plain_array<T, Size, Options_> m_data;
407
+ Index m_cols;
408
+
409
+ public:
410
+ EIGEN_DEVICE_FUNC constexpr DenseStorage() : m_cols(0) {}
411
+ EIGEN_DEVICE_FUNC explicit constexpr DenseStorage(internal::constructor_without_unaligned_array_assert)
412
+ : m_data(internal::constructor_without_unaligned_array_assert()), m_cols(0) {}
413
+ EIGEN_DEVICE_FUNC constexpr DenseStorage(const DenseStorage& other)
414
+ : m_data(internal::constructor_without_unaligned_array_assert()), m_cols(other.m_cols) {
415
+ internal::plain_array_helper::copy(other.m_data, Rows_ * m_cols, m_data);
416
+ }
417
+ EIGEN_DEVICE_FUNC DenseStorage& operator=(const DenseStorage& other) {
418
+ if (this != &other) {
419
+ m_cols = other.m_cols;
420
+ internal::plain_array_helper::copy(other.m_data, Rows_ * m_cols, m_data);
421
+ }
422
+ return *this;
423
+ }
424
+ EIGEN_DEVICE_FUNC DenseStorage(Index, Index, Index cols) : m_cols(cols) {}
425
+ EIGEN_DEVICE_FUNC void swap(DenseStorage& other) {
426
+ internal::plain_array_helper::swap(m_data, Rows_ * m_cols, other.m_data, Rows_ * other.m_cols);
427
+ numext::swap(m_cols, other.m_cols);
428
+ }
429
+ EIGEN_DEVICE_FUNC constexpr Index rows(void) const EIGEN_NOEXCEPT { return Rows_; }
430
+ EIGEN_DEVICE_FUNC constexpr Index cols(void) const EIGEN_NOEXCEPT { return m_cols; }
431
+ EIGEN_DEVICE_FUNC constexpr void conservativeResize(Index, Index, Index cols) { m_cols = cols; }
432
+ EIGEN_DEVICE_FUNC constexpr void resize(Index, Index, Index cols) { m_cols = cols; }
433
+ EIGEN_DEVICE_FUNC constexpr const T* data() const { return m_data.array; }
434
+ EIGEN_DEVICE_FUNC constexpr T* data() { return m_data.array; }
435
+ };
436
+
437
+ // purely dynamic matrix.
438
+ template <typename T, int Options_>
439
+ class DenseStorage<T, Dynamic, Dynamic, Dynamic, Options_> {
440
+ T* m_data;
441
+ Index m_rows;
442
+ Index m_cols;
443
+
444
+ public:
445
+ EIGEN_DEVICE_FUNC constexpr DenseStorage() : m_data(0), m_rows(0), m_cols(0) {}
446
+ EIGEN_DEVICE_FUNC explicit constexpr DenseStorage(internal::constructor_without_unaligned_array_assert)
447
+ : m_data(0), m_rows(0), m_cols(0) {}
448
+ EIGEN_DEVICE_FUNC DenseStorage(Index size, Index rows, Index cols)
449
+ : m_data(internal::conditional_aligned_new_auto<T, (Options_ & DontAlign) == 0>(size)),
450
+ m_rows(rows),
451
+ m_cols(cols) {
452
+ EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({})
453
+ eigen_internal_assert(size == rows * cols && rows >= 0 && cols >= 0);
454
+ }
455
+ EIGEN_DEVICE_FUNC DenseStorage(const DenseStorage& other)
456
+ : m_data(internal::conditional_aligned_new_auto<T, (Options_ & DontAlign) == 0>(other.m_rows * other.m_cols)),
457
+ m_rows(other.m_rows),
458
+ m_cols(other.m_cols) {
459
+ EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(Index size = m_rows * m_cols)
460
+ internal::smart_copy(other.m_data, other.m_data + other.m_rows * other.m_cols, m_data);
461
+ }
462
+ EIGEN_DEVICE_FUNC DenseStorage& operator=(const DenseStorage& other) {
463
+ if (this != &other) {
464
+ DenseStorage tmp(other);
465
+ this->swap(tmp);
466
+ }
467
+ return *this;
468
+ }
469
+ EIGEN_DEVICE_FUNC DenseStorage(DenseStorage&& other) EIGEN_NOEXCEPT : m_data(std::move(other.m_data)),
470
+ m_rows(std::move(other.m_rows)),
471
+ m_cols(std::move(other.m_cols)) {
472
+ other.m_data = nullptr;
473
+ other.m_rows = 0;
474
+ other.m_cols = 0;
475
+ }
476
+ EIGEN_DEVICE_FUNC DenseStorage& operator=(DenseStorage&& other) EIGEN_NOEXCEPT {
477
+ numext::swap(m_data, other.m_data);
478
+ numext::swap(m_rows, other.m_rows);
479
+ numext::swap(m_cols, other.m_cols);
480
+ return *this;
481
+ }
482
+ EIGEN_DEVICE_FUNC ~DenseStorage() {
483
+ internal::conditional_aligned_delete_auto<T, (Options_ & DontAlign) == 0>(m_data, m_rows * m_cols);
484
+ }
485
+ EIGEN_DEVICE_FUNC void swap(DenseStorage& other) {
486
+ numext::swap(m_data, other.m_data);
487
+ numext::swap(m_rows, other.m_rows);
488
+ numext::swap(m_cols, other.m_cols);
489
+ }
490
+ EIGEN_DEVICE_FUNC Index rows(void) const EIGEN_NOEXCEPT { return m_rows; }
491
+ EIGEN_DEVICE_FUNC Index cols(void) const EIGEN_NOEXCEPT { return m_cols; }
492
+ void conservativeResize(Index size, Index rows, Index cols) {
493
+ m_data =
494
+ internal::conditional_aligned_realloc_new_auto<T, (Options_ & DontAlign) == 0>(m_data, size, m_rows * m_cols);
495
+ m_rows = rows;
496
+ m_cols = cols;
497
+ }
498
+ EIGEN_DEVICE_FUNC void resize(Index size, Index rows, Index cols) {
499
+ if (size != m_rows * m_cols) {
500
+ internal::conditional_aligned_delete_auto<T, (Options_ & DontAlign) == 0>(m_data, m_rows * m_cols);
501
+ if (size > 0) // >0 and not simply !=0 to let the compiler knows that size cannot be negative
502
+ m_data = internal::conditional_aligned_new_auto<T, (Options_ & DontAlign) == 0>(size);
503
+ else
504
+ m_data = 0;
505
+ EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({})
506
+ }
507
+ m_rows = rows;
508
+ m_cols = cols;
509
+ }
510
+ EIGEN_DEVICE_FUNC const T* data() const { return m_data; }
511
+ EIGEN_DEVICE_FUNC T* data() { return m_data; }
512
+ };
513
+
514
+ // matrix with dynamic width and fixed height (so that matrix has dynamic size).
515
+ template <typename T, int Rows_, int Options_>
516
+ class DenseStorage<T, Dynamic, Rows_, Dynamic, Options_> {
517
+ T* m_data;
518
+ Index m_cols;
519
+
520
+ public:
521
+ EIGEN_DEVICE_FUNC constexpr DenseStorage() : m_data(0), m_cols(0) {}
522
+ explicit constexpr DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_cols(0) {}
523
+ EIGEN_DEVICE_FUNC DenseStorage(Index size, Index rows, Index cols)
524
+ : m_data(internal::conditional_aligned_new_auto<T, (Options_ & DontAlign) == 0>(size)), m_cols(cols) {
525
+ EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({})
526
+ eigen_internal_assert(size == rows * cols && rows == Rows_ && cols >= 0);
527
+ EIGEN_UNUSED_VARIABLE(rows);
528
+ }
529
+ EIGEN_DEVICE_FUNC DenseStorage(const DenseStorage& other)
530
+ : m_data(internal::conditional_aligned_new_auto<T, (Options_ & DontAlign) == 0>(Rows_ * other.m_cols)),
531
+ m_cols(other.m_cols) {
532
+ EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(Index size = m_cols * Rows_)
533
+ internal::smart_copy(other.m_data, other.m_data + Rows_ * m_cols, m_data);
534
+ }
535
+ EIGEN_DEVICE_FUNC DenseStorage& operator=(const DenseStorage& other) {
536
+ if (this != &other) {
537
+ DenseStorage tmp(other);
538
+ this->swap(tmp);
539
+ }
540
+ return *this;
541
+ }
542
+ EIGEN_DEVICE_FUNC DenseStorage(DenseStorage&& other) EIGEN_NOEXCEPT : m_data(std::move(other.m_data)),
543
+ m_cols(std::move(other.m_cols)) {
544
+ other.m_data = nullptr;
545
+ other.m_cols = 0;
546
+ }
547
+ EIGEN_DEVICE_FUNC DenseStorage& operator=(DenseStorage&& other) EIGEN_NOEXCEPT {
548
+ numext::swap(m_data, other.m_data);
549
+ numext::swap(m_cols, other.m_cols);
550
+ return *this;
551
+ }
552
+ EIGEN_DEVICE_FUNC ~DenseStorage() {
553
+ internal::conditional_aligned_delete_auto<T, (Options_ & DontAlign) == 0>(m_data, Rows_ * m_cols);
554
+ }
555
+ EIGEN_DEVICE_FUNC void swap(DenseStorage& other) {
556
+ numext::swap(m_data, other.m_data);
557
+ numext::swap(m_cols, other.m_cols);
558
+ }
559
+ EIGEN_DEVICE_FUNC static constexpr Index rows(void) EIGEN_NOEXCEPT { return Rows_; }
560
+ EIGEN_DEVICE_FUNC Index cols(void) const EIGEN_NOEXCEPT { return m_cols; }
561
+ EIGEN_DEVICE_FUNC void conservativeResize(Index size, Index, Index cols) {
562
+ m_data =
563
+ internal::conditional_aligned_realloc_new_auto<T, (Options_ & DontAlign) == 0>(m_data, size, Rows_ * m_cols);
564
+ m_cols = cols;
565
+ }
566
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void resize(Index size, Index, Index cols) {
567
+ if (size != Rows_ * m_cols) {
568
+ internal::conditional_aligned_delete_auto<T, (Options_ & DontAlign) == 0>(m_data, Rows_ * m_cols);
569
+ if (size > 0) // >0 and not simply !=0 to let the compiler knows that size cannot be negative
570
+ m_data = internal::conditional_aligned_new_auto<T, (Options_ & DontAlign) == 0>(size);
571
+ else
572
+ m_data = 0;
573
+ EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({})
574
+ }
575
+ m_cols = cols;
576
+ }
577
+ EIGEN_DEVICE_FUNC const T* data() const { return m_data; }
578
+ EIGEN_DEVICE_FUNC T* data() { return m_data; }
579
+ };
580
+
581
+ // matrix with dynamic height and fixed width (so that matrix has dynamic size).
582
+ template <typename T, int Cols_, int Options_>
583
+ class DenseStorage<T, Dynamic, Dynamic, Cols_, Options_> {
584
+ T* m_data;
585
+ Index m_rows;
586
+
587
+ public:
588
+ EIGEN_DEVICE_FUNC constexpr DenseStorage() : m_data(0), m_rows(0) {}
589
+ explicit constexpr DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_rows(0) {}
590
+ EIGEN_DEVICE_FUNC constexpr DenseStorage(Index size, Index rows, Index cols)
591
+ : m_data(internal::conditional_aligned_new_auto<T, (Options_ & DontAlign) == 0>(size)), m_rows(rows) {
592
+ EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({})
593
+ eigen_internal_assert(size == rows * cols && rows >= 0 && cols == Cols_);
594
+ EIGEN_UNUSED_VARIABLE(cols);
595
+ }
596
+ EIGEN_DEVICE_FUNC DenseStorage(const DenseStorage& other)
597
+ : m_data(internal::conditional_aligned_new_auto<T, (Options_ & DontAlign) == 0>(other.m_rows * Cols_)),
598
+ m_rows(other.m_rows) {
599
+ EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(Index size = m_rows * Cols_)
600
+ internal::smart_copy(other.m_data, other.m_data + other.m_rows * Cols_, m_data);
601
+ }
602
+ EIGEN_DEVICE_FUNC DenseStorage& operator=(const DenseStorage& other) {
603
+ if (this != &other) {
604
+ DenseStorage tmp(other);
605
+ this->swap(tmp);
606
+ }
607
+ return *this;
608
+ }
609
+ EIGEN_DEVICE_FUNC DenseStorage(DenseStorage&& other) EIGEN_NOEXCEPT : m_data(std::move(other.m_data)),
610
+ m_rows(std::move(other.m_rows)) {
611
+ other.m_data = nullptr;
612
+ other.m_rows = 0;
613
+ }
614
+ EIGEN_DEVICE_FUNC DenseStorage& operator=(DenseStorage&& other) EIGEN_NOEXCEPT {
615
+ numext::swap(m_data, other.m_data);
616
+ numext::swap(m_rows, other.m_rows);
617
+ return *this;
618
+ }
619
+ EIGEN_DEVICE_FUNC ~DenseStorage() {
620
+ internal::conditional_aligned_delete_auto<T, (Options_ & DontAlign) == 0>(m_data, Cols_ * m_rows);
621
+ }
622
+ EIGEN_DEVICE_FUNC void swap(DenseStorage& other) {
623
+ numext::swap(m_data, other.m_data);
624
+ numext::swap(m_rows, other.m_rows);
625
+ }
626
+ EIGEN_DEVICE_FUNC Index rows(void) const EIGEN_NOEXCEPT { return m_rows; }
627
+ EIGEN_DEVICE_FUNC static constexpr Index cols(void) { return Cols_; }
628
+ void conservativeResize(Index size, Index rows, Index) {
629
+ m_data =
630
+ internal::conditional_aligned_realloc_new_auto<T, (Options_ & DontAlign) == 0>(m_data, size, m_rows * Cols_);
631
+ m_rows = rows;
632
+ }
633
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void resize(Index size, Index rows, Index) {
634
+ if (size != m_rows * Cols_) {
635
+ internal::conditional_aligned_delete_auto<T, (Options_ & DontAlign) == 0>(m_data, Cols_ * m_rows);
636
+ if (size > 0) // >0 and not simply !=0 to let the compiler knows that size cannot be negative
637
+ m_data = internal::conditional_aligned_new_auto<T, (Options_ & DontAlign) == 0>(size);
638
+ else
639
+ m_data = 0;
640
+ EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({})
641
+ }
642
+ m_rows = rows;
643
+ }
644
+ EIGEN_DEVICE_FUNC const T* data() const { return m_data; }
645
+ EIGEN_DEVICE_FUNC T* data() { return m_data; }
646
+ };
647
+
648
+ } // end namespace Eigen
649
+
650
+ #endif // EIGEN_MATRIX_H
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/DeviceWrapper.h ADDED
@@ -0,0 +1,155 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2023 Charlie Schlosser <cs.schlosser@gmail.com>
5
+ //
6
+ // This Source Code Form is subject to the terms of the Mozilla
7
+ // Public License v. 2.0. If a copy of the MPL was not distributed
8
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9
+
10
+ #ifndef EIGEN_DEVICEWRAPPER_H
11
+ #define EIGEN_DEVICEWRAPPER_H
12
+
13
+ namespace Eigen {
14
+ template <typename Derived, typename Device>
15
+ struct DeviceWrapper {
16
+ using Base = EigenBase<internal::remove_all_t<Derived>>;
17
+ using Scalar = typename Derived::Scalar;
18
+
19
+ EIGEN_DEVICE_FUNC DeviceWrapper(Base& xpr, Device& device) : m_xpr(xpr.derived()), m_device(device) {}
20
+ EIGEN_DEVICE_FUNC DeviceWrapper(const Base& xpr, Device& device) : m_xpr(xpr.derived()), m_device(device) {}
21
+
22
+ template <typename OtherDerived>
23
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const EigenBase<OtherDerived>& other) {
24
+ using AssignOp = internal::assign_op<Scalar, typename OtherDerived::Scalar>;
25
+ internal::call_assignment(*this, other.derived(), AssignOp());
26
+ return m_xpr;
27
+ }
28
+ template <typename OtherDerived>
29
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator+=(const EigenBase<OtherDerived>& other) {
30
+ using AddAssignOp = internal::add_assign_op<Scalar, typename OtherDerived::Scalar>;
31
+ internal::call_assignment(*this, other.derived(), AddAssignOp());
32
+ return m_xpr;
33
+ }
34
+ template <typename OtherDerived>
35
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator-=(const EigenBase<OtherDerived>& other) {
36
+ using SubAssignOp = internal::sub_assign_op<Scalar, typename OtherDerived::Scalar>;
37
+ internal::call_assignment(*this, other.derived(), SubAssignOp());
38
+ return m_xpr;
39
+ }
40
+
41
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& derived() { return m_xpr; }
42
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Device& device() { return m_device; }
43
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE NoAlias<DeviceWrapper, EigenBase> noalias() {
44
+ return NoAlias<DeviceWrapper, EigenBase>(*this);
45
+ }
46
+
47
+ Derived& m_xpr;
48
+ Device& m_device;
49
+ };
50
+
51
+ namespace internal {
52
+
53
+ // this is where we differentiate between lazy assignment and specialized kernels (e.g. matrix products)
54
+ template <typename DstXprType, typename SrcXprType, typename Functor, typename Device,
55
+ typename Kind = typename AssignmentKind<typename evaluator_traits<DstXprType>::Shape,
56
+ typename evaluator_traits<SrcXprType>::Shape>::Kind,
57
+ typename EnableIf = void>
58
+ struct AssignmentWithDevice;
59
+
60
+ // unless otherwise specified, use the default product implementation
61
+ template <typename DstXprType, typename Lhs, typename Rhs, int Options, typename Functor, typename Device,
62
+ typename Weak>
63
+ struct AssignmentWithDevice<DstXprType, Product<Lhs, Rhs, Options>, Functor, Device, Dense2Dense, Weak> {
64
+ using SrcXprType = Product<Lhs, Rhs, Options>;
65
+ using Base = Assignment<DstXprType, SrcXprType, Functor>;
66
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(DstXprType& dst, const SrcXprType& src, const Functor& func,
67
+ Device&) {
68
+ Base::run(dst, src, func);
69
+ };
70
+ };
71
+
72
+ // specialization for coeffcient-wise assignment
73
+ template <typename DstXprType, typename SrcXprType, typename Functor, typename Device, typename Weak>
74
+ struct AssignmentWithDevice<DstXprType, SrcXprType, Functor, Device, Dense2Dense, Weak> {
75
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(DstXprType& dst, const SrcXprType& src, const Functor& func,
76
+ Device& device) {
77
+ #ifndef EIGEN_NO_DEBUG
78
+ internal::check_for_aliasing(dst, src);
79
+ #endif
80
+
81
+ call_dense_assignment_loop(dst, src, func, device);
82
+ }
83
+ };
84
+
85
+ // this allows us to use the default evaulation scheme if it is not specialized for the device
86
+ template <typename Kernel, typename Device, int Traversal = Kernel::AssignmentTraits::Traversal,
87
+ int Unrolling = Kernel::AssignmentTraits::Unrolling>
88
+ struct dense_assignment_loop_with_device {
89
+ using Base = dense_assignment_loop<Kernel, Traversal, Unrolling>;
90
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR void run(Kernel& kernel, Device&) { Base::run(kernel); }
91
+ };
92
+
93
+ // entry point for a generic expression with device
94
+ template <typename Dst, typename Src, typename Func, typename Device>
95
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR void call_assignment_no_alias(DeviceWrapper<Dst, Device> dst,
96
+ const Src& src, const Func& func) {
97
+ enum {
98
+ NeedToTranspose = ((int(Dst::RowsAtCompileTime) == 1 && int(Src::ColsAtCompileTime) == 1) ||
99
+ (int(Dst::ColsAtCompileTime) == 1 && int(Src::RowsAtCompileTime) == 1)) &&
100
+ int(Dst::SizeAtCompileTime) != 1
101
+ };
102
+
103
+ using ActualDstTypeCleaned = std::conditional_t<NeedToTranspose, Transpose<Dst>, Dst>;
104
+ using ActualDstType = std::conditional_t<NeedToTranspose, Transpose<Dst>, Dst&>;
105
+ ActualDstType actualDst(dst.derived());
106
+
107
+ // TODO check whether this is the right place to perform these checks:
108
+ EIGEN_STATIC_ASSERT_LVALUE(Dst)
109
+ EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(ActualDstTypeCleaned, Src)
110
+ EIGEN_CHECK_BINARY_COMPATIBILIY(Func, typename ActualDstTypeCleaned::Scalar, typename Src::Scalar);
111
+
112
+ // this provides a mechanism for specializing simple assignments, matrix products, etc
113
+ AssignmentWithDevice<ActualDstTypeCleaned, Src, Func, Device>::run(actualDst, src, func, dst.device());
114
+ }
115
+
116
+ // copy and pasted from AssignEvaluator except forward device to kernel
117
+ template <typename DstXprType, typename SrcXprType, typename Functor, typename Device>
118
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR void call_dense_assignment_loop(DstXprType& dst,
119
+ const SrcXprType& src,
120
+ const Functor& func,
121
+ Device& device) {
122
+ using DstEvaluatorType = evaluator<DstXprType>;
123
+ using SrcEvaluatorType = evaluator<SrcXprType>;
124
+
125
+ SrcEvaluatorType srcEvaluator(src);
126
+
127
+ // NOTE To properly handle A = (A*A.transpose())/s with A rectangular,
128
+ // we need to resize the destination after the source evaluator has been created.
129
+ resize_if_allowed(dst, src, func);
130
+
131
+ DstEvaluatorType dstEvaluator(dst);
132
+
133
+ using Kernel = generic_dense_assignment_kernel<DstEvaluatorType, SrcEvaluatorType, Functor>;
134
+
135
+ Kernel kernel(dstEvaluator, srcEvaluator, func, dst.const_cast_derived());
136
+
137
+ dense_assignment_loop_with_device<Kernel, Device>::run(kernel, device);
138
+ }
139
+
140
+ } // namespace internal
141
+
142
+ template <typename Derived>
143
+ template <typename Device>
144
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DeviceWrapper<Derived, Device> EigenBase<Derived>::device(Device& device) {
145
+ return DeviceWrapper<Derived, Device>(derived(), device);
146
+ }
147
+
148
+ template <typename Derived>
149
+ template <typename Device>
150
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DeviceWrapper<const Derived, Device> EigenBase<Derived>::device(
151
+ Device& device) const {
152
+ return DeviceWrapper<const Derived, Device>(derived(), device);
153
+ }
154
+ } // namespace Eigen
155
+ #endif
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/Diagonal.h ADDED
@@ -0,0 +1,221 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2007-2009 Benoit Jacob <jacob.benoit.1@gmail.com>
5
+ // Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
6
+ //
7
+ // This Source Code Form is subject to the terms of the Mozilla
8
+ // Public License v. 2.0. If a copy of the MPL was not distributed
9
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
10
+
11
+ #ifndef EIGEN_DIAGONAL_H
12
+ #define EIGEN_DIAGONAL_H
13
+
14
+ // IWYU pragma: private
15
+ #include "./InternalHeaderCheck.h"
16
+
17
+ namespace Eigen {
18
+
19
+ /** \class Diagonal
20
+ * \ingroup Core_Module
21
+ *
22
+ * \brief Expression of a diagonal/subdiagonal/superdiagonal in a matrix
23
+ *
24
+ * \tparam MatrixType the type of the object in which we are taking a sub/main/super diagonal
25
+ * \tparam DiagIndex the index of the sub/super diagonal. The default is 0 and it means the main diagonal.
26
+ * A positive value means a superdiagonal, a negative value means a subdiagonal.
27
+ * You can also use DynamicIndex so the index can be set at runtime.
28
+ *
29
+ * The matrix is not required to be square.
30
+ *
31
+ * This class represents an expression of the main diagonal, or any sub/super diagonal
32
+ * of a square matrix. It is the return type of MatrixBase::diagonal() and MatrixBase::diagonal(Index) and most of the
33
+ * time this is the only way it is used.
34
+ *
35
+ * \sa MatrixBase::diagonal(), MatrixBase::diagonal(Index)
36
+ */
37
+
38
+ namespace internal {
39
+ template <typename MatrixType, int DiagIndex>
40
+ struct traits<Diagonal<MatrixType, DiagIndex> > : traits<MatrixType> {
41
+ typedef typename ref_selector<MatrixType>::type MatrixTypeNested;
42
+ typedef std::remove_reference_t<MatrixTypeNested> MatrixTypeNested_;
43
+ typedef typename MatrixType::StorageKind StorageKind;
44
+ enum {
45
+ RowsAtCompileTime = (int(DiagIndex) == DynamicIndex || int(MatrixType::SizeAtCompileTime) == Dynamic)
46
+ ? Dynamic
47
+ : (plain_enum_min(MatrixType::RowsAtCompileTime - plain_enum_max(-DiagIndex, 0),
48
+ MatrixType::ColsAtCompileTime - plain_enum_max(DiagIndex, 0))),
49
+ ColsAtCompileTime = 1,
50
+ MaxRowsAtCompileTime =
51
+ int(MatrixType::MaxSizeAtCompileTime) == Dynamic ? Dynamic
52
+ : DiagIndex == DynamicIndex
53
+ ? min_size_prefer_fixed(MatrixType::MaxRowsAtCompileTime, MatrixType::MaxColsAtCompileTime)
54
+ : (plain_enum_min(MatrixType::MaxRowsAtCompileTime - plain_enum_max(-DiagIndex, 0),
55
+ MatrixType::MaxColsAtCompileTime - plain_enum_max(DiagIndex, 0))),
56
+ MaxColsAtCompileTime = 1,
57
+ MaskLvalueBit = is_lvalue<MatrixType>::value ? LvalueBit : 0,
58
+ Flags = (unsigned int)MatrixTypeNested_::Flags & (RowMajorBit | MaskLvalueBit | DirectAccessBit) &
59
+ ~RowMajorBit, // FIXME DirectAccessBit should not be handled by expressions
60
+ MatrixTypeOuterStride = outer_stride_at_compile_time<MatrixType>::ret,
61
+ InnerStrideAtCompileTime = MatrixTypeOuterStride == Dynamic ? Dynamic : MatrixTypeOuterStride + 1,
62
+ OuterStrideAtCompileTime = 0
63
+ };
64
+ };
65
+ } // namespace internal
66
+
67
+ template <typename MatrixType, int DiagIndex_>
68
+ class Diagonal : public internal::dense_xpr_base<Diagonal<MatrixType, DiagIndex_> >::type {
69
+ public:
70
+ enum { DiagIndex = DiagIndex_ };
71
+ typedef typename internal::dense_xpr_base<Diagonal>::type Base;
72
+ EIGEN_DENSE_PUBLIC_INTERFACE(Diagonal)
73
+
74
+ EIGEN_DEVICE_FUNC explicit inline Diagonal(MatrixType& matrix, Index a_index = DiagIndex)
75
+ : m_matrix(matrix), m_index(a_index) {
76
+ eigen_assert(a_index <= m_matrix.cols() && -a_index <= m_matrix.rows());
77
+ }
78
+
79
+ EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Diagonal)
80
+
81
+ EIGEN_DEVICE_FUNC inline Index rows() const {
82
+ return m_index.value() < 0 ? numext::mini<Index>(m_matrix.cols(), m_matrix.rows() + m_index.value())
83
+ : numext::mini<Index>(m_matrix.rows(), m_matrix.cols() - m_index.value());
84
+ }
85
+
86
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index cols() const EIGEN_NOEXCEPT { return 1; }
87
+
88
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index innerStride() const EIGEN_NOEXCEPT {
89
+ return m_matrix.outerStride() + 1;
90
+ }
91
+
92
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index outerStride() const EIGEN_NOEXCEPT { return 0; }
93
+
94
+ typedef std::conditional_t<internal::is_lvalue<MatrixType>::value, Scalar, const Scalar> ScalarWithConstIfNotLvalue;
95
+
96
+ EIGEN_DEVICE_FUNC inline ScalarWithConstIfNotLvalue* data() { return &(m_matrix.coeffRef(rowOffset(), colOffset())); }
97
+ EIGEN_DEVICE_FUNC inline const Scalar* data() const { return &(m_matrix.coeffRef(rowOffset(), colOffset())); }
98
+
99
+ EIGEN_DEVICE_FUNC inline Scalar& coeffRef(Index row, Index) {
100
+ EIGEN_STATIC_ASSERT_LVALUE(MatrixType)
101
+ return m_matrix.coeffRef(row + rowOffset(), row + colOffset());
102
+ }
103
+
104
+ EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index row, Index) const {
105
+ return m_matrix.coeffRef(row + rowOffset(), row + colOffset());
106
+ }
107
+
108
+ EIGEN_DEVICE_FUNC inline CoeffReturnType coeff(Index row, Index) const {
109
+ return m_matrix.coeff(row + rowOffset(), row + colOffset());
110
+ }
111
+
112
+ EIGEN_DEVICE_FUNC inline Scalar& coeffRef(Index idx) {
113
+ EIGEN_STATIC_ASSERT_LVALUE(MatrixType)
114
+ return m_matrix.coeffRef(idx + rowOffset(), idx + colOffset());
115
+ }
116
+
117
+ EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index idx) const {
118
+ return m_matrix.coeffRef(idx + rowOffset(), idx + colOffset());
119
+ }
120
+
121
+ EIGEN_DEVICE_FUNC inline CoeffReturnType coeff(Index idx) const {
122
+ return m_matrix.coeff(idx + rowOffset(), idx + colOffset());
123
+ }
124
+
125
+ EIGEN_DEVICE_FUNC inline const internal::remove_all_t<typename MatrixType::Nested>& nestedExpression() const {
126
+ return m_matrix;
127
+ }
128
+
129
+ EIGEN_DEVICE_FUNC inline Index index() const { return m_index.value(); }
130
+
131
+ protected:
132
+ typename internal::ref_selector<MatrixType>::non_const_type m_matrix;
133
+ const internal::variable_if_dynamicindex<Index, DiagIndex> m_index;
134
+
135
+ private:
136
+ // some compilers may fail to optimize std::max etc in case of compile-time constants...
137
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Index absDiagIndex() const EIGEN_NOEXCEPT {
138
+ return m_index.value() > 0 ? m_index.value() : -m_index.value();
139
+ }
140
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Index rowOffset() const EIGEN_NOEXCEPT {
141
+ return m_index.value() > 0 ? 0 : -m_index.value();
142
+ }
143
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Index colOffset() const EIGEN_NOEXCEPT {
144
+ return m_index.value() > 0 ? m_index.value() : 0;
145
+ }
146
+ // trigger a compile-time error if someone try to call packet
147
+ template <int LoadMode>
148
+ typename MatrixType::PacketReturnType packet(Index) const;
149
+ template <int LoadMode>
150
+ typename MatrixType::PacketReturnType packet(Index, Index) const;
151
+ };
152
+
153
+ /** \returns an expression of the main diagonal of the matrix \c *this
154
+ *
155
+ * \c *this is not required to be square.
156
+ *
157
+ * Example: \include MatrixBase_diagonal.cpp
158
+ * Output: \verbinclude MatrixBase_diagonal.out
159
+ *
160
+ * \sa class Diagonal */
161
+ template <typename Derived>
162
+ EIGEN_DEVICE_FUNC inline typename MatrixBase<Derived>::DiagonalReturnType MatrixBase<Derived>::diagonal() {
163
+ return DiagonalReturnType(derived());
164
+ }
165
+
166
+ /** This is the const version of diagonal(). */
167
+ template <typename Derived>
168
+ EIGEN_DEVICE_FUNC inline const typename MatrixBase<Derived>::ConstDiagonalReturnType MatrixBase<Derived>::diagonal()
169
+ const {
170
+ return ConstDiagonalReturnType(derived());
171
+ }
172
+
173
+ /** \returns an expression of the \a DiagIndex-th sub or super diagonal of the matrix \c *this
174
+ *
175
+ * \c *this is not required to be square.
176
+ *
177
+ * The template parameter \a DiagIndex represent a super diagonal if \a DiagIndex > 0
178
+ * and a sub diagonal otherwise. \a DiagIndex == 0 is equivalent to the main diagonal.
179
+ *
180
+ * Example: \include MatrixBase_diagonal_int.cpp
181
+ * Output: \verbinclude MatrixBase_diagonal_int.out
182
+ *
183
+ * \sa MatrixBase::diagonal(), class Diagonal */
184
+ template <typename Derived>
185
+ EIGEN_DEVICE_FUNC inline Diagonal<Derived, DynamicIndex> MatrixBase<Derived>::diagonal(Index index) {
186
+ return Diagonal<Derived, DynamicIndex>(derived(), index);
187
+ }
188
+
189
+ /** This is the const version of diagonal(Index). */
190
+ template <typename Derived>
191
+ EIGEN_DEVICE_FUNC inline const Diagonal<const Derived, DynamicIndex> MatrixBase<Derived>::diagonal(Index index) const {
192
+ return Diagonal<const Derived, DynamicIndex>(derived(), index);
193
+ }
194
+
195
+ /** \returns an expression of the \a DiagIndex-th sub or super diagonal of the matrix \c *this
196
+ *
197
+ * \c *this is not required to be square.
198
+ *
199
+ * The template parameter \a DiagIndex represent a super diagonal if \a DiagIndex > 0
200
+ * and a sub diagonal otherwise. \a DiagIndex == 0 is equivalent to the main diagonal.
201
+ *
202
+ * Example: \include MatrixBase_diagonal_template_int.cpp
203
+ * Output: \verbinclude MatrixBase_diagonal_template_int.out
204
+ *
205
+ * \sa MatrixBase::diagonal(), class Diagonal */
206
+ template <typename Derived>
207
+ template <int Index_>
208
+ EIGEN_DEVICE_FUNC inline Diagonal<Derived, Index_> MatrixBase<Derived>::diagonal() {
209
+ return Diagonal<Derived, Index_>(derived());
210
+ }
211
+
212
+ /** This is the const version of diagonal<int>(). */
213
+ template <typename Derived>
214
+ template <int Index_>
215
+ EIGEN_DEVICE_FUNC inline const Diagonal<const Derived, Index_> MatrixBase<Derived>::diagonal() const {
216
+ return Diagonal<const Derived, Index_>(derived());
217
+ }
218
+
219
+ } // end namespace Eigen
220
+
221
+ #endif // EIGEN_DIAGONAL_H
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/DiagonalMatrix.h ADDED
@@ -0,0 +1,414 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
5
+ // Copyright (C) 2007-2009 Benoit Jacob <jacob.benoit.1@gmail.com>
6
+ //
7
+ // This Source Code Form is subject to the terms of the Mozilla
8
+ // Public License v. 2.0. If a copy of the MPL was not distributed
9
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
10
+
11
+ #ifndef EIGEN_DIAGONALMATRIX_H
12
+ #define EIGEN_DIAGONALMATRIX_H
13
+
14
+ // IWYU pragma: private
15
+ #include "./InternalHeaderCheck.h"
16
+
17
+ namespace Eigen {
18
+
19
+ /** \class DiagonalBase
20
+ * \ingroup Core_Module
21
+ *
22
+ * \brief Base class for diagonal matrices and expressions
23
+ *
24
+ * This is the base class that is inherited by diagonal matrix and related expression
25
+ * types, which internally use a vector for storing the diagonal entries. Diagonal
26
+ * types always represent square matrices.
27
+ *
28
+ * \tparam Derived is the derived type, a DiagonalMatrix or DiagonalWrapper.
29
+ *
30
+ * \sa class DiagonalMatrix, class DiagonalWrapper
31
+ */
32
+ template <typename Derived>
33
+ class DiagonalBase : public EigenBase<Derived> {
34
+ public:
35
+ typedef typename internal::traits<Derived>::DiagonalVectorType DiagonalVectorType;
36
+ typedef typename DiagonalVectorType::Scalar Scalar;
37
+ typedef typename DiagonalVectorType::RealScalar RealScalar;
38
+ typedef typename internal::traits<Derived>::StorageKind StorageKind;
39
+ typedef typename internal::traits<Derived>::StorageIndex StorageIndex;
40
+
41
+ enum {
42
+ RowsAtCompileTime = DiagonalVectorType::SizeAtCompileTime,
43
+ ColsAtCompileTime = DiagonalVectorType::SizeAtCompileTime,
44
+ MaxRowsAtCompileTime = DiagonalVectorType::MaxSizeAtCompileTime,
45
+ MaxColsAtCompileTime = DiagonalVectorType::MaxSizeAtCompileTime,
46
+ IsVectorAtCompileTime = 0,
47
+ Flags = NoPreferredStorageOrderBit
48
+ };
49
+
50
+ typedef Matrix<Scalar, RowsAtCompileTime, ColsAtCompileTime, 0, MaxRowsAtCompileTime, MaxColsAtCompileTime>
51
+ DenseMatrixType;
52
+ typedef DenseMatrixType DenseType;
53
+ typedef DiagonalMatrix<Scalar, DiagonalVectorType::SizeAtCompileTime, DiagonalVectorType::MaxSizeAtCompileTime>
54
+ PlainObject;
55
+
56
+ /** \returns a reference to the derived object. */
57
+ EIGEN_DEVICE_FUNC inline const Derived& derived() const { return *static_cast<const Derived*>(this); }
58
+ /** \returns a const reference to the derived object. */
59
+ EIGEN_DEVICE_FUNC inline Derived& derived() { return *static_cast<Derived*>(this); }
60
+
61
+ /**
62
+ * Constructs a dense matrix from \c *this. Note, this directly returns a dense matrix type,
63
+ * not an expression.
64
+ * \returns A dense matrix, with its diagonal entries set from the the derived object. */
65
+ EIGEN_DEVICE_FUNC DenseMatrixType toDenseMatrix() const { return derived(); }
66
+
67
+ /** \returns a reference to the derived object's vector of diagonal coefficients. */
68
+ EIGEN_DEVICE_FUNC inline const DiagonalVectorType& diagonal() const { return derived().diagonal(); }
69
+ /** \returns a const reference to the derived object's vector of diagonal coefficients. */
70
+ EIGEN_DEVICE_FUNC inline DiagonalVectorType& diagonal() { return derived().diagonal(); }
71
+
72
+ /** \returns the value of the coefficient as if \c *this was a dense matrix. */
73
+ EIGEN_DEVICE_FUNC inline Scalar coeff(Index row, Index col) const {
74
+ eigen_assert(row >= 0 && col >= 0 && row < rows() && col <= cols());
75
+ return row == col ? diagonal().coeff(row) : Scalar(0);
76
+ }
77
+
78
+ /** \returns the number of rows. */
79
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index rows() const { return diagonal().size(); }
80
+ /** \returns the number of columns. */
81
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index cols() const { return diagonal().size(); }
82
+
83
+ /** \returns the diagonal matrix product of \c *this by the dense matrix, \a matrix */
84
+ template <typename MatrixDerived>
85
+ EIGEN_DEVICE_FUNC const Product<Derived, MatrixDerived, LazyProduct> operator*(
86
+ const MatrixBase<MatrixDerived>& matrix) const {
87
+ return Product<Derived, MatrixDerived, LazyProduct>(derived(), matrix.derived());
88
+ }
89
+
90
+ template <typename OtherDerived>
91
+ using DiagonalProductReturnType = DiagonalWrapper<const EIGEN_CWISE_BINARY_RETURN_TYPE(
92
+ DiagonalVectorType, typename OtherDerived::DiagonalVectorType, product)>;
93
+
94
+ /** \returns the diagonal matrix product of \c *this by the diagonal matrix \a other */
95
+ template <typename OtherDerived>
96
+ EIGEN_DEVICE_FUNC const DiagonalProductReturnType<OtherDerived> operator*(
97
+ const DiagonalBase<OtherDerived>& other) const {
98
+ return diagonal().cwiseProduct(other.diagonal()).asDiagonal();
99
+ }
100
+
101
+ using DiagonalInverseReturnType =
102
+ DiagonalWrapper<const CwiseUnaryOp<internal::scalar_inverse_op<Scalar>, const DiagonalVectorType>>;
103
+
104
+ /** \returns the inverse \c *this. Computed as the coefficient-wise inverse of the diagonal. */
105
+ EIGEN_DEVICE_FUNC inline const DiagonalInverseReturnType inverse() const {
106
+ return diagonal().cwiseInverse().asDiagonal();
107
+ }
108
+
109
+ using DiagonalScaleReturnType =
110
+ DiagonalWrapper<const EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(DiagonalVectorType, Scalar, product)>;
111
+
112
+ /** \returns the product of \c *this by the scalar \a scalar */
113
+ EIGEN_DEVICE_FUNC inline const DiagonalScaleReturnType operator*(const Scalar& scalar) const {
114
+ return (diagonal() * scalar).asDiagonal();
115
+ }
116
+
117
+ using ScaleDiagonalReturnType =
118
+ DiagonalWrapper<const EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(Scalar, DiagonalVectorType, product)>;
119
+
120
+ /** \returns the product of a scalar and the diagonal matrix \a other */
121
+ EIGEN_DEVICE_FUNC friend inline const ScaleDiagonalReturnType operator*(const Scalar& scalar,
122
+ const DiagonalBase& other) {
123
+ return (scalar * other.diagonal()).asDiagonal();
124
+ }
125
+
126
+ template <typename OtherDerived>
127
+ using DiagonalSumReturnType = DiagonalWrapper<const EIGEN_CWISE_BINARY_RETURN_TYPE(
128
+ DiagonalVectorType, typename OtherDerived::DiagonalVectorType, sum)>;
129
+
130
+ /** \returns the sum of \c *this and the diagonal matrix \a other */
131
+ template <typename OtherDerived>
132
+ EIGEN_DEVICE_FUNC inline const DiagonalSumReturnType<OtherDerived> operator+(
133
+ const DiagonalBase<OtherDerived>& other) const {
134
+ return (diagonal() + other.diagonal()).asDiagonal();
135
+ }
136
+
137
+ template <typename OtherDerived>
138
+ using DiagonalDifferenceReturnType = DiagonalWrapper<const EIGEN_CWISE_BINARY_RETURN_TYPE(
139
+ DiagonalVectorType, typename OtherDerived::DiagonalVectorType, difference)>;
140
+
141
+ /** \returns the difference of \c *this and the diagonal matrix \a other */
142
+ template <typename OtherDerived>
143
+ EIGEN_DEVICE_FUNC inline const DiagonalDifferenceReturnType<OtherDerived> operator-(
144
+ const DiagonalBase<OtherDerived>& other) const {
145
+ return (diagonal() - other.diagonal()).asDiagonal();
146
+ }
147
+ };
148
+
149
+ /** \class DiagonalMatrix
150
+ * \ingroup Core_Module
151
+ *
152
+ * \brief Represents a diagonal matrix with its storage
153
+ *
154
+ * \tparam Scalar_ the type of coefficients
155
+ * \tparam SizeAtCompileTime the dimension of the matrix, or Dynamic
156
+ * \tparam MaxSizeAtCompileTime the dimension of the matrix, or Dynamic. This parameter is optional and defaults
157
+ * to SizeAtCompileTime. Most of the time, you do not need to specify it.
158
+ *
159
+ * \sa class DiagonalBase, class DiagonalWrapper
160
+ */
161
+
162
+ namespace internal {
163
+ template <typename Scalar_, int SizeAtCompileTime, int MaxSizeAtCompileTime>
164
+ struct traits<DiagonalMatrix<Scalar_, SizeAtCompileTime, MaxSizeAtCompileTime>>
165
+ : traits<Matrix<Scalar_, SizeAtCompileTime, SizeAtCompileTime, 0, MaxSizeAtCompileTime, MaxSizeAtCompileTime>> {
166
+ typedef Matrix<Scalar_, SizeAtCompileTime, 1, 0, MaxSizeAtCompileTime, 1> DiagonalVectorType;
167
+ typedef DiagonalShape StorageKind;
168
+ enum { Flags = LvalueBit | NoPreferredStorageOrderBit | NestByRefBit };
169
+ };
170
+ } // namespace internal
171
+ template <typename Scalar_, int SizeAtCompileTime, int MaxSizeAtCompileTime>
172
+ class DiagonalMatrix : public DiagonalBase<DiagonalMatrix<Scalar_, SizeAtCompileTime, MaxSizeAtCompileTime>> {
173
+ public:
174
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
175
+ typedef typename internal::traits<DiagonalMatrix>::DiagonalVectorType DiagonalVectorType;
176
+ typedef const DiagonalMatrix& Nested;
177
+ typedef Scalar_ Scalar;
178
+ typedef typename internal::traits<DiagonalMatrix>::StorageKind StorageKind;
179
+ typedef typename internal::traits<DiagonalMatrix>::StorageIndex StorageIndex;
180
+ #endif
181
+
182
+ protected:
183
+ DiagonalVectorType m_diagonal;
184
+
185
+ public:
186
+ /** const version of diagonal(). */
187
+ EIGEN_DEVICE_FUNC inline const DiagonalVectorType& diagonal() const { return m_diagonal; }
188
+ /** \returns a reference to the stored vector of diagonal coefficients. */
189
+ EIGEN_DEVICE_FUNC inline DiagonalVectorType& diagonal() { return m_diagonal; }
190
+
191
+ /** Default constructor without initialization */
192
+ EIGEN_DEVICE_FUNC inline DiagonalMatrix() {}
193
+
194
+ /** Constructs a diagonal matrix with given dimension */
195
+ EIGEN_DEVICE_FUNC explicit inline DiagonalMatrix(Index dim) : m_diagonal(dim) {}
196
+
197
+ /** 2D constructor. */
198
+ EIGEN_DEVICE_FUNC inline DiagonalMatrix(const Scalar& x, const Scalar& y) : m_diagonal(x, y) {}
199
+
200
+ /** 3D constructor. */
201
+ EIGEN_DEVICE_FUNC inline DiagonalMatrix(const Scalar& x, const Scalar& y, const Scalar& z) : m_diagonal(x, y, z) {}
202
+
203
+ /** \brief Construct a diagonal matrix with fixed size from an arbitrary number of coefficients.
204
+ *
205
+ * \warning To construct a diagonal matrix of fixed size, the number of values passed to this
206
+ * constructor must match the fixed dimension of \c *this.
207
+ *
208
+ * \sa DiagonalMatrix(const Scalar&, const Scalar&)
209
+ * \sa DiagonalMatrix(const Scalar&, const Scalar&, const Scalar&)
210
+ */
211
+ template <typename... ArgTypes>
212
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DiagonalMatrix(const Scalar& a0, const Scalar& a1, const Scalar& a2,
213
+ const ArgTypes&... args)
214
+ : m_diagonal(a0, a1, a2, args...) {}
215
+
216
+ /** \brief Constructs a DiagonalMatrix and initializes it by elements given by an initializer list of initializer
217
+ * lists \cpp11
218
+ */
219
+ EIGEN_DEVICE_FUNC explicit EIGEN_STRONG_INLINE DiagonalMatrix(
220
+ const std::initializer_list<std::initializer_list<Scalar>>& list)
221
+ : m_diagonal(list) {}
222
+
223
+ /** \brief Constructs a DiagonalMatrix from an r-value diagonal vector type */
224
+ EIGEN_DEVICE_FUNC explicit inline DiagonalMatrix(DiagonalVectorType&& diag) : m_diagonal(std::move(diag)) {}
225
+
226
+ /** Copy constructor. */
227
+ template <typename OtherDerived>
228
+ EIGEN_DEVICE_FUNC inline DiagonalMatrix(const DiagonalBase<OtherDerived>& other) : m_diagonal(other.diagonal()) {}
229
+
230
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
231
+ /** copy constructor. prevent a default copy constructor from hiding the other templated constructor */
232
+ inline DiagonalMatrix(const DiagonalMatrix& other) : m_diagonal(other.diagonal()) {}
233
+ #endif
234
+
235
+ /** generic constructor from expression of the diagonal coefficients */
236
+ template <typename OtherDerived>
237
+ EIGEN_DEVICE_FUNC explicit inline DiagonalMatrix(const MatrixBase<OtherDerived>& other) : m_diagonal(other) {}
238
+
239
+ /** Copy operator. */
240
+ template <typename OtherDerived>
241
+ EIGEN_DEVICE_FUNC DiagonalMatrix& operator=(const DiagonalBase<OtherDerived>& other) {
242
+ m_diagonal = other.diagonal();
243
+ return *this;
244
+ }
245
+
246
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
247
+ /** This is a special case of the templated operator=. Its purpose is to
248
+ * prevent a default operator= from hiding the templated operator=.
249
+ */
250
+ EIGEN_DEVICE_FUNC DiagonalMatrix& operator=(const DiagonalMatrix& other) {
251
+ m_diagonal = other.diagonal();
252
+ return *this;
253
+ }
254
+ #endif
255
+
256
+ typedef DiagonalWrapper<const CwiseNullaryOp<internal::scalar_constant_op<Scalar>, DiagonalVectorType>>
257
+ InitializeReturnType;
258
+
259
+ /** Initializes a diagonal matrix of size SizeAtCompileTime with coefficients set to zero */
260
+ EIGEN_DEVICE_FUNC static const InitializeReturnType Zero() { return DiagonalVectorType::Zero().asDiagonal(); }
261
+ /** Initializes a diagonal matrix of size dim with coefficients set to zero */
262
+ EIGEN_DEVICE_FUNC static const InitializeReturnType Zero(Index size) {
263
+ return DiagonalVectorType::Zero(size).asDiagonal();
264
+ }
265
+ /** Initializes a identity matrix of size SizeAtCompileTime */
266
+ EIGEN_DEVICE_FUNC static const InitializeReturnType Identity() { return DiagonalVectorType::Ones().asDiagonal(); }
267
+ /** Initializes a identity matrix of size dim */
268
+ EIGEN_DEVICE_FUNC static const InitializeReturnType Identity(Index size) {
269
+ return DiagonalVectorType::Ones(size).asDiagonal();
270
+ }
271
+
272
+ /** Resizes to given size. */
273
+ EIGEN_DEVICE_FUNC inline void resize(Index size) { m_diagonal.resize(size); }
274
+ /** Sets all coefficients to zero. */
275
+ EIGEN_DEVICE_FUNC inline void setZero() { m_diagonal.setZero(); }
276
+ /** Resizes and sets all coefficients to zero. */
277
+ EIGEN_DEVICE_FUNC inline void setZero(Index size) { m_diagonal.setZero(size); }
278
+ /** Sets this matrix to be the identity matrix of the current size. */
279
+ EIGEN_DEVICE_FUNC inline void setIdentity() { m_diagonal.setOnes(); }
280
+ /** Sets this matrix to be the identity matrix of the given size. */
281
+ EIGEN_DEVICE_FUNC inline void setIdentity(Index size) { m_diagonal.setOnes(size); }
282
+ };
283
+
284
+ /** \class DiagonalWrapper
285
+ * \ingroup Core_Module
286
+ *
287
+ * \brief Expression of a diagonal matrix
288
+ *
289
+ * \tparam DiagonalVectorType_ the type of the vector of diagonal coefficients
290
+ *
291
+ * This class is an expression of a diagonal matrix, but not storing its own vector of diagonal coefficients,
292
+ * instead wrapping an existing vector expression. It is the return type of MatrixBase::asDiagonal()
293
+ * and most of the time this is the only way that it is used.
294
+ *
295
+ * \sa class DiagonalMatrix, class DiagonalBase, MatrixBase::asDiagonal()
296
+ */
297
+
298
+ namespace internal {
299
+ template <typename DiagonalVectorType_>
300
+ struct traits<DiagonalWrapper<DiagonalVectorType_>> {
301
+ typedef DiagonalVectorType_ DiagonalVectorType;
302
+ typedef typename DiagonalVectorType::Scalar Scalar;
303
+ typedef typename DiagonalVectorType::StorageIndex StorageIndex;
304
+ typedef DiagonalShape StorageKind;
305
+ typedef typename traits<DiagonalVectorType>::XprKind XprKind;
306
+ enum {
307
+ RowsAtCompileTime = DiagonalVectorType::SizeAtCompileTime,
308
+ ColsAtCompileTime = DiagonalVectorType::SizeAtCompileTime,
309
+ MaxRowsAtCompileTime = DiagonalVectorType::MaxSizeAtCompileTime,
310
+ MaxColsAtCompileTime = DiagonalVectorType::MaxSizeAtCompileTime,
311
+ Flags = (traits<DiagonalVectorType>::Flags & LvalueBit) | NoPreferredStorageOrderBit
312
+ };
313
+ };
314
+ } // namespace internal
315
+
316
+ template <typename DiagonalVectorType_>
317
+ class DiagonalWrapper : public DiagonalBase<DiagonalWrapper<DiagonalVectorType_>>, internal::no_assignment_operator {
318
+ public:
319
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
320
+ typedef DiagonalVectorType_ DiagonalVectorType;
321
+ typedef DiagonalWrapper Nested;
322
+ #endif
323
+
324
+ /** Constructor from expression of diagonal coefficients to wrap. */
325
+ EIGEN_DEVICE_FUNC explicit inline DiagonalWrapper(DiagonalVectorType& a_diagonal) : m_diagonal(a_diagonal) {}
326
+
327
+ /** \returns a const reference to the wrapped expression of diagonal coefficients. */
328
+ EIGEN_DEVICE_FUNC const DiagonalVectorType& diagonal() const { return m_diagonal; }
329
+
330
+ protected:
331
+ typename DiagonalVectorType::Nested m_diagonal;
332
+ };
333
+
334
+ /** \returns a pseudo-expression of a diagonal matrix with *this as vector of diagonal coefficients
335
+ *
336
+ * \only_for_vectors
337
+ *
338
+ * Example: \include MatrixBase_asDiagonal.cpp
339
+ * Output: \verbinclude MatrixBase_asDiagonal.out
340
+ *
341
+ * \sa class DiagonalWrapper, class DiagonalMatrix, diagonal(), isDiagonal()
342
+ **/
343
+ template <typename Derived>
344
+ EIGEN_DEVICE_FUNC inline const DiagonalWrapper<const Derived> MatrixBase<Derived>::asDiagonal() const {
345
+ return DiagonalWrapper<const Derived>(derived());
346
+ }
347
+
348
+ /** \returns true if *this is approximately equal to a diagonal matrix,
349
+ * within the precision given by \a prec.
350
+ *
351
+ * Example: \include MatrixBase_isDiagonal.cpp
352
+ * Output: \verbinclude MatrixBase_isDiagonal.out
353
+ *
354
+ * \sa asDiagonal()
355
+ */
356
+ template <typename Derived>
357
+ bool MatrixBase<Derived>::isDiagonal(const RealScalar& prec) const {
358
+ if (cols() != rows()) return false;
359
+ RealScalar maxAbsOnDiagonal = static_cast<RealScalar>(-1);
360
+ for (Index j = 0; j < cols(); ++j) {
361
+ RealScalar absOnDiagonal = numext::abs(coeff(j, j));
362
+ if (absOnDiagonal > maxAbsOnDiagonal) maxAbsOnDiagonal = absOnDiagonal;
363
+ }
364
+ for (Index j = 0; j < cols(); ++j)
365
+ for (Index i = 0; i < j; ++i) {
366
+ if (!internal::isMuchSmallerThan(coeff(i, j), maxAbsOnDiagonal, prec)) return false;
367
+ if (!internal::isMuchSmallerThan(coeff(j, i), maxAbsOnDiagonal, prec)) return false;
368
+ }
369
+ return true;
370
+ }
371
+
372
+ namespace internal {
373
+
374
+ template <>
375
+ struct storage_kind_to_shape<DiagonalShape> {
376
+ typedef DiagonalShape Shape;
377
+ };
378
+
379
+ struct Diagonal2Dense {};
380
+
381
+ template <>
382
+ struct AssignmentKind<DenseShape, DiagonalShape> {
383
+ typedef Diagonal2Dense Kind;
384
+ };
385
+
386
+ // Diagonal matrix to Dense assignment
387
+ template <typename DstXprType, typename SrcXprType, typename Functor>
388
+ struct Assignment<DstXprType, SrcXprType, Functor, Diagonal2Dense> {
389
+ static void run(DstXprType& dst, const SrcXprType& src,
390
+ const internal::assign_op<typename DstXprType::Scalar, typename SrcXprType::Scalar>& /*func*/) {
391
+ Index dstRows = src.rows();
392
+ Index dstCols = src.cols();
393
+ if ((dst.rows() != dstRows) || (dst.cols() != dstCols)) dst.resize(dstRows, dstCols);
394
+
395
+ dst.setZero();
396
+ dst.diagonal() = src.diagonal();
397
+ }
398
+
399
+ static void run(DstXprType& dst, const SrcXprType& src,
400
+ const internal::add_assign_op<typename DstXprType::Scalar, typename SrcXprType::Scalar>& /*func*/) {
401
+ dst.diagonal() += src.diagonal();
402
+ }
403
+
404
+ static void run(DstXprType& dst, const SrcXprType& src,
405
+ const internal::sub_assign_op<typename DstXprType::Scalar, typename SrcXprType::Scalar>& /*func*/) {
406
+ dst.diagonal() -= src.diagonal();
407
+ }
408
+ };
409
+
410
+ } // namespace internal
411
+
412
+ } // end namespace Eigen
413
+
414
+ #endif // EIGEN_DIAGONALMATRIX_H
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/DiagonalProduct.h ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
5
+ // Copyright (C) 2007-2009 Benoit Jacob <jacob.benoit.1@gmail.com>
6
+ //
7
+ // This Source Code Form is subject to the terms of the Mozilla
8
+ // Public License v. 2.0. If a copy of the MPL was not distributed
9
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
10
+
11
+ #ifndef EIGEN_DIAGONALPRODUCT_H
12
+ #define EIGEN_DIAGONALPRODUCT_H
13
+
14
+ // IWYU pragma: private
15
+ #include "./InternalHeaderCheck.h"
16
+
17
+ namespace Eigen {
18
+
19
+ /** \returns the diagonal matrix product of \c *this by the diagonal matrix \a diagonal.
20
+ */
21
+ template <typename Derived>
22
+ template <typename DiagonalDerived>
23
+ EIGEN_DEVICE_FUNC inline const Product<Derived, DiagonalDerived, LazyProduct> MatrixBase<Derived>::operator*(
24
+ const DiagonalBase<DiagonalDerived> &a_diagonal) const {
25
+ return Product<Derived, DiagonalDerived, LazyProduct>(derived(), a_diagonal.derived());
26
+ }
27
+
28
+ } // end namespace Eigen
29
+
30
+ #endif // EIGEN_DIAGONALPRODUCT_H
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/Dot.h ADDED
@@ -0,0 +1,289 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2006-2008, 2010 Benoit Jacob <jacob.benoit.1@gmail.com>
5
+ //
6
+ // This Source Code Form is subject to the terms of the Mozilla
7
+ // Public License v. 2.0. If a copy of the MPL was not distributed
8
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9
+
10
+ #ifndef EIGEN_DOT_H
11
+ #define EIGEN_DOT_H
12
+
13
+ // IWYU pragma: private
14
+ #include "./InternalHeaderCheck.h"
15
+
16
+ namespace Eigen {
17
+
18
+ namespace internal {
19
+
20
+ // helper function for dot(). The problem is that if we put that in the body of dot(), then upon calling dot
21
+ // with mismatched types, the compiler emits errors about failing to instantiate cwiseProduct BEFORE
22
+ // looking at the static assertions. Thus this is a trick to get better compile errors.
23
+ template <typename T, typename U,
24
+ bool NeedToTranspose = T::IsVectorAtCompileTime && U::IsVectorAtCompileTime &&
25
+ ((int(T::RowsAtCompileTime) == 1 && int(U::ColsAtCompileTime) == 1) ||
26
+ (int(T::ColsAtCompileTime) == 1 && int(U::RowsAtCompileTime) == 1))>
27
+ struct dot_nocheck {
28
+ typedef scalar_conj_product_op<typename traits<T>::Scalar, typename traits<U>::Scalar> conj_prod;
29
+ typedef typename conj_prod::result_type ResScalar;
30
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static ResScalar run(const MatrixBase<T>& a, const MatrixBase<U>& b) {
31
+ return a.template binaryExpr<conj_prod>(b).sum();
32
+ }
33
+ };
34
+
35
+ template <typename T, typename U>
36
+ struct dot_nocheck<T, U, true> {
37
+ typedef scalar_conj_product_op<typename traits<T>::Scalar, typename traits<U>::Scalar> conj_prod;
38
+ typedef typename conj_prod::result_type ResScalar;
39
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static ResScalar run(const MatrixBase<T>& a, const MatrixBase<U>& b) {
40
+ return a.transpose().template binaryExpr<conj_prod>(b).sum();
41
+ }
42
+ };
43
+
44
+ } // end namespace internal
45
+
46
+ /** \fn MatrixBase::dot
47
+ * \returns the dot product of *this with other.
48
+ *
49
+ * \only_for_vectors
50
+ *
51
+ * \note If the scalar type is complex numbers, then this function returns the hermitian
52
+ * (sesquilinear) dot product, conjugate-linear in the first variable and linear in the
53
+ * second variable.
54
+ *
55
+ * \sa squaredNorm(), norm()
56
+ */
57
+ template <typename Derived>
58
+ template <typename OtherDerived>
59
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
60
+ typename ScalarBinaryOpTraits<typename internal::traits<Derived>::Scalar,
61
+ typename internal::traits<OtherDerived>::Scalar>::ReturnType
62
+ MatrixBase<Derived>::dot(const MatrixBase<OtherDerived>& other) const {
63
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
64
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
65
+ EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived, OtherDerived)
66
+ #if !(defined(EIGEN_NO_STATIC_ASSERT) && defined(EIGEN_NO_DEBUG))
67
+ EIGEN_CHECK_BINARY_COMPATIBILIY(
68
+ Eigen::internal::scalar_conj_product_op<Scalar EIGEN_COMMA typename OtherDerived::Scalar>, Scalar,
69
+ typename OtherDerived::Scalar);
70
+ #endif
71
+
72
+ eigen_assert(size() == other.size());
73
+
74
+ return internal::dot_nocheck<Derived, OtherDerived>::run(*this, other);
75
+ }
76
+
77
+ //---------- implementation of L2 norm and related functions ----------
78
+
79
+ /** \returns, for vectors, the squared \em l2 norm of \c *this, and for matrices the squared Frobenius norm.
80
+ * In both cases, it consists in the sum of the square of all the matrix entries.
81
+ * For vectors, this is also equals to the dot product of \c *this with itself.
82
+ *
83
+ * \sa dot(), norm(), lpNorm()
84
+ */
85
+ template <typename Derived>
86
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
87
+ MatrixBase<Derived>::squaredNorm() const {
88
+ return numext::real((*this).cwiseAbs2().sum());
89
+ }
90
+
91
+ /** \returns, for vectors, the \em l2 norm of \c *this, and for matrices the Frobenius norm.
92
+ * In both cases, it consists in the square root of the sum of the square of all the matrix entries.
93
+ * For vectors, this is also equals to the square root of the dot product of \c *this with itself.
94
+ *
95
+ * \sa lpNorm(), dot(), squaredNorm()
96
+ */
97
+ template <typename Derived>
98
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
99
+ MatrixBase<Derived>::norm() const {
100
+ return numext::sqrt(squaredNorm());
101
+ }
102
+
103
+ /** \returns an expression of the quotient of \c *this by its own norm.
104
+ *
105
+ * \warning If the input vector is too small (i.e., this->norm()==0),
106
+ * then this function returns a copy of the input.
107
+ *
108
+ * \only_for_vectors
109
+ *
110
+ * \sa norm(), normalize()
111
+ */
112
+ template <typename Derived>
113
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::PlainObject MatrixBase<Derived>::normalized()
114
+ const {
115
+ typedef typename internal::nested_eval<Derived, 2>::type Nested_;
116
+ Nested_ n(derived());
117
+ RealScalar z = n.squaredNorm();
118
+ // NOTE: after extensive benchmarking, this conditional does not impact performance, at least on recent x86 CPU
119
+ if (z > RealScalar(0))
120
+ return n / numext::sqrt(z);
121
+ else
122
+ return n;
123
+ }
124
+
125
+ /** Normalizes the vector, i.e. divides it by its own norm.
126
+ *
127
+ * \only_for_vectors
128
+ *
129
+ * \warning If the input vector is too small (i.e., this->norm()==0), then \c *this is left unchanged.
130
+ *
131
+ * \sa norm(), normalized()
132
+ */
133
+ template <typename Derived>
134
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void MatrixBase<Derived>::normalize() {
135
+ RealScalar z = squaredNorm();
136
+ // NOTE: after extensive benchmarking, this conditional does not impact performance, at least on recent x86 CPU
137
+ if (z > RealScalar(0)) derived() /= numext::sqrt(z);
138
+ }
139
+
140
+ /** \returns an expression of the quotient of \c *this by its own norm while avoiding underflow and overflow.
141
+ *
142
+ * \only_for_vectors
143
+ *
144
+ * This method is analogue to the normalized() method, but it reduces the risk of
145
+ * underflow and overflow when computing the norm.
146
+ *
147
+ * \warning If the input vector is too small (i.e., this->norm()==0),
148
+ * then this function returns a copy of the input.
149
+ *
150
+ * \sa stableNorm(), stableNormalize(), normalized()
151
+ */
152
+ template <typename Derived>
153
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::PlainObject
154
+ MatrixBase<Derived>::stableNormalized() const {
155
+ typedef typename internal::nested_eval<Derived, 3>::type Nested_;
156
+ Nested_ n(derived());
157
+ RealScalar w = n.cwiseAbs().maxCoeff();
158
+ RealScalar z = (n / w).squaredNorm();
159
+ if (z > RealScalar(0))
160
+ return n / (numext::sqrt(z) * w);
161
+ else
162
+ return n;
163
+ }
164
+
165
+ /** Normalizes the vector while avoid underflow and overflow
166
+ *
167
+ * \only_for_vectors
168
+ *
169
+ * This method is analogue to the normalize() method, but it reduces the risk of
170
+ * underflow and overflow when computing the norm.
171
+ *
172
+ * \warning If the input vector is too small (i.e., this->norm()==0), then \c *this is left unchanged.
173
+ *
174
+ * \sa stableNorm(), stableNormalized(), normalize()
175
+ */
176
+ template <typename Derived>
177
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void MatrixBase<Derived>::stableNormalize() {
178
+ RealScalar w = cwiseAbs().maxCoeff();
179
+ RealScalar z = (derived() / w).squaredNorm();
180
+ if (z > RealScalar(0)) derived() /= numext::sqrt(z) * w;
181
+ }
182
+
183
+ //---------- implementation of other norms ----------
184
+
185
+ namespace internal {
186
+
187
+ template <typename Derived, int p>
188
+ struct lpNorm_selector {
189
+ typedef typename NumTraits<typename traits<Derived>::Scalar>::Real RealScalar;
190
+ EIGEN_DEVICE_FUNC static inline RealScalar run(const MatrixBase<Derived>& m) {
191
+ EIGEN_USING_STD(pow)
192
+ return pow(m.cwiseAbs().array().pow(p).sum(), RealScalar(1) / p);
193
+ }
194
+ };
195
+
196
+ template <typename Derived>
197
+ struct lpNorm_selector<Derived, 1> {
198
+ EIGEN_DEVICE_FUNC static inline typename NumTraits<typename traits<Derived>::Scalar>::Real run(
199
+ const MatrixBase<Derived>& m) {
200
+ return m.cwiseAbs().sum();
201
+ }
202
+ };
203
+
204
+ template <typename Derived>
205
+ struct lpNorm_selector<Derived, 2> {
206
+ EIGEN_DEVICE_FUNC static inline typename NumTraits<typename traits<Derived>::Scalar>::Real run(
207
+ const MatrixBase<Derived>& m) {
208
+ return m.norm();
209
+ }
210
+ };
211
+
212
+ template <typename Derived>
213
+ struct lpNorm_selector<Derived, Infinity> {
214
+ typedef typename NumTraits<typename traits<Derived>::Scalar>::Real RealScalar;
215
+ EIGEN_DEVICE_FUNC static inline RealScalar run(const MatrixBase<Derived>& m) {
216
+ if (Derived::SizeAtCompileTime == 0 || (Derived::SizeAtCompileTime == Dynamic && m.size() == 0))
217
+ return RealScalar(0);
218
+ return m.cwiseAbs().maxCoeff();
219
+ }
220
+ };
221
+
222
+ } // end namespace internal
223
+
224
+ /** \returns the \b coefficient-wise \f$ \ell^p \f$ norm of \c *this, that is, returns the p-th root of the sum of the
225
+ * p-th powers of the absolute values of the coefficients of \c *this. If \a p is the special value \a Eigen::Infinity,
226
+ * this function returns the \f$ \ell^\infty \f$ norm, that is the maximum of the absolute values of the coefficients of
227
+ * \c *this.
228
+ *
229
+ * In all cases, if \c *this is empty, then the value 0 is returned.
230
+ *
231
+ * \note For matrices, this function does not compute the <a
232
+ * href="https://en.wikipedia.org/wiki/Operator_norm">operator-norm</a>. That is, if \c *this is a matrix, then its
233
+ * coefficients are interpreted as a 1D vector. Nonetheless, you can easily compute the 1-norm and \f$\infty\f$-norm
234
+ * matrix operator norms using \link TutorialReductionsVisitorsBroadcastingReductionsNorm partial reductions \endlink.
235
+ *
236
+ * \sa norm()
237
+ */
238
+ template <typename Derived>
239
+ template <int p>
240
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
241
+ EIGEN_DEVICE_FUNC inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
242
+ #else
243
+ EIGEN_DEVICE_FUNC MatrixBase<Derived>::RealScalar
244
+ #endif
245
+ MatrixBase<Derived>::lpNorm() const {
246
+ return internal::lpNorm_selector<Derived, p>::run(*this);
247
+ }
248
+
249
+ //---------- implementation of isOrthogonal / isUnitary ----------
250
+
251
+ /** \returns true if *this is approximately orthogonal to \a other,
252
+ * within the precision given by \a prec.
253
+ *
254
+ * Example: \include MatrixBase_isOrthogonal.cpp
255
+ * Output: \verbinclude MatrixBase_isOrthogonal.out
256
+ */
257
+ template <typename Derived>
258
+ template <typename OtherDerived>
259
+ bool MatrixBase<Derived>::isOrthogonal(const MatrixBase<OtherDerived>& other, const RealScalar& prec) const {
260
+ typename internal::nested_eval<Derived, 2>::type nested(derived());
261
+ typename internal::nested_eval<OtherDerived, 2>::type otherNested(other.derived());
262
+ return numext::abs2(nested.dot(otherNested)) <= prec * prec * nested.squaredNorm() * otherNested.squaredNorm();
263
+ }
264
+
265
+ /** \returns true if *this is approximately an unitary matrix,
266
+ * within the precision given by \a prec. In the case where the \a Scalar
267
+ * type is real numbers, a unitary matrix is an orthogonal matrix, whence the name.
268
+ *
269
+ * \note This can be used to check whether a family of vectors forms an orthonormal basis.
270
+ * Indeed, \c m.isUnitary() returns true if and only if the columns (equivalently, the rows) of m form an
271
+ * orthonormal basis.
272
+ *
273
+ * Example: \include MatrixBase_isUnitary.cpp
274
+ * Output: \verbinclude MatrixBase_isUnitary.out
275
+ */
276
+ template <typename Derived>
277
+ bool MatrixBase<Derived>::isUnitary(const RealScalar& prec) const {
278
+ typename internal::nested_eval<Derived, 1>::type self(derived());
279
+ for (Index i = 0; i < cols(); ++i) {
280
+ if (!internal::isApprox(self.col(i).squaredNorm(), static_cast<RealScalar>(1), prec)) return false;
281
+ for (Index j = 0; j < i; ++j)
282
+ if (!internal::isMuchSmallerThan(self.col(i).dot(self.col(j)), static_cast<Scalar>(1), prec)) return false;
283
+ }
284
+ return true;
285
+ }
286
+
287
+ } // end namespace Eigen
288
+
289
+ #endif // EIGEN_DOT_H
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/ForceAlignedAccess.h ADDED
@@ -0,0 +1,131 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
5
+ //
6
+ // This Source Code Form is subject to the terms of the Mozilla
7
+ // Public License v. 2.0. If a copy of the MPL was not distributed
8
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9
+
10
+ #ifndef EIGEN_FORCEALIGNEDACCESS_H
11
+ #define EIGEN_FORCEALIGNEDACCESS_H
12
+
13
+ // IWYU pragma: private
14
+ #include "./InternalHeaderCheck.h"
15
+
16
+ namespace Eigen {
17
+
18
+ /** \class ForceAlignedAccess
19
+ * \ingroup Core_Module
20
+ *
21
+ * \brief Enforce aligned packet loads and stores regardless of what is requested
22
+ *
23
+ * \param ExpressionType the type of the object of which we are forcing aligned packet access
24
+ *
25
+ * This class is the return type of MatrixBase::forceAlignedAccess()
26
+ * and most of the time this is the only way it is used.
27
+ *
28
+ * \sa MatrixBase::forceAlignedAccess()
29
+ */
30
+
31
+ namespace internal {
32
+ template <typename ExpressionType>
33
+ struct traits<ForceAlignedAccess<ExpressionType>> : public traits<ExpressionType> {};
34
+ } // namespace internal
35
+
36
+ template <typename ExpressionType>
37
+ class ForceAlignedAccess : public internal::dense_xpr_base<ForceAlignedAccess<ExpressionType>>::type {
38
+ public:
39
+ typedef typename internal::dense_xpr_base<ForceAlignedAccess>::type Base;
40
+ EIGEN_DENSE_PUBLIC_INTERFACE(ForceAlignedAccess)
41
+
42
+ EIGEN_DEVICE_FUNC explicit inline ForceAlignedAccess(const ExpressionType& matrix) : m_expression(matrix) {}
43
+
44
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index rows() const EIGEN_NOEXCEPT { return m_expression.rows(); }
45
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index cols() const EIGEN_NOEXCEPT { return m_expression.cols(); }
46
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index outerStride() const EIGEN_NOEXCEPT {
47
+ return m_expression.outerStride();
48
+ }
49
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index innerStride() const EIGEN_NOEXCEPT {
50
+ return m_expression.innerStride();
51
+ }
52
+
53
+ EIGEN_DEVICE_FUNC inline const CoeffReturnType coeff(Index row, Index col) const {
54
+ return m_expression.coeff(row, col);
55
+ }
56
+
57
+ EIGEN_DEVICE_FUNC inline Scalar& coeffRef(Index row, Index col) {
58
+ return m_expression.const_cast_derived().coeffRef(row, col);
59
+ }
60
+
61
+ EIGEN_DEVICE_FUNC inline const CoeffReturnType coeff(Index index) const { return m_expression.coeff(index); }
62
+
63
+ EIGEN_DEVICE_FUNC inline Scalar& coeffRef(Index index) { return m_expression.const_cast_derived().coeffRef(index); }
64
+
65
+ template <int LoadMode>
66
+ inline const PacketScalar packet(Index row, Index col) const {
67
+ return m_expression.template packet<Aligned>(row, col);
68
+ }
69
+
70
+ template <int LoadMode>
71
+ inline void writePacket(Index row, Index col, const PacketScalar& x) {
72
+ m_expression.const_cast_derived().template writePacket<Aligned>(row, col, x);
73
+ }
74
+
75
+ template <int LoadMode>
76
+ inline const PacketScalar packet(Index index) const {
77
+ return m_expression.template packet<Aligned>(index);
78
+ }
79
+
80
+ template <int LoadMode>
81
+ inline void writePacket(Index index, const PacketScalar& x) {
82
+ m_expression.const_cast_derived().template writePacket<Aligned>(index, x);
83
+ }
84
+
85
+ EIGEN_DEVICE_FUNC operator const ExpressionType&() const { return m_expression; }
86
+
87
+ protected:
88
+ const ExpressionType& m_expression;
89
+
90
+ private:
91
+ ForceAlignedAccess& operator=(const ForceAlignedAccess&);
92
+ };
93
+
94
+ /** \returns an expression of *this with forced aligned access
95
+ * \sa forceAlignedAccessIf(),class ForceAlignedAccess
96
+ */
97
+ template <typename Derived>
98
+ inline const ForceAlignedAccess<Derived> MatrixBase<Derived>::forceAlignedAccess() const {
99
+ return ForceAlignedAccess<Derived>(derived());
100
+ }
101
+
102
+ /** \returns an expression of *this with forced aligned access
103
+ * \sa forceAlignedAccessIf(), class ForceAlignedAccess
104
+ */
105
+ template <typename Derived>
106
+ inline ForceAlignedAccess<Derived> MatrixBase<Derived>::forceAlignedAccess() {
107
+ return ForceAlignedAccess<Derived>(derived());
108
+ }
109
+
110
+ /** \returns an expression of *this with forced aligned access if \a Enable is true.
111
+ * \sa forceAlignedAccess(), class ForceAlignedAccess
112
+ */
113
+ template <typename Derived>
114
+ template <bool Enable>
115
+ inline add_const_on_value_type_t<std::conditional_t<Enable, ForceAlignedAccess<Derived>, Derived&>>
116
+ MatrixBase<Derived>::forceAlignedAccessIf() const {
117
+ return derived(); // FIXME This should not work but apparently is never used
118
+ }
119
+
120
+ /** \returns an expression of *this with forced aligned access if \a Enable is true.
121
+ * \sa forceAlignedAccess(), class ForceAlignedAccess
122
+ */
123
+ template <typename Derived>
124
+ template <bool Enable>
125
+ inline std::conditional_t<Enable, ForceAlignedAccess<Derived>, Derived&> MatrixBase<Derived>::forceAlignedAccessIf() {
126
+ return derived(); // FIXME This should not work but apparently is never used
127
+ }
128
+
129
+ } // end namespace Eigen
130
+
131
+ #endif // EIGEN_FORCEALIGNEDACCESS_H
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/Fuzzy.h ADDED
@@ -0,0 +1,132 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
5
+ // Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
6
+ //
7
+ // This Source Code Form is subject to the terms of the Mozilla
8
+ // Public License v. 2.0. If a copy of the MPL was not distributed
9
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
10
+
11
+ #ifndef EIGEN_FUZZY_H
12
+ #define EIGEN_FUZZY_H
13
+
14
+ // IWYU pragma: private
15
+ #include "./InternalHeaderCheck.h"
16
+
17
+ namespace Eigen {
18
+
19
+ namespace internal {
20
+
21
+ template <typename Derived, typename OtherDerived, bool is_integer = NumTraits<typename Derived::Scalar>::IsInteger>
22
+ struct isApprox_selector {
23
+ EIGEN_DEVICE_FUNC static bool run(const Derived& x, const OtherDerived& y, const typename Derived::RealScalar& prec) {
24
+ typename internal::nested_eval<Derived, 2>::type nested(x);
25
+ typename internal::nested_eval<OtherDerived, 2>::type otherNested(y);
26
+ return (nested.matrix() - otherNested.matrix()).cwiseAbs2().sum() <=
27
+ prec * prec * numext::mini(nested.cwiseAbs2().sum(), otherNested.cwiseAbs2().sum());
28
+ }
29
+ };
30
+
31
+ template <typename Derived, typename OtherDerived>
32
+ struct isApprox_selector<Derived, OtherDerived, true> {
33
+ EIGEN_DEVICE_FUNC static bool run(const Derived& x, const OtherDerived& y, const typename Derived::RealScalar&) {
34
+ return x.matrix() == y.matrix();
35
+ }
36
+ };
37
+
38
+ template <typename Derived, typename OtherDerived, bool is_integer = NumTraits<typename Derived::Scalar>::IsInteger>
39
+ struct isMuchSmallerThan_object_selector {
40
+ EIGEN_DEVICE_FUNC static bool run(const Derived& x, const OtherDerived& y, const typename Derived::RealScalar& prec) {
41
+ return x.cwiseAbs2().sum() <= numext::abs2(prec) * y.cwiseAbs2().sum();
42
+ }
43
+ };
44
+
45
+ template <typename Derived, typename OtherDerived>
46
+ struct isMuchSmallerThan_object_selector<Derived, OtherDerived, true> {
47
+ EIGEN_DEVICE_FUNC static bool run(const Derived& x, const OtherDerived&, const typename Derived::RealScalar&) {
48
+ return x.matrix() == Derived::Zero(x.rows(), x.cols()).matrix();
49
+ }
50
+ };
51
+
52
+ template <typename Derived, bool is_integer = NumTraits<typename Derived::Scalar>::IsInteger>
53
+ struct isMuchSmallerThan_scalar_selector {
54
+ EIGEN_DEVICE_FUNC static bool run(const Derived& x, const typename Derived::RealScalar& y,
55
+ const typename Derived::RealScalar& prec) {
56
+ return x.cwiseAbs2().sum() <= numext::abs2(prec * y);
57
+ }
58
+ };
59
+
60
+ template <typename Derived>
61
+ struct isMuchSmallerThan_scalar_selector<Derived, true> {
62
+ EIGEN_DEVICE_FUNC static bool run(const Derived& x, const typename Derived::RealScalar&,
63
+ const typename Derived::RealScalar&) {
64
+ return x.matrix() == Derived::Zero(x.rows(), x.cols()).matrix();
65
+ }
66
+ };
67
+
68
+ } // end namespace internal
69
+
70
+ /** \returns \c true if \c *this is approximately equal to \a other, within the precision
71
+ * determined by \a prec.
72
+ *
73
+ * \note The fuzzy compares are done multiplicatively. Two vectors \f$ v \f$ and \f$ w \f$
74
+ * are considered to be approximately equal within precision \f$ p \f$ if
75
+ * \f[ \Vert v - w \Vert \leqslant p\,\min(\Vert v\Vert, \Vert w\Vert). \f]
76
+ * For matrices, the comparison is done using the Hilbert-Schmidt norm (aka Frobenius norm
77
+ * L2 norm).
78
+ *
79
+ * \note Because of the multiplicativeness of this comparison, one can't use this function
80
+ * to check whether \c *this is approximately equal to the zero matrix or vector.
81
+ * Indeed, \c isApprox(zero) returns false unless \c *this itself is exactly the zero matrix
82
+ * or vector. If you want to test whether \c *this is zero, use internal::isMuchSmallerThan(const
83
+ * RealScalar&, RealScalar) instead.
84
+ *
85
+ * \sa internal::isMuchSmallerThan(const RealScalar&, RealScalar) const
86
+ */
87
+ template <typename Derived>
88
+ template <typename OtherDerived>
89
+ EIGEN_DEVICE_FUNC bool DenseBase<Derived>::isApprox(const DenseBase<OtherDerived>& other,
90
+ const RealScalar& prec) const {
91
+ return internal::isApprox_selector<Derived, OtherDerived>::run(derived(), other.derived(), prec);
92
+ }
93
+
94
+ /** \returns \c true if the norm of \c *this is much smaller than \a other,
95
+ * within the precision determined by \a prec.
96
+ *
97
+ * \note The fuzzy compares are done multiplicatively. A vector \f$ v \f$ is
98
+ * considered to be much smaller than \f$ x \f$ within precision \f$ p \f$ if
99
+ * \f[ \Vert v \Vert \leqslant p\,\vert x\vert. \f]
100
+ *
101
+ * For matrices, the comparison is done using the Hilbert-Schmidt norm. For this reason,
102
+ * the value of the reference scalar \a other should come from the Hilbert-Schmidt norm
103
+ * of a reference matrix of same dimensions.
104
+ *
105
+ * \sa isApprox(), isMuchSmallerThan(const DenseBase<OtherDerived>&, RealScalar) const
106
+ */
107
+ template <typename Derived>
108
+ EIGEN_DEVICE_FUNC bool DenseBase<Derived>::isMuchSmallerThan(const typename NumTraits<Scalar>::Real& other,
109
+ const RealScalar& prec) const {
110
+ return internal::isMuchSmallerThan_scalar_selector<Derived>::run(derived(), other, prec);
111
+ }
112
+
113
+ /** \returns \c true if the norm of \c *this is much smaller than the norm of \a other,
114
+ * within the precision determined by \a prec.
115
+ *
116
+ * \note The fuzzy compares are done multiplicatively. A vector \f$ v \f$ is
117
+ * considered to be much smaller than a vector \f$ w \f$ within precision \f$ p \f$ if
118
+ * \f[ \Vert v \Vert \leqslant p\,\Vert w\Vert. \f]
119
+ * For matrices, the comparison is done using the Hilbert-Schmidt norm.
120
+ *
121
+ * \sa isApprox(), isMuchSmallerThan(const RealScalar&, RealScalar) const
122
+ */
123
+ template <typename Derived>
124
+ template <typename OtherDerived>
125
+ EIGEN_DEVICE_FUNC bool DenseBase<Derived>::isMuchSmallerThan(const DenseBase<OtherDerived>& other,
126
+ const RealScalar& prec) const {
127
+ return internal::isMuchSmallerThan_object_selector<Derived, OtherDerived>::run(derived(), other.derived(), prec);
128
+ }
129
+
130
+ } // end namespace Eigen
131
+
132
+ #endif // EIGEN_FUZZY_H
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/GeneralProduct.h ADDED
@@ -0,0 +1,517 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
5
+ // Copyright (C) 2008-2011 Gael Guennebaud <gael.guennebaud@inria.fr>
6
+ //
7
+ // This Source Code Form is subject to the terms of the Mozilla
8
+ // Public License v. 2.0. If a copy of the MPL was not distributed
9
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
10
+
11
+ #ifndef EIGEN_GENERAL_PRODUCT_H
12
+ #define EIGEN_GENERAL_PRODUCT_H
13
+
14
+ // IWYU pragma: private
15
+ #include "./InternalHeaderCheck.h"
16
+
17
+ namespace Eigen {
18
+
19
+ enum { Large = 2, Small = 3 };
20
+
21
+ // Define the threshold value to fallback from the generic matrix-matrix product
22
+ // implementation (heavy) to the lightweight coeff-based product one.
23
+ // See generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct>
24
+ // in products/GeneralMatrixMatrix.h for more details.
25
+ // TODO This threshold should also be used in the compile-time selector below.
26
+ #ifndef EIGEN_GEMM_TO_COEFFBASED_THRESHOLD
27
+ // This default value has been obtained on a Haswell architecture.
28
+ #define EIGEN_GEMM_TO_COEFFBASED_THRESHOLD 20
29
+ #endif
30
+
31
+ namespace internal {
32
+
33
+ template <int Rows, int Cols, int Depth>
34
+ struct product_type_selector;
35
+
36
+ template <int Size, int MaxSize>
37
+ struct product_size_category {
38
+ enum {
39
+ #ifndef EIGEN_GPU_COMPILE_PHASE
40
+ is_large = MaxSize == Dynamic || Size >= EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD ||
41
+ (Size == Dynamic && MaxSize >= EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD),
42
+ #else
43
+ is_large = 0,
44
+ #endif
45
+ value = is_large ? Large
46
+ : Size == 1 ? 1
47
+ : Small
48
+ };
49
+ };
50
+
51
+ template <typename Lhs, typename Rhs>
52
+ struct product_type {
53
+ typedef remove_all_t<Lhs> Lhs_;
54
+ typedef remove_all_t<Rhs> Rhs_;
55
+ enum {
56
+ MaxRows = traits<Lhs_>::MaxRowsAtCompileTime,
57
+ Rows = traits<Lhs_>::RowsAtCompileTime,
58
+ MaxCols = traits<Rhs_>::MaxColsAtCompileTime,
59
+ Cols = traits<Rhs_>::ColsAtCompileTime,
60
+ MaxDepth = min_size_prefer_fixed(traits<Lhs_>::MaxColsAtCompileTime, traits<Rhs_>::MaxRowsAtCompileTime),
61
+ Depth = min_size_prefer_fixed(traits<Lhs_>::ColsAtCompileTime, traits<Rhs_>::RowsAtCompileTime)
62
+ };
63
+
64
+ // the splitting into different lines of code here, introducing the _select enums and the typedef below,
65
+ // is to work around an internal compiler error with gcc 4.1 and 4.2.
66
+ private:
67
+ enum {
68
+ rows_select = product_size_category<Rows, MaxRows>::value,
69
+ cols_select = product_size_category<Cols, MaxCols>::value,
70
+ depth_select = product_size_category<Depth, MaxDepth>::value
71
+ };
72
+ typedef product_type_selector<rows_select, cols_select, depth_select> selector;
73
+
74
+ public:
75
+ enum { value = selector::ret, ret = selector::ret };
76
+ #ifdef EIGEN_DEBUG_PRODUCT
77
+ static void debug() {
78
+ EIGEN_DEBUG_VAR(Rows);
79
+ EIGEN_DEBUG_VAR(Cols);
80
+ EIGEN_DEBUG_VAR(Depth);
81
+ EIGEN_DEBUG_VAR(rows_select);
82
+ EIGEN_DEBUG_VAR(cols_select);
83
+ EIGEN_DEBUG_VAR(depth_select);
84
+ EIGEN_DEBUG_VAR(value);
85
+ }
86
+ #endif
87
+ };
88
+
89
+ /* The following allows to select the kind of product at compile time
90
+ * based on the three dimensions of the product.
91
+ * This is a compile time mapping from {1,Small,Large}^3 -> {product types} */
92
+ // FIXME I'm not sure the current mapping is the ideal one.
93
+ template <int M, int N>
94
+ struct product_type_selector<M, N, 1> {
95
+ enum { ret = OuterProduct };
96
+ };
97
+ template <int M>
98
+ struct product_type_selector<M, 1, 1> {
99
+ enum { ret = LazyCoeffBasedProductMode };
100
+ };
101
+ template <int N>
102
+ struct product_type_selector<1, N, 1> {
103
+ enum { ret = LazyCoeffBasedProductMode };
104
+ };
105
+ template <int Depth>
106
+ struct product_type_selector<1, 1, Depth> {
107
+ enum { ret = InnerProduct };
108
+ };
109
+ template <>
110
+ struct product_type_selector<1, 1, 1> {
111
+ enum { ret = InnerProduct };
112
+ };
113
+ template <>
114
+ struct product_type_selector<Small, 1, Small> {
115
+ enum { ret = CoeffBasedProductMode };
116
+ };
117
+ template <>
118
+ struct product_type_selector<1, Small, Small> {
119
+ enum { ret = CoeffBasedProductMode };
120
+ };
121
+ template <>
122
+ struct product_type_selector<Small, Small, Small> {
123
+ enum { ret = CoeffBasedProductMode };
124
+ };
125
+ template <>
126
+ struct product_type_selector<Small, Small, 1> {
127
+ enum { ret = LazyCoeffBasedProductMode };
128
+ };
129
+ template <>
130
+ struct product_type_selector<Small, Large, 1> {
131
+ enum { ret = LazyCoeffBasedProductMode };
132
+ };
133
+ template <>
134
+ struct product_type_selector<Large, Small, 1> {
135
+ enum { ret = LazyCoeffBasedProductMode };
136
+ };
137
+ template <>
138
+ struct product_type_selector<1, Large, Small> {
139
+ enum { ret = CoeffBasedProductMode };
140
+ };
141
+ template <>
142
+ struct product_type_selector<1, Large, Large> {
143
+ enum { ret = GemvProduct };
144
+ };
145
+ template <>
146
+ struct product_type_selector<1, Small, Large> {
147
+ enum { ret = CoeffBasedProductMode };
148
+ };
149
+ template <>
150
+ struct product_type_selector<Large, 1, Small> {
151
+ enum { ret = CoeffBasedProductMode };
152
+ };
153
+ template <>
154
+ struct product_type_selector<Large, 1, Large> {
155
+ enum { ret = GemvProduct };
156
+ };
157
+ template <>
158
+ struct product_type_selector<Small, 1, Large> {
159
+ enum { ret = CoeffBasedProductMode };
160
+ };
161
+ template <>
162
+ struct product_type_selector<Small, Small, Large> {
163
+ enum { ret = GemmProduct };
164
+ };
165
+ template <>
166
+ struct product_type_selector<Large, Small, Large> {
167
+ enum { ret = GemmProduct };
168
+ };
169
+ template <>
170
+ struct product_type_selector<Small, Large, Large> {
171
+ enum { ret = GemmProduct };
172
+ };
173
+ template <>
174
+ struct product_type_selector<Large, Large, Large> {
175
+ enum { ret = GemmProduct };
176
+ };
177
+ template <>
178
+ struct product_type_selector<Large, Small, Small> {
179
+ enum { ret = CoeffBasedProductMode };
180
+ };
181
+ template <>
182
+ struct product_type_selector<Small, Large, Small> {
183
+ enum { ret = CoeffBasedProductMode };
184
+ };
185
+ template <>
186
+ struct product_type_selector<Large, Large, Small> {
187
+ enum { ret = GemmProduct };
188
+ };
189
+
190
+ } // end namespace internal
191
+
192
+ /***********************************************************************
193
+ * Implementation of Inner Vector Vector Product
194
+ ***********************************************************************/
195
+
196
+ // FIXME : maybe the "inner product" could return a Scalar
197
+ // instead of a 1x1 matrix ??
198
+ // Pro: more natural for the user
199
+ // Cons: this could be a problem if in a meta unrolled algorithm a matrix-matrix
200
+ // product ends up to a row-vector times col-vector product... To tackle this use
201
+ // case, we could have a specialization for Block<MatrixType,1,1> with: operator=(Scalar x);
202
+
203
+ /***********************************************************************
204
+ * Implementation of Outer Vector Vector Product
205
+ ***********************************************************************/
206
+
207
+ /***********************************************************************
208
+ * Implementation of General Matrix Vector Product
209
+ ***********************************************************************/
210
+
211
+ /* According to the shape/flags of the matrix we have to distinghish 3 different cases:
212
+ * 1 - the matrix is col-major, BLAS compatible and M is large => call fast BLAS-like colmajor routine
213
+ * 2 - the matrix is row-major, BLAS compatible and N is large => call fast BLAS-like rowmajor routine
214
+ * 3 - all other cases are handled using a simple loop along the outer-storage direction.
215
+ * Therefore we need a lower level meta selector.
216
+ * Furthermore, if the matrix is the rhs, then the product has to be transposed.
217
+ */
218
+ namespace internal {
219
+
220
+ template <int Side, int StorageOrder, bool BlasCompatible>
221
+ struct gemv_dense_selector;
222
+
223
+ } // end namespace internal
224
+
225
+ namespace internal {
226
+
227
+ template <typename Scalar, int Size, int MaxSize, bool Cond>
228
+ struct gemv_static_vector_if;
229
+
230
+ template <typename Scalar, int Size, int MaxSize>
231
+ struct gemv_static_vector_if<Scalar, Size, MaxSize, false> {
232
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Scalar* data() {
233
+ eigen_internal_assert(false && "should never be called");
234
+ return 0;
235
+ }
236
+ };
237
+
238
+ template <typename Scalar, int Size>
239
+ struct gemv_static_vector_if<Scalar, Size, Dynamic, true> {
240
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Scalar* data() { return 0; }
241
+ };
242
+
243
+ template <typename Scalar, int Size, int MaxSize>
244
+ struct gemv_static_vector_if<Scalar, Size, MaxSize, true> {
245
+ #if EIGEN_MAX_STATIC_ALIGN_BYTES != 0
246
+ internal::plain_array<Scalar, internal::min_size_prefer_fixed(Size, MaxSize), 0, AlignedMax> m_data;
247
+ EIGEN_STRONG_INLINE Scalar* data() { return m_data.array; }
248
+ #else
249
+ // Some architectures cannot align on the stack,
250
+ // => let's manually enforce alignment by allocating more data and return the address of the first aligned element.
251
+ internal::plain_array<Scalar, internal::min_size_prefer_fixed(Size, MaxSize) + EIGEN_MAX_ALIGN_BYTES, 0> m_data;
252
+ EIGEN_STRONG_INLINE Scalar* data() {
253
+ return reinterpret_cast<Scalar*>((std::uintptr_t(m_data.array) & ~(std::size_t(EIGEN_MAX_ALIGN_BYTES - 1))) +
254
+ EIGEN_MAX_ALIGN_BYTES);
255
+ }
256
+ #endif
257
+ };
258
+
259
+ // The vector is on the left => transposition
260
+ template <int StorageOrder, bool BlasCompatible>
261
+ struct gemv_dense_selector<OnTheLeft, StorageOrder, BlasCompatible> {
262
+ template <typename Lhs, typename Rhs, typename Dest>
263
+ static void run(const Lhs& lhs, const Rhs& rhs, Dest& dest, const typename Dest::Scalar& alpha) {
264
+ Transpose<Dest> destT(dest);
265
+ enum { OtherStorageOrder = StorageOrder == RowMajor ? ColMajor : RowMajor };
266
+ gemv_dense_selector<OnTheRight, OtherStorageOrder, BlasCompatible>::run(rhs.transpose(), lhs.transpose(), destT,
267
+ alpha);
268
+ }
269
+ };
270
+
271
+ template <>
272
+ struct gemv_dense_selector<OnTheRight, ColMajor, true> {
273
+ template <typename Lhs, typename Rhs, typename Dest>
274
+ static inline void run(const Lhs& lhs, const Rhs& rhs, Dest& dest, const typename Dest::Scalar& alpha) {
275
+ typedef typename Lhs::Scalar LhsScalar;
276
+ typedef typename Rhs::Scalar RhsScalar;
277
+ typedef typename Dest::Scalar ResScalar;
278
+
279
+ typedef internal::blas_traits<Lhs> LhsBlasTraits;
280
+ typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType;
281
+ typedef internal::blas_traits<Rhs> RhsBlasTraits;
282
+ typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType;
283
+
284
+ typedef Map<Matrix<ResScalar, Dynamic, 1>, plain_enum_min(AlignedMax, internal::packet_traits<ResScalar>::size)>
285
+ MappedDest;
286
+
287
+ ActualLhsType actualLhs = LhsBlasTraits::extract(lhs);
288
+ ActualRhsType actualRhs = RhsBlasTraits::extract(rhs);
289
+
290
+ ResScalar actualAlpha = combine_scalar_factors(alpha, lhs, rhs);
291
+
292
+ // make sure Dest is a compile-time vector type (bug 1166)
293
+ typedef std::conditional_t<Dest::IsVectorAtCompileTime, Dest, typename Dest::ColXpr> ActualDest;
294
+
295
+ enum {
296
+ // FIXME find a way to allow an inner stride on the result if packet_traits<Scalar>::size==1
297
+ // on, the other hand it is good for the cache to pack the vector anyways...
298
+ EvalToDestAtCompileTime = (ActualDest::InnerStrideAtCompileTime == 1),
299
+ ComplexByReal = (NumTraits<LhsScalar>::IsComplex) && (!NumTraits<RhsScalar>::IsComplex),
300
+ MightCannotUseDest = ((!EvalToDestAtCompileTime) || ComplexByReal) && (ActualDest::MaxSizeAtCompileTime != 0)
301
+ };
302
+
303
+ typedef const_blas_data_mapper<LhsScalar, Index, ColMajor> LhsMapper;
304
+ typedef const_blas_data_mapper<RhsScalar, Index, RowMajor> RhsMapper;
305
+ RhsScalar compatibleAlpha = get_factor<ResScalar, RhsScalar>::run(actualAlpha);
306
+
307
+ if (!MightCannotUseDest) {
308
+ // shortcut if we are sure to be able to use dest directly,
309
+ // this ease the compiler to generate cleaner and more optimzized code for most common cases
310
+ general_matrix_vector_product<Index, LhsScalar, LhsMapper, ColMajor, LhsBlasTraits::NeedToConjugate, RhsScalar,
311
+ RhsMapper, RhsBlasTraits::NeedToConjugate>::run(actualLhs.rows(), actualLhs.cols(),
312
+ LhsMapper(actualLhs.data(),
313
+ actualLhs.outerStride()),
314
+ RhsMapper(actualRhs.data(),
315
+ actualRhs.innerStride()),
316
+ dest.data(), 1, compatibleAlpha);
317
+ } else {
318
+ gemv_static_vector_if<ResScalar, ActualDest::SizeAtCompileTime, ActualDest::MaxSizeAtCompileTime,
319
+ MightCannotUseDest>
320
+ static_dest;
321
+
322
+ const bool alphaIsCompatible = (!ComplexByReal) || (numext::is_exactly_zero(numext::imag(actualAlpha)));
323
+ const bool evalToDest = EvalToDestAtCompileTime && alphaIsCompatible;
324
+
325
+ ei_declare_aligned_stack_constructed_variable(ResScalar, actualDestPtr, dest.size(),
326
+ evalToDest ? dest.data() : static_dest.data());
327
+
328
+ if (!evalToDest) {
329
+ #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN
330
+ Index size = dest.size();
331
+ EIGEN_DENSE_STORAGE_CTOR_PLUGIN
332
+ #endif
333
+ if (!alphaIsCompatible) {
334
+ MappedDest(actualDestPtr, dest.size()).setZero();
335
+ compatibleAlpha = RhsScalar(1);
336
+ } else
337
+ MappedDest(actualDestPtr, dest.size()) = dest;
338
+ }
339
+
340
+ general_matrix_vector_product<Index, LhsScalar, LhsMapper, ColMajor, LhsBlasTraits::NeedToConjugate, RhsScalar,
341
+ RhsMapper, RhsBlasTraits::NeedToConjugate>::run(actualLhs.rows(), actualLhs.cols(),
342
+ LhsMapper(actualLhs.data(),
343
+ actualLhs.outerStride()),
344
+ RhsMapper(actualRhs.data(),
345
+ actualRhs.innerStride()),
346
+ actualDestPtr, 1, compatibleAlpha);
347
+
348
+ if (!evalToDest) {
349
+ if (!alphaIsCompatible)
350
+ dest.matrix() += actualAlpha * MappedDest(actualDestPtr, dest.size());
351
+ else
352
+ dest = MappedDest(actualDestPtr, dest.size());
353
+ }
354
+ }
355
+ }
356
+ };
357
+
358
+ template <>
359
+ struct gemv_dense_selector<OnTheRight, RowMajor, true> {
360
+ template <typename Lhs, typename Rhs, typename Dest>
361
+ static void run(const Lhs& lhs, const Rhs& rhs, Dest& dest, const typename Dest::Scalar& alpha) {
362
+ typedef typename Lhs::Scalar LhsScalar;
363
+ typedef typename Rhs::Scalar RhsScalar;
364
+ typedef typename Dest::Scalar ResScalar;
365
+
366
+ typedef internal::blas_traits<Lhs> LhsBlasTraits;
367
+ typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType;
368
+ typedef internal::blas_traits<Rhs> RhsBlasTraits;
369
+ typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType;
370
+ typedef internal::remove_all_t<ActualRhsType> ActualRhsTypeCleaned;
371
+
372
+ std::add_const_t<ActualLhsType> actualLhs = LhsBlasTraits::extract(lhs);
373
+ std::add_const_t<ActualRhsType> actualRhs = RhsBlasTraits::extract(rhs);
374
+
375
+ ResScalar actualAlpha = combine_scalar_factors(alpha, lhs, rhs);
376
+
377
+ enum {
378
+ // FIXME find a way to allow an inner stride on the result if packet_traits<Scalar>::size==1
379
+ // on, the other hand it is good for the cache to pack the vector anyways...
380
+ DirectlyUseRhs =
381
+ ActualRhsTypeCleaned::InnerStrideAtCompileTime == 1 || ActualRhsTypeCleaned::MaxSizeAtCompileTime == 0
382
+ };
383
+
384
+ gemv_static_vector_if<RhsScalar, ActualRhsTypeCleaned::SizeAtCompileTime,
385
+ ActualRhsTypeCleaned::MaxSizeAtCompileTime, !DirectlyUseRhs>
386
+ static_rhs;
387
+
388
+ ei_declare_aligned_stack_constructed_variable(
389
+ RhsScalar, actualRhsPtr, actualRhs.size(),
390
+ DirectlyUseRhs ? const_cast<RhsScalar*>(actualRhs.data()) : static_rhs.data());
391
+
392
+ if (!DirectlyUseRhs) {
393
+ #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN
394
+ Index size = actualRhs.size();
395
+ EIGEN_DENSE_STORAGE_CTOR_PLUGIN
396
+ #endif
397
+ Map<typename ActualRhsTypeCleaned::PlainObject>(actualRhsPtr, actualRhs.size()) = actualRhs;
398
+ }
399
+
400
+ typedef const_blas_data_mapper<LhsScalar, Index, RowMajor> LhsMapper;
401
+ typedef const_blas_data_mapper<RhsScalar, Index, ColMajor> RhsMapper;
402
+ general_matrix_vector_product<Index, LhsScalar, LhsMapper, RowMajor, LhsBlasTraits::NeedToConjugate, RhsScalar,
403
+ RhsMapper, RhsBlasTraits::NeedToConjugate>::
404
+ run(actualLhs.rows(), actualLhs.cols(), LhsMapper(actualLhs.data(), actualLhs.outerStride()),
405
+ RhsMapper(actualRhsPtr, 1), dest.data(),
406
+ dest.col(0).innerStride(), // NOTE if dest is not a vector at compile-time, then dest.innerStride() might
407
+ // be wrong. (bug 1166)
408
+ actualAlpha);
409
+ }
410
+ };
411
+
412
+ template <>
413
+ struct gemv_dense_selector<OnTheRight, ColMajor, false> {
414
+ template <typename Lhs, typename Rhs, typename Dest>
415
+ static void run(const Lhs& lhs, const Rhs& rhs, Dest& dest, const typename Dest::Scalar& alpha) {
416
+ EIGEN_STATIC_ASSERT((!nested_eval<Lhs, 1>::Evaluate),
417
+ EIGEN_INTERNAL_COMPILATION_ERROR_OR_YOU_MADE_A_PROGRAMMING_MISTAKE);
418
+ // TODO if rhs is large enough it might be beneficial to make sure that dest is sequentially stored in memory,
419
+ // otherwise use a temp
420
+ typename nested_eval<Rhs, 1>::type actual_rhs(rhs);
421
+ const Index size = rhs.rows();
422
+ for (Index k = 0; k < size; ++k) dest += (alpha * actual_rhs.coeff(k)) * lhs.col(k);
423
+ }
424
+ };
425
+
426
+ template <>
427
+ struct gemv_dense_selector<OnTheRight, RowMajor, false> {
428
+ template <typename Lhs, typename Rhs, typename Dest>
429
+ static void run(const Lhs& lhs, const Rhs& rhs, Dest& dest, const typename Dest::Scalar& alpha) {
430
+ EIGEN_STATIC_ASSERT((!nested_eval<Lhs, 1>::Evaluate),
431
+ EIGEN_INTERNAL_COMPILATION_ERROR_OR_YOU_MADE_A_PROGRAMMING_MISTAKE);
432
+ typename nested_eval<Rhs, Lhs::RowsAtCompileTime>::type actual_rhs(rhs);
433
+ const Index rows = dest.rows();
434
+ for (Index i = 0; i < rows; ++i)
435
+ dest.coeffRef(i) += alpha * (lhs.row(i).cwiseProduct(actual_rhs.transpose())).sum();
436
+ }
437
+ };
438
+
439
+ } // end namespace internal
440
+
441
+ /***************************************************************************
442
+ * Implementation of matrix base methods
443
+ ***************************************************************************/
444
+
445
+ /** \returns the matrix product of \c *this and \a other.
446
+ *
447
+ * \note If instead of the matrix product you want the coefficient-wise product, see Cwise::operator*().
448
+ *
449
+ * \sa lazyProduct(), operator*=(const MatrixBase&), Cwise::operator*()
450
+ */
451
+ template <typename Derived>
452
+ template <typename OtherDerived>
453
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Product<Derived, OtherDerived> MatrixBase<Derived>::operator*(
454
+ const MatrixBase<OtherDerived>& other) const {
455
+ // A note regarding the function declaration: In MSVC, this function will sometimes
456
+ // not be inlined since DenseStorage is an unwindable object for dynamic
457
+ // matrices and product types are holding a member to store the result.
458
+ // Thus it does not help tagging this function with EIGEN_STRONG_INLINE.
459
+ enum {
460
+ ProductIsValid = Derived::ColsAtCompileTime == Dynamic || OtherDerived::RowsAtCompileTime == Dynamic ||
461
+ int(Derived::ColsAtCompileTime) == int(OtherDerived::RowsAtCompileTime),
462
+ AreVectors = Derived::IsVectorAtCompileTime && OtherDerived::IsVectorAtCompileTime,
463
+ SameSizes = EIGEN_PREDICATE_SAME_MATRIX_SIZE(Derived, OtherDerived)
464
+ };
465
+ // note to the lost user:
466
+ // * for a dot product use: v1.dot(v2)
467
+ // * for a coeff-wise product use: v1.cwiseProduct(v2)
468
+ EIGEN_STATIC_ASSERT(
469
+ ProductIsValid || !(AreVectors && SameSizes),
470
+ INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS)
471
+ EIGEN_STATIC_ASSERT(ProductIsValid || !(SameSizes && !AreVectors),
472
+ INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION)
473
+ EIGEN_STATIC_ASSERT(ProductIsValid || SameSizes, INVALID_MATRIX_PRODUCT)
474
+ #ifdef EIGEN_DEBUG_PRODUCT
475
+ internal::product_type<Derived, OtherDerived>::debug();
476
+ #endif
477
+
478
+ return Product<Derived, OtherDerived>(derived(), other.derived());
479
+ }
480
+
481
+ /** \returns an expression of the matrix product of \c *this and \a other without implicit evaluation.
482
+ *
483
+ * The returned product will behave like any other expressions: the coefficients of the product will be
484
+ * computed once at a time as requested. This might be useful in some extremely rare cases when only
485
+ * a small and no coherent fraction of the result's coefficients have to be computed.
486
+ *
487
+ * \warning This version of the matrix product can be much much slower. So use it only if you know
488
+ * what you are doing and that you measured a true speed improvement.
489
+ *
490
+ * \sa operator*(const MatrixBase&)
491
+ */
492
+ template <typename Derived>
493
+ template <typename OtherDerived>
494
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Product<Derived, OtherDerived, LazyProduct>
495
+ MatrixBase<Derived>::lazyProduct(const MatrixBase<OtherDerived>& other) const {
496
+ enum {
497
+ ProductIsValid = Derived::ColsAtCompileTime == Dynamic || OtherDerived::RowsAtCompileTime == Dynamic ||
498
+ int(Derived::ColsAtCompileTime) == int(OtherDerived::RowsAtCompileTime),
499
+ AreVectors = Derived::IsVectorAtCompileTime && OtherDerived::IsVectorAtCompileTime,
500
+ SameSizes = EIGEN_PREDICATE_SAME_MATRIX_SIZE(Derived, OtherDerived)
501
+ };
502
+ // note to the lost user:
503
+ // * for a dot product use: v1.dot(v2)
504
+ // * for a coeff-wise product use: v1.cwiseProduct(v2)
505
+ EIGEN_STATIC_ASSERT(
506
+ ProductIsValid || !(AreVectors && SameSizes),
507
+ INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS)
508
+ EIGEN_STATIC_ASSERT(ProductIsValid || !(SameSizes && !AreVectors),
509
+ INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION)
510
+ EIGEN_STATIC_ASSERT(ProductIsValid || SameSizes, INVALID_MATRIX_PRODUCT)
511
+
512
+ return Product<Derived, OtherDerived, LazyProduct>(derived(), other.derived());
513
+ }
514
+
515
+ } // end namespace Eigen
516
+
517
+ #endif // EIGEN_PRODUCT_H
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/GenericPacketMath.h ADDED
@@ -0,0 +1,1527 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
5
+ // Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
6
+ //
7
+ // This Source Code Form is subject to the terms of the Mozilla
8
+ // Public License v. 2.0. If a copy of the MPL was not distributed
9
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
10
+
11
+ #ifndef EIGEN_GENERIC_PACKET_MATH_H
12
+ #define EIGEN_GENERIC_PACKET_MATH_H
13
+
14
+ // IWYU pragma: private
15
+ #include "./InternalHeaderCheck.h"
16
+
17
+ namespace Eigen {
18
+
19
+ namespace internal {
20
+
21
+ /** \internal
22
+ * \file GenericPacketMath.h
23
+ *
24
+ * Default implementation for types not supported by the vectorization.
25
+ * In practice these functions are provided to make easier the writing
26
+ * of generic vectorized code.
27
+ */
28
+
29
+ #ifndef EIGEN_DEBUG_ALIGNED_LOAD
30
+ #define EIGEN_DEBUG_ALIGNED_LOAD
31
+ #endif
32
+
33
+ #ifndef EIGEN_DEBUG_UNALIGNED_LOAD
34
+ #define EIGEN_DEBUG_UNALIGNED_LOAD
35
+ #endif
36
+
37
+ #ifndef EIGEN_DEBUG_ALIGNED_STORE
38
+ #define EIGEN_DEBUG_ALIGNED_STORE
39
+ #endif
40
+
41
+ #ifndef EIGEN_DEBUG_UNALIGNED_STORE
42
+ #define EIGEN_DEBUG_UNALIGNED_STORE
43
+ #endif
44
+
45
+ struct default_packet_traits {
46
+ enum {
47
+ // Ops that are implemented for most types.
48
+ HasAdd = 1,
49
+ HasSub = 1,
50
+ HasShift = 1,
51
+ HasMul = 1,
52
+ HasNegate = 1,
53
+ HasAbs = 1,
54
+ HasAbs2 = 1,
55
+ HasMin = 1,
56
+ HasMax = 1,
57
+ HasConj = 1,
58
+ HasSetLinear = 1,
59
+ HasSign = 1,
60
+ // By default, the nearest integer functions (rint, round, floor, ceil, trunc) are enabled for all scalar and packet
61
+ // types
62
+ HasRound = 1,
63
+
64
+ HasArg = 0,
65
+ HasAbsDiff = 0,
66
+ HasBlend = 0,
67
+ // This flag is used to indicate whether packet comparison is supported.
68
+ // pcmp_eq, pcmp_lt and pcmp_le should be defined for it to be true.
69
+ HasCmp = 0,
70
+
71
+ HasDiv = 0,
72
+ HasReciprocal = 0,
73
+ HasSqrt = 0,
74
+ HasRsqrt = 0,
75
+ HasExp = 0,
76
+ HasExpm1 = 0,
77
+ HasLog = 0,
78
+ HasLog1p = 0,
79
+ HasLog10 = 0,
80
+ HasPow = 0,
81
+ HasSin = 0,
82
+ HasCos = 0,
83
+ HasTan = 0,
84
+ HasASin = 0,
85
+ HasACos = 0,
86
+ HasATan = 0,
87
+ HasATanh = 0,
88
+ HasSinh = 0,
89
+ HasCosh = 0,
90
+ HasTanh = 0,
91
+ HasLGamma = 0,
92
+ HasDiGamma = 0,
93
+ HasZeta = 0,
94
+ HasPolygamma = 0,
95
+ HasErf = 0,
96
+ HasErfc = 0,
97
+ HasNdtri = 0,
98
+ HasBessel = 0,
99
+ HasIGamma = 0,
100
+ HasIGammaDerA = 0,
101
+ HasGammaSampleDerAlpha = 0,
102
+ HasIGammac = 0,
103
+ HasBetaInc = 0
104
+ };
105
+ };
106
+
107
+ template <typename T>
108
+ struct packet_traits : default_packet_traits {
109
+ typedef T type;
110
+ typedef T half;
111
+ enum {
112
+ Vectorizable = 0,
113
+ size = 1,
114
+ AlignedOnScalar = 0,
115
+ };
116
+ enum {
117
+ HasAdd = 0,
118
+ HasSub = 0,
119
+ HasMul = 0,
120
+ HasNegate = 0,
121
+ HasAbs = 0,
122
+ HasAbs2 = 0,
123
+ HasMin = 0,
124
+ HasMax = 0,
125
+ HasConj = 0,
126
+ HasSetLinear = 0
127
+ };
128
+ };
129
+
130
+ template <typename T>
131
+ struct packet_traits<const T> : packet_traits<T> {};
132
+
133
+ template <typename T>
134
+ struct unpacket_traits {
135
+ typedef T type;
136
+ typedef T half;
137
+ typedef typename numext::get_integer_by_size<sizeof(T)>::signed_type integer_packet;
138
+ enum {
139
+ size = 1,
140
+ alignment = alignof(T),
141
+ vectorizable = false,
142
+ masked_load_available = false,
143
+ masked_store_available = false
144
+ };
145
+ };
146
+
147
+ template <typename T>
148
+ struct unpacket_traits<const T> : unpacket_traits<T> {};
149
+
150
+ /** \internal A convenience utility for determining if the type is a scalar.
151
+ * This is used to enable some generic packet implementations.
152
+ */
153
+ template <typename Packet>
154
+ struct is_scalar {
155
+ using Scalar = typename unpacket_traits<Packet>::type;
156
+ enum { value = internal::is_same<Packet, Scalar>::value };
157
+ };
158
+
159
+ // automatically and succinctly define combinations of pcast<SrcPacket,TgtPacket> when
160
+ // 1) the packets are the same type, or
161
+ // 2) the packets differ only in sign.
162
+ // In both of these cases, preinterpret (bit_cast) is equivalent to pcast (static_cast)
163
+ template <typename SrcPacket, typename TgtPacket,
164
+ bool Scalar = is_scalar<SrcPacket>::value && is_scalar<TgtPacket>::value>
165
+ struct is_degenerate_helper : is_same<SrcPacket, TgtPacket> {};
166
+ template <>
167
+ struct is_degenerate_helper<int8_t, uint8_t, true> : std::true_type {};
168
+ template <>
169
+ struct is_degenerate_helper<int16_t, uint16_t, true> : std::true_type {};
170
+ template <>
171
+ struct is_degenerate_helper<int32_t, uint32_t, true> : std::true_type {};
172
+ template <>
173
+ struct is_degenerate_helper<int64_t, uint64_t, true> : std::true_type {};
174
+
175
+ template <typename SrcPacket, typename TgtPacket>
176
+ struct is_degenerate_helper<SrcPacket, TgtPacket, false> {
177
+ using SrcScalar = typename unpacket_traits<SrcPacket>::type;
178
+ static constexpr int SrcSize = unpacket_traits<SrcPacket>::size;
179
+ using TgtScalar = typename unpacket_traits<TgtPacket>::type;
180
+ static constexpr int TgtSize = unpacket_traits<TgtPacket>::size;
181
+ static constexpr bool value = is_degenerate_helper<SrcScalar, TgtScalar, true>::value && (SrcSize == TgtSize);
182
+ };
183
+
184
+ // is_degenerate<T1,T2>::value == is_degenerate<T2,T1>::value
185
+ template <typename SrcPacket, typename TgtPacket>
186
+ struct is_degenerate {
187
+ static constexpr bool value =
188
+ is_degenerate_helper<SrcPacket, TgtPacket>::value || is_degenerate_helper<TgtPacket, SrcPacket>::value;
189
+ };
190
+
191
+ template <typename Packet>
192
+ struct is_half {
193
+ using Scalar = typename unpacket_traits<Packet>::type;
194
+ static constexpr int Size = unpacket_traits<Packet>::size;
195
+ using DefaultPacket = typename packet_traits<Scalar>::type;
196
+ static constexpr int DefaultSize = unpacket_traits<DefaultPacket>::size;
197
+ static constexpr bool value = Size < DefaultSize;
198
+ };
199
+
200
+ template <typename Src, typename Tgt>
201
+ struct type_casting_traits {
202
+ enum {
203
+ VectorizedCast =
204
+ is_degenerate<Src, Tgt>::value && packet_traits<Src>::Vectorizable && packet_traits<Tgt>::Vectorizable,
205
+ SrcCoeffRatio = 1,
206
+ TgtCoeffRatio = 1
207
+ };
208
+ };
209
+
210
+ // provides a succint template to define vectorized casting traits with respect to the largest accessible packet types
211
+ template <typename Src, typename Tgt>
212
+ struct vectorized_type_casting_traits {
213
+ enum : int {
214
+ DefaultSrcPacketSize = packet_traits<Src>::size,
215
+ DefaultTgtPacketSize = packet_traits<Tgt>::size,
216
+ VectorizedCast = 1,
217
+ SrcCoeffRatio = plain_enum_max(DefaultTgtPacketSize / DefaultSrcPacketSize, 1),
218
+ TgtCoeffRatio = plain_enum_max(DefaultSrcPacketSize / DefaultTgtPacketSize, 1)
219
+ };
220
+ };
221
+
222
+ /** \internal Wrapper to ensure that multiple packet types can map to the same
223
+ same underlying vector type. */
224
+ template <typename T, int unique_id = 0>
225
+ struct eigen_packet_wrapper {
226
+ EIGEN_ALWAYS_INLINE operator T&() { return m_val; }
227
+ EIGEN_ALWAYS_INLINE operator const T&() const { return m_val; }
228
+ EIGEN_ALWAYS_INLINE eigen_packet_wrapper() = default;
229
+ EIGEN_ALWAYS_INLINE eigen_packet_wrapper(const T& v) : m_val(v) {}
230
+ EIGEN_ALWAYS_INLINE eigen_packet_wrapper& operator=(const T& v) {
231
+ m_val = v;
232
+ return *this;
233
+ }
234
+
235
+ T m_val;
236
+ };
237
+
238
+ template <typename Target, typename Packet, bool IsSame = is_same<Target, Packet>::value>
239
+ struct preinterpret_generic;
240
+
241
+ template <typename Target, typename Packet>
242
+ struct preinterpret_generic<Target, Packet, false> {
243
+ // the packets are not the same, attempt scalar bit_cast
244
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Target run(const Packet& a) {
245
+ return numext::bit_cast<Target, Packet>(a);
246
+ }
247
+ };
248
+
249
+ template <typename Packet>
250
+ struct preinterpret_generic<Packet, Packet, true> {
251
+ // the packets are the same type: do nothing
252
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet run(const Packet& a) { return a; }
253
+ };
254
+
255
+ /** \internal \returns reinterpret_cast<Target>(a) */
256
+ template <typename Target, typename Packet>
257
+ EIGEN_DEVICE_FUNC inline Target preinterpret(const Packet& a) {
258
+ return preinterpret_generic<Target, Packet>::run(a);
259
+ }
260
+
261
+ template <typename SrcPacket, typename TgtPacket, bool Degenerate = is_degenerate<SrcPacket, TgtPacket>::value,
262
+ bool TgtIsHalf = is_half<TgtPacket>::value>
263
+ struct pcast_generic;
264
+
265
+ template <typename SrcPacket, typename TgtPacket>
266
+ struct pcast_generic<SrcPacket, TgtPacket, false, false> {
267
+ // the packets are not degenerate: attempt scalar static_cast
268
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TgtPacket run(const SrcPacket& a) {
269
+ return cast_impl<SrcPacket, TgtPacket>::run(a);
270
+ }
271
+ };
272
+
273
+ template <typename Packet>
274
+ struct pcast_generic<Packet, Packet, true, false> {
275
+ // the packets are the same: do nothing
276
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet run(const Packet& a) { return a; }
277
+ };
278
+
279
// Degenerate case: SrcPacket and TgtPacket have the same bit layout, so a
// bitwise reinterpret produces the same result as a value cast.
template <typename SrcPacket, typename TgtPacket, bool TgtIsHalf>
struct pcast_generic<SrcPacket, TgtPacket, true, TgtIsHalf> {
  // the packets are degenerate: preinterpret is equivalent to pcast
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TgtPacket run(const SrcPacket& a) { return preinterpret<TgtPacket>(a); }
};

/** \internal \returns static_cast<TgtType>(a) (coeff-wise) */
template <typename SrcPacket, typename TgtPacket>
EIGEN_DEVICE_FUNC inline TgtPacket pcast(const SrcPacket& a) {
  return pcast_generic<SrcPacket, TgtPacket>::run(a);
}
// Multi-packet overloads: forward 2, 4 or 8 source packets to pcast_generic
// (presumably used when one target packet consumes several source packets —
// behavior is defined by the pcast_generic specializations).
template <typename SrcPacket, typename TgtPacket>
EIGEN_DEVICE_FUNC inline TgtPacket pcast(const SrcPacket& a, const SrcPacket& b) {
  return pcast_generic<SrcPacket, TgtPacket>::run(a, b);
}
template <typename SrcPacket, typename TgtPacket>
EIGEN_DEVICE_FUNC inline TgtPacket pcast(const SrcPacket& a, const SrcPacket& b, const SrcPacket& c,
                                         const SrcPacket& d) {
  return pcast_generic<SrcPacket, TgtPacket>::run(a, b, c, d);
}
template <typename SrcPacket, typename TgtPacket>
EIGEN_DEVICE_FUNC inline TgtPacket pcast(const SrcPacket& a, const SrcPacket& b, const SrcPacket& c, const SrcPacket& d,
                                         const SrcPacket& e, const SrcPacket& f, const SrcPacket& g,
                                         const SrcPacket& h) {
  return pcast_generic<SrcPacket, TgtPacket>::run(a, b, c, d, e, f, g, h);
}

template <typename SrcPacket, typename TgtPacket>
struct pcast_generic<SrcPacket, TgtPacket, false, true> {
  // TgtPacket is a half packet of some other type:
  // cast to the corresponding full packet, then reinterpret (truncate) the
  // result down to the half packet.
  using DefaultTgtPacket = typename is_half<TgtPacket>::DefaultPacket;
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TgtPacket run(const SrcPacket& a) {
    return preinterpret<TgtPacket>(pcast<SrcPacket, DefaultTgtPacket>(a));
  }
};
/** \internal \returns a + b (coeff-wise) */
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet padd(const Packet& a, const Packet& b) {
  return a + b;
}
// Avoid compiler warning for boolean algebra: for bool, "addition" is logical OR.
template <>
EIGEN_DEVICE_FUNC inline bool padd(const bool& a, const bool& b) {
  return a || b;
}

/** \internal \returns a + b (coeff-wise), restricted to the lanes selected by \a umask.
 * There is no generic implementation. We only have implementations for specialized
 * cases. Generic case should not be called.
 */
template <typename Packet>
EIGEN_DEVICE_FUNC inline std::enable_if_t<unpacket_traits<Packet>::masked_fpops_available, Packet> padd(
    const Packet& a, const Packet& b, typename unpacket_traits<Packet>::mask_t umask);

/** \internal \returns a - b (coeff-wise) */
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet psub(const Packet& a, const Packet& b) {
  return a - b;
}

/** \internal \returns -a (coeff-wise) */
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet pnegate(const Packet& a) {
  // Negation of bool is intentionally rejected at compile time.
  EIGEN_STATIC_ASSERT((!is_same<typename unpacket_traits<Packet>::type, bool>::value),
                      NEGATE IS NOT DEFINED FOR BOOLEAN TYPES)
  return numext::negate(a);
}

/** \internal \returns conj(a) (coeff-wise) */
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet pconj(const Packet& a) {
  return numext::conj(a);
}

/** \internal \returns a * b (coeff-wise) */
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet pmul(const Packet& a, const Packet& b) {
  return a * b;
}
// Avoid compiler warning for boolean algebra: for bool, "multiplication" is logical AND.
template <>
EIGEN_DEVICE_FUNC inline bool pmul(const bool& a, const bool& b) {
  return a && b;
}

/** \internal \returns a / b (coeff-wise) */
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet pdiv(const Packet& a, const Packet& b) {
  return a / b;
}
// In the generic case, memset to all one bits.
template <typename Packet, typename EnableIf = void>
struct ptrue_impl {
  static EIGEN_DEVICE_FUNC inline Packet run(const Packet& /*a*/) {
    Packet b;
    memset(static_cast<void*>(&b), 0xff, sizeof(Packet));
    return b;
  }
};

// For booleans, we can only directly set a valid `bool` value to avoid UB.
template <>
struct ptrue_impl<bool, void> {
  static EIGEN_DEVICE_FUNC inline bool run(const bool& /*a*/) { return true; }
};

// For non-trivial scalars, set to Scalar(1) (i.e. a non-zero value).
// Although this is technically not a valid bitmask, the scalar path for pselect
// uses a comparison to zero, so this should still work in most cases. We don't
// have another option, since the scalar type requires initialization.
template <typename T>
struct ptrue_impl<T, std::enable_if_t<is_scalar<T>::value && NumTraits<T>::RequireInitialization>> {
  static EIGEN_DEVICE_FUNC inline T run(const T& /*a*/) { return T(1); }
};

/** \internal \returns one bits (the "all true" mask for \c Packet). */
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet ptrue(const Packet& a) {
  return ptrue_impl<Packet>::run(a);
}

// In the general case, memset to zero.
template <typename Packet, typename EnableIf = void>
struct pzero_impl {
  static EIGEN_DEVICE_FUNC inline Packet run(const Packet& /*a*/) {
    Packet b;
    memset(static_cast<void*>(&b), 0x00, sizeof(Packet));
    return b;
  }
};

// For scalars, explicitly set to Scalar(0), since the underlying representation
// for zero may not consist of all-zero bits.
template <typename T>
struct pzero_impl<T, std::enable_if_t<is_scalar<T>::value>> {
  static EIGEN_DEVICE_FUNC inline T run(const T& /*a*/) { return T(0); }
};

/** \internal \returns packet of zeros */
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet pzero(const Packet& a) {
  return pzero_impl<Packet>::run(a);
}
/** \internal \returns a <= b as a bit mask */
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet pcmp_le(const Packet& a, const Packet& b) {
  return a <= b ? ptrue(a) : pzero(a);
}

/** \internal \returns a < b as a bit mask */
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet pcmp_lt(const Packet& a, const Packet& b) {
  return a < b ? ptrue(a) : pzero(a);
}

/** \internal \returns a == b as a bit mask */
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet pcmp_eq(const Packet& a, const Packet& b) {
  return a == b ? ptrue(a) : pzero(a);
}

/** \internal \returns a < b or a==NaN or b==NaN as a bit mask */
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet pcmp_lt_or_nan(const Packet& a, const Packet& b) {
  // Implemented as !(a >= b): comparisons involving NaN are false, so the
  // negated form also yields "true" when either operand is NaN.
  return a >= b ? pzero(a) : ptrue(a);
}
// Generic bitwise functors operating on a single value of type T.
template <typename T>
struct bit_and {
  EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR EIGEN_ALWAYS_INLINE T operator()(const T& a, const T& b) const { return a & b; }
};

template <typename T>
struct bit_or {
  EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR EIGEN_ALWAYS_INLINE T operator()(const T& a, const T& b) const { return a | b; }
};

template <typename T>
struct bit_xor {
  EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR EIGEN_ALWAYS_INLINE T operator()(const T& a, const T& b) const { return a ^ b; }
};

template <typename T>
struct bit_not {
  EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR EIGEN_ALWAYS_INLINE T operator()(const T& a) const { return ~a; }
};

// bool specializations use logical operators so the result stays a valid bool
// (and avoids compiler warnings about bitwise ops on booleans).
template <>
struct bit_and<bool> {
  EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR EIGEN_ALWAYS_INLINE bool operator()(const bool& a, const bool& b) const {
    return a && b;
  }
};

template <>
struct bit_or<bool> {
  EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR EIGEN_ALWAYS_INLINE bool operator()(const bool& a, const bool& b) const {
    return a || b;
  }
};

template <>
struct bit_xor<bool> {
  EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR EIGEN_ALWAYS_INLINE bool operator()(const bool& a, const bool& b) const {
    return a != b;
  }
};

template <>
struct bit_not<bool> {
  EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR EIGEN_ALWAYS_INLINE bool operator()(const bool& a) const { return !a; }
};

// Use operators &, |, ^, ~.
template <typename T>
struct operator_bitwise_helper {
  EIGEN_DEVICE_FUNC static inline T bitwise_and(const T& a, const T& b) { return bit_and<T>()(a, b); }
  EIGEN_DEVICE_FUNC static inline T bitwise_or(const T& a, const T& b) { return bit_or<T>()(a, b); }
  EIGEN_DEVICE_FUNC static inline T bitwise_xor(const T& a, const T& b) { return bit_xor<T>()(a, b); }
  EIGEN_DEVICE_FUNC static inline T bitwise_not(const T& a) { return bit_not<T>()(a); }
};
+ // Apply binary operations byte-by-byte
506
+ template <typename T>
507
+ struct bytewise_bitwise_helper {
508
+ EIGEN_DEVICE_FUNC static inline T bitwise_and(const T& a, const T& b) {
509
+ return binary(a, b, bit_and<unsigned char>());
510
+ }
511
+ EIGEN_DEVICE_FUNC static inline T bitwise_or(const T& a, const T& b) { return binary(a, b, bit_or<unsigned char>()); }
512
+ EIGEN_DEVICE_FUNC static inline T bitwise_xor(const T& a, const T& b) {
513
+ return binary(a, b, bit_xor<unsigned char>());
514
+ }
515
+ EIGEN_DEVICE_FUNC static inline T bitwise_not(const T& a) { return unary(a, bit_not<unsigned char>()); }
516
+
517
+ private:
518
+ template <typename Op>
519
+ EIGEN_DEVICE_FUNC static inline T unary(const T& a, Op op) {
520
+ const unsigned char* a_ptr = reinterpret_cast<const unsigned char*>(&a);
521
+ T c;
522
+ unsigned char* c_ptr = reinterpret_cast<unsigned char*>(&c);
523
+ for (size_t i = 0; i < sizeof(T); ++i) {
524
+ *c_ptr++ = op(*a_ptr++);
525
+ }
526
+ return c;
527
+ }
528
+
529
+ template <typename Op>
530
+ EIGEN_DEVICE_FUNC static inline T binary(const T& a, const T& b, Op op) {
531
+ const unsigned char* a_ptr = reinterpret_cast<const unsigned char*>(&a);
532
+ const unsigned char* b_ptr = reinterpret_cast<const unsigned char*>(&b);
533
+ T c;
534
+ unsigned char* c_ptr = reinterpret_cast<unsigned char*>(&c);
535
+ for (size_t i = 0; i < sizeof(T); ++i) {
536
+ *c_ptr++ = op(*a_ptr++, *b_ptr++);
537
+ }
538
+ return c;
539
+ }
540
+ };
541
+
542
// In the general case, use byte-by-byte manipulation.
template <typename T, typename EnableIf = void>
struct bitwise_helper : public bytewise_bitwise_helper<T> {};

// For integers or non-trivial scalars, use binary operators.
template <typename T>
struct bitwise_helper<T, typename std::enable_if_t<is_scalar<T>::value &&
                                                   (NumTraits<T>::IsInteger || NumTraits<T>::RequireInitialization)>>
    : public operator_bitwise_helper<T> {};

/** \internal \returns the bitwise and of \a a and \a b */
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet pand(const Packet& a, const Packet& b) {
  return bitwise_helper<Packet>::bitwise_and(a, b);
}

/** \internal \returns the bitwise or of \a a and \a b */
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet por(const Packet& a, const Packet& b) {
  return bitwise_helper<Packet>::bitwise_or(a, b);
}

/** \internal \returns the bitwise xor of \a a and \a b */
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet pxor(const Packet& a, const Packet& b) {
  return bitwise_helper<Packet>::bitwise_xor(a, b);
}

/** \internal \returns the bitwise not of \a a */
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet pnot(const Packet& a) {
  return bitwise_helper<Packet>::bitwise_not(a);
}

/** \internal \returns the bitwise and of \a a and not \a b */
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet pandnot(const Packet& a, const Packet& b) {
  return pand(a, pnot(b));
}
// In the general case, use bitwise select.
template <typename Packet, typename EnableIf = void>
struct pselect_impl {
  static EIGEN_DEVICE_FUNC inline Packet run(const Packet& mask, const Packet& a, const Packet& b) {
    return por(pand(a, mask), pandnot(b, mask));
  }
};

// For scalars, use ternary select against zero (see ptrue_impl for why the
// mask may be Scalar(1) rather than an all-ones bit pattern).
template <typename Packet>
struct pselect_impl<Packet, std::enable_if_t<is_scalar<Packet>::value>> {
  static EIGEN_DEVICE_FUNC inline Packet run(const Packet& mask, const Packet& a, const Packet& b) {
    return numext::equal_strict(mask, Packet(0)) ? b : a;
  }
};

/** \internal \returns, per lane, \a a where \a mask is set and \a b where it is not */
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet pselect(const Packet& mask, const Packet& a, const Packet& b) {
  return pselect_impl<Packet>::run(mask, a, b);
}

template <>
EIGEN_DEVICE_FUNC inline bool pselect<bool>(const bool& cond, const bool& a, const bool& b) {
  return cond ? a : b;
}
/** \internal \returns the min or max of \a a and \a b (coeff-wise)
    If either \a a or \a b are NaN, the result is implementation defined. */
template <int NaNPropagation>
struct pminmax_impl {
  template <typename Packet, typename Op>
  static EIGEN_DEVICE_FUNC inline Packet run(const Packet& a, const Packet& b, Op op) {
    return op(a, b);
  }
};

/** \internal \returns the min or max of \a a and \a b (coeff-wise)
    If either \a a or \a b are NaN, NaN is returned. */
template <>
struct pminmax_impl<PropagateNaN> {
  template <typename Packet, typename Op>
  static EIGEN_DEVICE_FUNC inline Packet run(const Packet& a, const Packet& b, Op op) {
    // pcmp_eq(x, x) is false exactly when x is NaN.
    Packet not_nan_mask_a = pcmp_eq(a, a);
    Packet not_nan_mask_b = pcmp_eq(b, b);
    return pselect(not_nan_mask_a, pselect(not_nan_mask_b, op(a, b), b), a);
  }
};

/** \internal \returns the min or max of \a a and \a b (coeff-wise)
    If one operand is NaN, the other (numeric) operand is returned;
    NaN is returned only if both \a a and \a b are NaN.
    Equivalent to std::fmin(a, b). */
template <>
struct pminmax_impl<PropagateNumbers> {
  template <typename Packet, typename Op>
  static EIGEN_DEVICE_FUNC inline Packet run(const Packet& a, const Packet& b, Op op) {
    Packet not_nan_mask_a = pcmp_eq(a, a);
    Packet not_nan_mask_b = pcmp_eq(b, b);
    return pselect(not_nan_mask_a, pselect(not_nan_mask_b, op(a, b), a), b);
  }
};

// Wraps a binary function as a lambda so it can be passed to pminmax_impl::run.
#define EIGEN_BINARY_OP_NAN_PROPAGATION(Type, Func) [](const Type& a, const Type& b) { return Func(a, b); }

/** \internal \returns the min of \a a and \a b (coeff-wise).
    If \a a or \a b is NaN, the return value is implementation defined. */
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet pmin(const Packet& a, const Packet& b) {
  return numext::mini(a, b);
}

/** \internal \returns the min of \a a and \a b (coeff-wise).
    NaNPropagation determines the NaN propagation semantics. */
template <int NaNPropagation, typename Packet>
EIGEN_DEVICE_FUNC inline Packet pmin(const Packet& a, const Packet& b) {
  return pminmax_impl<NaNPropagation>::run(a, b, EIGEN_BINARY_OP_NAN_PROPAGATION(Packet, (pmin<Packet>)));
}

/** \internal \returns the max of \a a and \a b (coeff-wise)
    If \a a or \a b is NaN, the return value is implementation defined. */
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet pmax(const Packet& a, const Packet& b) {
  return numext::maxi(a, b);
}

/** \internal \returns the max of \a a and \a b (coeff-wise).
    NaNPropagation determines the NaN propagation semantics. */
template <int NaNPropagation, typename Packet>
EIGEN_DEVICE_FUNC inline Packet pmax(const Packet& a, const Packet& b) {
  return pminmax_impl<NaNPropagation>::run(a, b, EIGEN_BINARY_OP_NAN_PROPAGATION(Packet, (pmax<Packet>)));
}
/** \internal \returns the absolute value of \a a */
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet pabs(const Packet& a) {
  return numext::abs(a);
}
// Unsigned types are their own absolute value; return the input unchanged.
template <>
EIGEN_DEVICE_FUNC inline unsigned int pabs(const unsigned int& a) {
  return a;
}
template <>
EIGEN_DEVICE_FUNC inline unsigned long pabs(const unsigned long& a) {
  return a;
}
template <>
EIGEN_DEVICE_FUNC inline unsigned long long pabs(const unsigned long long& a) {
  return a;
}

/** \internal \returns the addsub value of \a a,b: even lanes compute a+b,
 * odd lanes compute a-b (lane parity given by peven_mask). */
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet paddsub(const Packet& a, const Packet& b) {
  return pselect(peven_mask(a), padd(a, b), psub(a, b));
}

/** \internal \returns the phase angle of \a a */
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet parg(const Packet& a) {
  using numext::arg;
  return arg(a);
}
/** \internal \returns \a a arithmetically shifted by N bits to the right
 * (sign-extending for signed types) */
template <int N, typename T>
EIGEN_DEVICE_FUNC inline T parithmetic_shift_right(const T& a) {
  return numext::arithmetic_shift_right(a, N);
}

/** \internal \returns \a a logically shifted by N bits to the right
 * (zero-filling) */
template <int N, typename T>
EIGEN_DEVICE_FUNC inline T plogical_shift_right(const T& a) {
  return numext::logical_shift_right(a, N);
}

/** \internal \returns \a a shifted by N bits to the left */
template <int N, typename T>
EIGEN_DEVICE_FUNC inline T plogical_shift_left(const T& a) {
  return numext::logical_shift_left(a, N);
}
/** \internal \returns the significand of the underlying floating point number,
 * and stores the exponent in \a exponent.
 * See https://en.cppreference.com/w/cpp/numeric/math/frexp
 */
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet pfrexp(const Packet& a, Packet& exponent) {
  int exp;
  EIGEN_USING_STD(frexp);
  Packet result = static_cast<Packet>(frexp(a, &exp));
  // The exponent is returned through the packet-typed out-parameter.
  exponent = static_cast<Packet>(exp);
  return result;
}

/** \internal \returns a * 2^((int)exponent)
 * See https://en.cppreference.com/w/cpp/numeric/math/ldexp
 */
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet pldexp(const Packet& a, const Packet& exponent) {
  EIGEN_USING_STD(ldexp)
  return static_cast<Packet>(ldexp(a, static_cast<int>(exponent)));
}
/** \internal \returns the absolute difference |a - b| of \a a and \a b (coeff-wise) */
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet pabsdiff(const Packet& a, const Packet& b) {
  // Subtract in the order that keeps the result non-negative.
  return pselect(pcmp_lt(a, b), psub(b, a), psub(a, b));
}
/** \internal \returns a packet version of \a *from, from must be properly aligned */
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet pload(const typename unpacket_traits<Packet>::type* from) {
  return *from;
}
+ /** \internal \returns n elements of a packet version of \a *from, from must be properly aligned
757
+ * offset indicates the starting element in which to load and
758
+ * offset + n <= unpacket_traits::size
759
+ * All elements before offset and after the last element loaded will initialized with zero */
760
+ template <typename Packet>
761
+ EIGEN_DEVICE_FUNC inline Packet pload_partial(const typename unpacket_traits<Packet>::type* from, const Index n,
762
+ const Index offset = 0) {
763
+ const Index packet_size = unpacket_traits<Packet>::size;
764
+ eigen_assert(n + offset <= packet_size && "number of elements plus offset will read past end of packet");
765
+ typedef typename unpacket_traits<Packet>::type Scalar;
766
+ EIGEN_ALIGN_MAX Scalar elements[packet_size] = {Scalar(0)};
767
+ for (Index i = offset; i < numext::mini(n + offset, packet_size); i++) {
768
+ elements[i] = from[i - offset];
769
+ }
770
+ return pload<Packet>(elements);
771
+ }
772
+
773
/** \internal \returns a packet version of \a *from, (un-aligned load) */
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet ploadu(const typename unpacket_traits<Packet>::type* from) {
  return *from;
}
+ /** \internal \returns n elements of a packet version of \a *from, (un-aligned load)
780
+ * All elements after the last element loaded will initialized with zero */
781
+ template <typename Packet>
782
+ EIGEN_DEVICE_FUNC inline Packet ploadu_partial(const typename unpacket_traits<Packet>::type* from, const Index n,
783
+ const Index offset = 0) {
784
+ const Index packet_size = unpacket_traits<Packet>::size;
785
+ eigen_assert(n + offset <= packet_size && "number of elements plus offset will read past end of packet");
786
+ typedef typename unpacket_traits<Packet>::type Scalar;
787
+ EIGEN_ALIGN_MAX Scalar elements[packet_size] = {Scalar(0)};
788
+ for (Index i = offset; i < numext::mini(n + offset, packet_size); i++) {
789
+ elements[i] = from[i - offset];
790
+ }
791
+ return pload<Packet>(elements);
792
+ }
793
+
794
/** \internal \returns a packet version of \a *from, (un-aligned masked load)
 * There is no generic implementation. We only have implementations for specialized
 * cases. Generic case should not be called.
 */
template <typename Packet>
EIGEN_DEVICE_FUNC inline std::enable_if_t<unpacket_traits<Packet>::masked_load_available, Packet> ploadu(
    const typename unpacket_traits<Packet>::type* from, typename unpacket_traits<Packet>::mask_t umask);

/** \internal \returns a packet with constant coefficients \a a, e.g.: (a,a,a,a) */
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet pset1(const typename unpacket_traits<Packet>::type& a) {
  return a;
}

/** \internal \returns a packet with constant coefficients set from bits
 * (declaration only; implemented per architecture) */
template <typename Packet, typename BitsType>
EIGEN_DEVICE_FUNC inline Packet pset1frombits(BitsType a);

/** \internal \returns a packet with constant coefficients \a a[0], e.g.: (a[0],a[0],a[0],a[0]) */
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet pload1(const typename unpacket_traits<Packet>::type* a) {
  return pset1<Packet>(*a);
}
/** \internal \returns a packet with elements of \a *from duplicated.
 * For instance, for a packet of 8 elements, 4 scalars will be read from \a *from and
 * duplicated to form: {from[0],from[0],from[1],from[1],from[2],from[2],from[3],from[3]}
 * Currently, this function is only used for scalar * complex products.
 */
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet ploaddup(const typename unpacket_traits<Packet>::type* from) {
  return *from;
}

/** \internal \returns a packet with elements of \a *from quadrupled.
 * For instance, for a packet of 8 elements, 2 scalars will be read from \a *from and
 * replicated to form: {from[0],from[0],from[0],from[0],from[1],from[1],from[1],from[1]}
 * Currently, this function is only used in matrix products.
 * For packet-size smaller or equal to 4, this function is equivalent to pload1
 */
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet ploadquad(const typename unpacket_traits<Packet>::type* from) {
  return pload1<Packet>(from);
}

/** \internal equivalent to
 * \code
 * a0 = pload1(a+0);
 * a1 = pload1(a+1);
 * a2 = pload1(a+2);
 * a3 = pload1(a+3);
 * \endcode
 * \sa pset1, pload1, ploaddup, pbroadcast2
 */
template <typename Packet>
EIGEN_DEVICE_FUNC inline void pbroadcast4(const typename unpacket_traits<Packet>::type* a, Packet& a0, Packet& a1,
                                          Packet& a2, Packet& a3) {
  a0 = pload1<Packet>(a + 0);
  a1 = pload1<Packet>(a + 1);
  a2 = pload1<Packet>(a + 2);
  a3 = pload1<Packet>(a + 3);
}

/** \internal equivalent to
 * \code
 * a0 = pload1(a+0);
 * a1 = pload1(a+1);
 * \endcode
 * \sa pset1, pload1, ploaddup, pbroadcast4
 */
template <typename Packet>
EIGEN_DEVICE_FUNC inline void pbroadcast2(const typename unpacket_traits<Packet>::type* a, Packet& a0, Packet& a1) {
  a0 = pload1<Packet>(a + 0);
  a1 = pload1<Packet>(a + 1);
}

/** \internal \brief Returns a packet with coefficients (a,a+1,...,a+packet_size-1). */
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet plset(const typename unpacket_traits<Packet>::type& a) {
  return a;
}
+ /** \internal \returns a packet with constant coefficients \a a, e.g.: (x, 0, x, 0),
877
+ where x is the value of all 1-bits. */
878
+ template <typename Packet>
879
+ EIGEN_DEVICE_FUNC inline Packet peven_mask(const Packet& /*a*/) {
880
+ typedef typename unpacket_traits<Packet>::type Scalar;
881
+ const size_t n = unpacket_traits<Packet>::size;
882
+ EIGEN_ALIGN_TO_BOUNDARY(sizeof(Packet)) Scalar elements[n];
883
+ for (size_t i = 0; i < n; ++i) {
884
+ memset(elements + i, ((i & 1) == 0 ? 0xff : 0), sizeof(Scalar));
885
+ }
886
+ return ploadu<Packet>(elements);
887
+ }
888
+
889
/** \internal copy the packet \a from to \a *to, \a to must be properly aligned */
template <typename Scalar, typename Packet>
EIGEN_DEVICE_FUNC inline void pstore(Scalar* to, const Packet& from) {
  (*to) = from;
}
+ /** \internal copy n elements of the packet \a from to \a *to, \a to must be properly aligned
896
+ * offset indicates the starting element in which to store and
897
+ * offset + n <= unpacket_traits::size */
898
+ template <typename Scalar, typename Packet>
899
+ EIGEN_DEVICE_FUNC inline void pstore_partial(Scalar* to, const Packet& from, const Index n, const Index offset = 0) {
900
+ const Index packet_size = unpacket_traits<Packet>::size;
901
+ eigen_assert(n + offset <= packet_size && "number of elements plus offset will write past end of packet");
902
+ EIGEN_ALIGN_MAX Scalar elements[packet_size];
903
+ pstore<Scalar>(elements, from);
904
+ for (Index i = 0; i < numext::mini(n, packet_size - offset); i++) {
905
+ to[i] = elements[i + offset];
906
+ }
907
+ }
908
+
909
/** \internal copy the packet \a from to \a *to, (un-aligned store) */
template <typename Scalar, typename Packet>
EIGEN_DEVICE_FUNC inline void pstoreu(Scalar* to, const Packet& from) {
  (*to) = from;
}
+ /** \internal copy n elements of the packet \a from to \a *to, (un-aligned store) */
916
+ template <typename Scalar, typename Packet>
917
+ EIGEN_DEVICE_FUNC inline void pstoreu_partial(Scalar* to, const Packet& from, const Index n, const Index offset = 0) {
918
+ const Index packet_size = unpacket_traits<Packet>::size;
919
+ eigen_assert(n + offset <= packet_size && "number of elements plus offset will write past end of packet");
920
+ EIGEN_ALIGN_MAX Scalar elements[packet_size];
921
+ pstore<Scalar>(elements, from);
922
+ for (Index i = 0; i < numext::mini(n, packet_size - offset); i++) {
923
+ to[i] = elements[i + offset];
924
+ }
925
+ }
926
+
927
/** \internal copy the packet \a from to \a *to, (un-aligned store with a mask)
 * There is no generic implementation. We only have implementations for specialized
 * cases. Generic case should not be called.
 */
template <typename Scalar, typename Packet>
EIGEN_DEVICE_FUNC inline std::enable_if_t<unpacket_traits<Packet>::masked_store_available, void> pstoreu(
    Scalar* to, const Packet& from, typename unpacket_traits<Packet>::mask_t umask);

/** \internal generic gather: the scalar fallback ignores the stride and
 * performs a plain unaligned load (a scalar "packet" has a single element). */
template <typename Scalar, typename Packet>
EIGEN_DEVICE_FUNC inline Packet pgather(const Scalar* from, Index /*stride*/) {
  return ploadu<Packet>(from);
}

/** \internal gather n strided elements into a packet; remaining lanes are
 * zero-initialized. */
template <typename Scalar, typename Packet>
EIGEN_DEVICE_FUNC inline Packet pgather_partial(const Scalar* from, Index stride, const Index n) {
  const Index packet_size = unpacket_traits<Packet>::size;
  EIGEN_ALIGN_MAX Scalar elements[packet_size] = {Scalar(0)};
  for (Index i = 0; i < numext::mini(n, packet_size); i++) {
    elements[i] = from[i * stride];
  }
  return pload<Packet>(elements);
}

/** \internal generic scatter: the scalar fallback ignores the stride and
 * performs a plain store. */
template <typename Scalar, typename Packet>
EIGEN_DEVICE_FUNC inline void pscatter(Scalar* to, const Packet& from, Index /*stride*/) {
  pstore(to, from);
}

/** \internal scatter the first n elements of \a from to strided locations in \a to. */
template <typename Scalar, typename Packet>
EIGEN_DEVICE_FUNC inline void pscatter_partial(Scalar* to, const Packet& from, Index stride, const Index n) {
  const Index packet_size = unpacket_traits<Packet>::size;
  EIGEN_ALIGN_MAX Scalar elements[packet_size];
  pstore<Scalar>(elements, from);
  for (Index i = 0; i < numext::mini(n, packet_size); i++) {
    to[i * stride] = elements[i];
  }
}
/** \internal tries to do cache prefetching of \a addr (best-effort hint; a
 * no-op on platforms without a suitable intrinsic) */
template <typename Scalar>
EIGEN_DEVICE_FUNC inline void prefetch(const Scalar* addr) {
#if defined(EIGEN_HIP_DEVICE_COMPILE)
  // do nothing
#elif defined(EIGEN_CUDA_ARCH)
#if defined(__LP64__) || EIGEN_OS_WIN64
  // 64-bit pointer operand constraint for inlined asm
  asm(" prefetch.L1 [ %1 ];" : "=l"(addr) : "l"(addr));
#else
  // 32-bit pointer operand constraint for inlined asm
  asm(" prefetch.L1 [ %1 ];" : "=r"(addr) : "r"(addr));
#endif
#elif (!EIGEN_COMP_MSVC) && (EIGEN_COMP_GNUC || EIGEN_COMP_CLANG || EIGEN_COMP_ICC)
  __builtin_prefetch(addr);
#endif
}
/** \internal \returns the reversed elements of \a a (scalar fallback: identity) */
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet preverse(const Packet& a) {
  return a;
}

/** \internal \returns \a a with real and imaginary part flipped (for complex type only) */
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet pcplxflip(const Packet& a) {
  return Packet(numext::imag(a), numext::real(a));
}

/**************************
 * Special math functions
 ***************************/

/** \internal \returns isnan(a) as a bit mask */
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet pisnan(const Packet& a) {
  // a != a holds exactly for NaN lanes; invert the "not NaN" mask.
  return pandnot(ptrue(a), pcmp_eq(a, a));
}

/** \internal \returns isinf(a) as a bit mask */
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet pisinf(const Packet& a) {
  using Scalar = typename unpacket_traits<Packet>::type;
  constexpr Scalar inf = NumTraits<Scalar>::infinity();
  return pcmp_eq(pabs(a), pset1<Packet>(inf));
}
/** \internal \returns the sine of \a a (coeff-wise) */
template <typename Packet>
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet psin(const Packet& a) {
  EIGEN_USING_STD(sin);
  return sin(a);
}

/** \internal \returns the cosine of \a a (coeff-wise) */
template <typename Packet>
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet pcos(const Packet& a) {
  EIGEN_USING_STD(cos);
  return cos(a);
}

/** \internal \returns the tan of \a a (coeff-wise) */
template <typename Packet>
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet ptan(const Packet& a) {
  EIGEN_USING_STD(tan);
  return tan(a);
}

/** \internal \returns the arc sine of \a a (coeff-wise) */
template <typename Packet>
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet pasin(const Packet& a) {
  EIGEN_USING_STD(asin);
  return asin(a);
}

/** \internal \returns the arc cosine of \a a (coeff-wise) */
template <typename Packet>
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet pacos(const Packet& a) {
  EIGEN_USING_STD(acos);
  return acos(a);
}

/** \internal \returns the hyperbolic sine of \a a (coeff-wise) */
template <typename Packet>
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet psinh(const Packet& a) {
  EIGEN_USING_STD(sinh);
  return sinh(a);
}

/** \internal \returns the hyperbolic cosine of \a a (coeff-wise) */
template <typename Packet>
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet pcosh(const Packet& a) {
  EIGEN_USING_STD(cosh);
  return cosh(a);
}

/** \internal \returns the arc tangent of \a a (coeff-wise) */
template <typename Packet>
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet patan(const Packet& a) {
  EIGEN_USING_STD(atan);
  return atan(a);
}

/** \internal \returns the hyperbolic tan of \a a (coeff-wise) */
template <typename Packet>
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet ptanh(const Packet& a) {
  EIGEN_USING_STD(tanh);
  return tanh(a);
}

/** \internal \returns the inverse hyperbolic tangent of \a a (coeff-wise) */
template <typename Packet>
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet patanh(const Packet& a) {
  EIGEN_USING_STD(atanh);
  return atanh(a);
}

/** \internal \returns the exp of \a a (coeff-wise) */
template <typename Packet>
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet pexp(const Packet& a) {
  EIGEN_USING_STD(exp);
  return exp(a);
}

/** \internal \returns the expm1 of \a a (coeff-wise) */
template <typename Packet>
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet pexpm1(const Packet& a) {
  return numext::expm1(a);
}

/** \internal \returns the log of \a a (coeff-wise) */
template <typename Packet>
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet plog(const Packet& a) {
  EIGEN_USING_STD(log);
  return log(a);
}

/** \internal \returns the log1p of \a a (coeff-wise) */
template <typename Packet>
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet plog1p(const Packet& a) {
  return numext::log1p(a);
}

/** \internal \returns the log10 of \a a (coeff-wise) */
template <typename Packet>
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet plog10(const Packet& a) {
  EIGEN_USING_STD(log10);
  return log10(a);
}

/** \internal \returns the base-2 logarithm of \a a (coeff-wise),
 * computed as log2(a) = log2(e) * ln(a) */
template <typename Packet>
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet plog2(const Packet& a) {
  using Scalar = typename internal::unpacket_traits<Packet>::type;
  using RealScalar = typename NumTraits<Scalar>::Real;
  return pmul(pset1<Packet>(Scalar(RealScalar(EIGEN_LOG2E))), plog(a));
}

/** \internal \returns the square-root of \a a (coeff-wise) */
template <typename Packet>
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet psqrt(const Packet& a) {
  return numext::sqrt(a);
}
+
1130
+ /** \internal \returns the cube-root of \a a (coeff-wise) */
1131
+ template <typename Packet>
1132
+ EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet pcbrt(const Packet& a) {
1133
+ return numext::cbrt(a);
1134
+ }
1135
+
1136
+ template <typename Packet, bool IsScalar = is_scalar<Packet>::value,
1137
+ bool IsInteger = NumTraits<typename unpacket_traits<Packet>::type>::IsInteger>
1138
+ struct nearest_integer_packetop_impl {
1139
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet run_floor(const Packet& x) { return numext::floor(x); }
1140
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet run_ceil(const Packet& x) { return numext::ceil(x); }
1141
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet run_rint(const Packet& x) { return numext::rint(x); }
1142
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet run_round(const Packet& x) { return numext::round(x); }
1143
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet run_trunc(const Packet& x) { return numext::trunc(x); }
1144
+ };
1145
+
1146
+ /** \internal \returns the rounded value of \a a (coeff-wise) */
1147
+ template <typename Packet>
1148
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet pround(const Packet& a) {
1149
+ return nearest_integer_packetop_impl<Packet>::run_round(a);
1150
+ }
1151
+
1152
+ /** \internal \returns the floor of \a a (coeff-wise) */
1153
+ template <typename Packet>
1154
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet pfloor(const Packet& a) {
1155
+ return nearest_integer_packetop_impl<Packet>::run_floor(a);
1156
+ }
1157
+
1158
+ /** \internal \returns the rounded value of \a a (coeff-wise) with current
1159
+ * rounding mode */
1160
+ template <typename Packet>
1161
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet print(const Packet& a) {
1162
+ return nearest_integer_packetop_impl<Packet>::run_rint(a);
1163
+ }
1164
+
1165
+ /** \internal \returns the ceil of \a a (coeff-wise) */
1166
+ template <typename Packet>
1167
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet pceil(const Packet& a) {
1168
+ return nearest_integer_packetop_impl<Packet>::run_ceil(a);
1169
+ }
1170
+
1171
+ /** \internal \returns the truncation of \a a (coeff-wise) */
1172
+ template <typename Packet>
1173
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet ptrunc(const Packet& a) {
1174
+ return nearest_integer_packetop_impl<Packet>::run_trunc(a);
1175
+ }
1176
+
1177
+ template <typename Packet, typename EnableIf = void>
1178
+ struct psign_impl {
1179
+ static EIGEN_DEVICE_FUNC inline Packet run(const Packet& a) { return numext::sign(a); }
1180
+ };
1181
+
1182
+ /** \internal \returns the sign of \a a (coeff-wise) */
1183
+ template <typename Packet>
1184
+ EIGEN_DEVICE_FUNC inline Packet psign(const Packet& a) {
1185
+ return psign_impl<Packet>::run(a);
1186
+ }
1187
+
1188
+ template <>
1189
+ EIGEN_DEVICE_FUNC inline bool psign(const bool& a) {
1190
+ return a;
1191
+ }
1192
+
1193
+ /** \internal \returns the first element of a packet */
1194
+ template <typename Packet>
1195
+ EIGEN_DEVICE_FUNC inline typename unpacket_traits<Packet>::type pfirst(const Packet& a) {
1196
+ return a;
1197
+ }
1198
+
1199
+ /** \internal \returns the sum of the elements of upper and lower half of \a a if \a a is larger than 4.
1200
+ * For a packet {a0, a1, a2, a3, a4, a5, a6, a7}, it returns a half packet {a0+a4, a1+a5, a2+a6, a3+a7}
1201
+ * For packet-size smaller or equal to 4, this boils down to a noop.
1202
+ */
1203
+ template <typename Packet>
1204
+ EIGEN_DEVICE_FUNC inline std::conditional_t<(unpacket_traits<Packet>::size % 8) == 0,
1205
+ typename unpacket_traits<Packet>::half, Packet>
1206
+ predux_half_dowto4(const Packet& a) {
1207
+ return a;
1208
+ }
1209
+
1210
+ // Slow generic implementation of Packet reduction.
1211
+ template <typename Packet, typename Op>
1212
+ EIGEN_DEVICE_FUNC inline typename unpacket_traits<Packet>::type predux_helper(const Packet& a, Op op) {
1213
+ typedef typename unpacket_traits<Packet>::type Scalar;
1214
+ const size_t n = unpacket_traits<Packet>::size;
1215
+ EIGEN_ALIGN_TO_BOUNDARY(sizeof(Packet)) Scalar elements[n];
1216
+ pstoreu<Scalar>(elements, a);
1217
+ for (size_t k = n / 2; k > 0; k /= 2) {
1218
+ for (size_t i = 0; i < k; ++i) {
1219
+ elements[i] = op(elements[i], elements[i + k]);
1220
+ }
1221
+ }
1222
+ return elements[0];
1223
+ }
1224
+
1225
+ /** \internal \returns the sum of the elements of \a a*/
1226
+ template <typename Packet>
1227
+ EIGEN_DEVICE_FUNC inline typename unpacket_traits<Packet>::type predux(const Packet& a) {
1228
+ return a;
1229
+ }
1230
+
1231
+ /** \internal \returns the product of the elements of \a a */
1232
+ template <typename Packet>
1233
+ EIGEN_DEVICE_FUNC inline typename unpacket_traits<Packet>::type predux_mul(const Packet& a) {
1234
+ typedef typename unpacket_traits<Packet>::type Scalar;
1235
+ return predux_helper(a, EIGEN_BINARY_OP_NAN_PROPAGATION(Scalar, (pmul<Scalar>)));
1236
+ }
1237
+
1238
+ /** \internal \returns the min of the elements of \a a */
1239
+ template <typename Packet>
1240
+ EIGEN_DEVICE_FUNC inline typename unpacket_traits<Packet>::type predux_min(const Packet& a) {
1241
+ typedef typename unpacket_traits<Packet>::type Scalar;
1242
+ return predux_helper(a, EIGEN_BINARY_OP_NAN_PROPAGATION(Scalar, (pmin<PropagateFast, Scalar>)));
1243
+ }
1244
+
1245
+ template <int NaNPropagation, typename Packet>
1246
+ EIGEN_DEVICE_FUNC inline typename unpacket_traits<Packet>::type predux_min(const Packet& a) {
1247
+ typedef typename unpacket_traits<Packet>::type Scalar;
1248
+ return predux_helper(a, EIGEN_BINARY_OP_NAN_PROPAGATION(Scalar, (pmin<NaNPropagation, Scalar>)));
1249
+ }
1250
+
1251
+ /** \internal \returns the min of the elements of \a a */
1252
+ template <typename Packet>
1253
+ EIGEN_DEVICE_FUNC inline typename unpacket_traits<Packet>::type predux_max(const Packet& a) {
1254
+ typedef typename unpacket_traits<Packet>::type Scalar;
1255
+ return predux_helper(a, EIGEN_BINARY_OP_NAN_PROPAGATION(Scalar, (pmax<PropagateFast, Scalar>)));
1256
+ }
1257
+
1258
+ template <int NaNPropagation, typename Packet>
1259
+ EIGEN_DEVICE_FUNC inline typename unpacket_traits<Packet>::type predux_max(const Packet& a) {
1260
+ typedef typename unpacket_traits<Packet>::type Scalar;
1261
+ return predux_helper(a, EIGEN_BINARY_OP_NAN_PROPAGATION(Scalar, (pmax<NaNPropagation, Scalar>)));
1262
+ }
1263
+
1264
+ #undef EIGEN_BINARY_OP_NAN_PROPAGATION
1265
+
1266
+ /** \internal \returns true if all coeffs of \a a means "true"
1267
+ * It is supposed to be called on values returned by pcmp_*.
1268
+ */
1269
+ // not needed yet
1270
+ // template<typename Packet> EIGEN_DEVICE_FUNC inline bool predux_all(const Packet& a)
1271
+ // { return bool(a); }
1272
+
1273
+ /** \internal \returns true if any coeffs of \a a means "true"
1274
+ * It is supposed to be called on values returned by pcmp_*.
1275
+ */
1276
+ template <typename Packet>
1277
+ EIGEN_DEVICE_FUNC inline bool predux_any(const Packet& a) {
1278
+ // Dirty but generic implementation where "true" is assumed to be non 0 and all the sames.
1279
+ // It is expected that "true" is either:
1280
+ // - Scalar(1)
1281
+ // - bits full of ones (NaN for floats),
1282
+ // - or first bit equals to 1 (1 for ints, smallest denormal for floats).
1283
+ // For all these cases, taking the sum is just fine, and this boils down to a no-op for scalars.
1284
+ typedef typename unpacket_traits<Packet>::type Scalar;
1285
+ return numext::not_equal_strict(predux(a), Scalar(0));
1286
+ }
1287
+
1288
+ /***************************************************************************
1289
+ * The following functions might not have to be overwritten for vectorized types
1290
+ ***************************************************************************/
1291
+
1292
+ // FMA instructions.
1293
+ /** \internal \returns a * b + c (coeff-wise) */
1294
+ template <typename Packet>
1295
+ EIGEN_DEVICE_FUNC inline Packet pmadd(const Packet& a, const Packet& b, const Packet& c) {
1296
+ return padd(pmul(a, b), c);
1297
+ }
1298
+
1299
+ /** \internal \returns a * b - c (coeff-wise) */
1300
+ template <typename Packet>
1301
+ EIGEN_DEVICE_FUNC inline Packet pmsub(const Packet& a, const Packet& b, const Packet& c) {
1302
+ return psub(pmul(a, b), c);
1303
+ }
1304
+
1305
+ /** \internal \returns -(a * b) + c (coeff-wise) */
1306
+ template <typename Packet>
1307
+ EIGEN_DEVICE_FUNC inline Packet pnmadd(const Packet& a, const Packet& b, const Packet& c) {
1308
+ return psub(c, pmul(a, b));
1309
+ }
1310
+
1311
+ /** \internal \returns -((a * b + c) (coeff-wise) */
1312
+ template <typename Packet>
1313
+ EIGEN_DEVICE_FUNC inline Packet pnmsub(const Packet& a, const Packet& b, const Packet& c) {
1314
+ return pnegate(pmadd(a, b, c));
1315
+ }
1316
+
1317
+ /** \internal copy a packet with constant coefficient \a a (e.g., [a,a,a,a]) to \a *to. \a to must be 16 bytes aligned
1318
+ */
1319
+ // NOTE: this function must really be templated on the packet type (think about different packet types for the same
1320
+ // scalar type)
1321
+ template <typename Packet>
1322
+ inline void pstore1(typename unpacket_traits<Packet>::type* to, const typename unpacket_traits<Packet>::type& a) {
1323
+ pstore(to, pset1<Packet>(a));
1324
+ }
1325
+
1326
+ /** \internal \returns a packet version of \a *from.
1327
+ * The pointer \a from must be aligned on a \a Alignment bytes boundary. */
1328
+ template <typename Packet, int Alignment>
1329
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet ploadt(const typename unpacket_traits<Packet>::type* from) {
1330
+ if (Alignment >= unpacket_traits<Packet>::alignment)
1331
+ return pload<Packet>(from);
1332
+ else
1333
+ return ploadu<Packet>(from);
1334
+ }
1335
+
1336
+ /** \internal \returns n elements of a packet version of \a *from.
1337
+ * The pointer \a from must be aligned on a \a Alignment bytes boundary. */
1338
+ template <typename Packet, int Alignment>
1339
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet ploadt_partial(const typename unpacket_traits<Packet>::type* from,
1340
+ const Index n, const Index offset = 0) {
1341
+ if (Alignment >= unpacket_traits<Packet>::alignment)
1342
+ return pload_partial<Packet>(from, n, offset);
1343
+ else
1344
+ return ploadu_partial<Packet>(from, n, offset);
1345
+ }
1346
+
1347
+ /** \internal copy the packet \a from to \a *to.
1348
+ * The pointer \a from must be aligned on a \a Alignment bytes boundary. */
1349
+ template <typename Scalar, typename Packet, int Alignment>
1350
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void pstoret(Scalar* to, const Packet& from) {
1351
+ if (Alignment >= unpacket_traits<Packet>::alignment)
1352
+ pstore(to, from);
1353
+ else
1354
+ pstoreu(to, from);
1355
+ }
1356
+
1357
+ /** \internal copy n elements of the packet \a from to \a *to.
1358
+ * The pointer \a from must be aligned on a \a Alignment bytes boundary. */
1359
+ template <typename Scalar, typename Packet, int Alignment>
1360
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void pstoret_partial(Scalar* to, const Packet& from, const Index n,
1361
+ const Index offset = 0) {
1362
+ if (Alignment >= unpacket_traits<Packet>::alignment)
1363
+ pstore_partial(to, from, n, offset);
1364
+ else
1365
+ pstoreu_partial(to, from, n, offset);
1366
+ }
1367
+
1368
+ /** \internal \returns a packet version of \a *from.
1369
+ * Unlike ploadt, ploadt_ro takes advantage of the read-only memory path on the
1370
+ * hardware if available to speedup the loading of data that won't be modified
1371
+ * by the current computation.
1372
+ */
1373
+ template <typename Packet, int LoadMode>
1374
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet ploadt_ro(const typename unpacket_traits<Packet>::type* from) {
1375
+ return ploadt<Packet, LoadMode>(from);
1376
+ }
1377
+
1378
+ /***************************************************************************
1379
+ * Fast complex products (GCC generates a function call which is very slow)
1380
+ ***************************************************************************/
1381
+
1382
+ // Eigen+CUDA does not support complexes.
1383
+ #if !defined(EIGEN_GPUCC)
1384
+
1385
+ template <>
1386
+ inline std::complex<float> pmul(const std::complex<float>& a, const std::complex<float>& b) {
1387
+ return std::complex<float>(a.real() * b.real() - a.imag() * b.imag(), a.imag() * b.real() + a.real() * b.imag());
1388
+ }
1389
+
1390
+ template <>
1391
+ inline std::complex<double> pmul(const std::complex<double>& a, const std::complex<double>& b) {
1392
+ return std::complex<double>(a.real() * b.real() - a.imag() * b.imag(), a.imag() * b.real() + a.real() * b.imag());
1393
+ }
1394
+
1395
+ #endif
1396
+
1397
+ /***************************************************************************
1398
+ * PacketBlock, that is a collection of N packets where the number of words
1399
+ * in the packet is a multiple of N.
1400
+ ***************************************************************************/
1401
+ template <typename Packet, int N = unpacket_traits<Packet>::size>
1402
+ struct PacketBlock {
1403
+ Packet packet[N];
1404
+ };
1405
+
1406
+ template <typename Packet>
1407
+ EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet, 1>& /*kernel*/) {
1408
+ // Nothing to do in the scalar case, i.e. a 1x1 matrix.
1409
+ }
1410
+
1411
+ /***************************************************************************
1412
+ * Selector, i.e. vector of N boolean values used to select (i.e. blend)
1413
+ * words from 2 packets.
1414
+ ***************************************************************************/
1415
+ template <size_t N>
1416
+ struct Selector {
1417
+ bool select[N];
1418
+ };
1419
+
1420
+ template <typename Packet>
1421
+ EIGEN_DEVICE_FUNC inline Packet pblend(const Selector<unpacket_traits<Packet>::size>& ifPacket,
1422
+ const Packet& thenPacket, const Packet& elsePacket) {
1423
+ return ifPacket.select[0] ? thenPacket : elsePacket;
1424
+ }
1425
+
1426
+ /** \internal \returns 1 / a (coeff-wise) */
1427
+ template <typename Packet>
1428
+ EIGEN_DEVICE_FUNC inline Packet preciprocal(const Packet& a) {
1429
+ using Scalar = typename unpacket_traits<Packet>::type;
1430
+ return pdiv(pset1<Packet>(Scalar(1)), a);
1431
+ }
1432
+
1433
+ /** \internal \returns the reciprocal square-root of \a a (coeff-wise) */
1434
+ template <typename Packet>
1435
+ EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet prsqrt(const Packet& a) {
1436
+ return preciprocal<Packet>(psqrt(a));
1437
+ }
1438
+
1439
+ template <typename Packet, bool IsScalar = is_scalar<Packet>::value,
1440
+ bool IsInteger = NumTraits<typename unpacket_traits<Packet>::type>::IsInteger>
1441
+ struct psignbit_impl;
1442
+ template <typename Packet, bool IsInteger>
1443
+ struct psignbit_impl<Packet, true, IsInteger> {
1444
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE static constexpr Packet run(const Packet& a) { return numext::signbit(a); }
1445
+ };
1446
+ template <typename Packet>
1447
+ struct psignbit_impl<Packet, false, false> {
1448
+ // generic implementation if not specialized in PacketMath.h
1449
+ // slower than arithmetic shift
1450
+ typedef typename unpacket_traits<Packet>::type Scalar;
1451
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE static Packet run(const Packet& a) {
1452
+ const Packet cst_pos_one = pset1<Packet>(Scalar(1));
1453
+ const Packet cst_neg_one = pset1<Packet>(Scalar(-1));
1454
+ return pcmp_eq(por(pand(a, cst_neg_one), cst_pos_one), cst_neg_one);
1455
+ }
1456
+ };
1457
+ template <typename Packet>
1458
+ struct psignbit_impl<Packet, false, true> {
1459
+ // generic implementation for integer packets
1460
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE static constexpr Packet run(const Packet& a) { return pcmp_lt(a, pzero(a)); }
1461
+ };
1462
+ /** \internal \returns the sign bit of \a a as a bitmask*/
1463
+ template <typename Packet>
1464
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE constexpr Packet psignbit(const Packet& a) {
1465
+ return psignbit_impl<Packet>::run(a);
1466
+ }
1467
+
1468
+ /** \internal \returns the 2-argument arc tangent of \a y and \a x (coeff-wise) */
1469
+ template <typename Packet, std::enable_if_t<is_scalar<Packet>::value, int> = 0>
1470
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet patan2(const Packet& y, const Packet& x) {
1471
+ return numext::atan2(y, x);
1472
+ }
1473
+
1474
+ /** \internal \returns the 2-argument arc tangent of \a y and \a x (coeff-wise) */
1475
+ template <typename Packet, std::enable_if_t<!is_scalar<Packet>::value, int> = 0>
1476
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet patan2(const Packet& y, const Packet& x) {
1477
+ typedef typename internal::unpacket_traits<Packet>::type Scalar;
1478
+
1479
+ // See https://en.cppreference.com/w/cpp/numeric/math/atan2
1480
+ // for how corner cases are supposed to be handled according to the
1481
+ // IEEE floating-point standard (IEC 60559).
1482
+ const Packet kSignMask = pset1<Packet>(-Scalar(0));
1483
+ const Packet kZero = pzero(x);
1484
+ const Packet kOne = pset1<Packet>(Scalar(1));
1485
+ const Packet kPi = pset1<Packet>(Scalar(EIGEN_PI));
1486
+
1487
+ const Packet x_has_signbit = psignbit(x);
1488
+ const Packet y_signmask = pand(y, kSignMask);
1489
+ const Packet x_signmask = pand(x, kSignMask);
1490
+ const Packet result_signmask = pxor(y_signmask, x_signmask);
1491
+ const Packet shift = por(pand(x_has_signbit, kPi), y_signmask);
1492
+
1493
+ const Packet x_and_y_are_same = pcmp_eq(pabs(x), pabs(y));
1494
+ const Packet x_and_y_are_zero = pcmp_eq(por(x, y), kZero);
1495
+
1496
+ Packet arg = pdiv(y, x);
1497
+ arg = pselect(x_and_y_are_same, por(kOne, result_signmask), arg);
1498
+ arg = pselect(x_and_y_are_zero, result_signmask, arg);
1499
+
1500
+ Packet result = patan(arg);
1501
+ result = padd(result, shift);
1502
+ return result;
1503
+ }
1504
+
1505
+ /** \internal \returns the argument of \a a as a complex number */
1506
+ template <typename Packet, std::enable_if_t<is_scalar<Packet>::value, int> = 0>
1507
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet pcarg(const Packet& a) {
1508
+ return Packet(numext::arg(a));
1509
+ }
1510
+
1511
+ /** \internal \returns the argument of \a a as a complex number */
1512
+ template <typename Packet, std::enable_if_t<!is_scalar<Packet>::value, int> = 0>
1513
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet pcarg(const Packet& a) {
1514
+ EIGEN_STATIC_ASSERT(NumTraits<typename unpacket_traits<Packet>::type>::IsComplex,
1515
+ THIS METHOD IS FOR COMPLEX TYPES ONLY)
1516
+ using RealPacket = typename unpacket_traits<Packet>::as_real;
1517
+ // a // r i r i ...
1518
+ RealPacket aflip = pcplxflip(a).v; // i r i r ...
1519
+ RealPacket result = patan2(aflip, a.v); // atan2 crap atan2 crap ...
1520
+ return (Packet)pand(result, peven_mask(result)); // atan2 0 atan2 0 ...
1521
+ }
1522
+
1523
+ } // end namespace internal
1524
+
1525
+ } // end namespace Eigen
1526
+
1527
+ #endif // EIGEN_GENERIC_PACKET_MATH_H
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/GlobalFunctions.h ADDED
@@ -0,0 +1,229 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2010-2016 Gael Guennebaud <gael.guennebaud@inria.fr>
5
+ // Copyright (C) 2010 Benoit Jacob <jacob.benoit.1@gmail.com>
6
+ //
7
+ // This Source Code Form is subject to the terms of the Mozilla
8
+ // Public License v. 2.0. If a copy of the MPL was not distributed
9
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
10
+
11
+ #ifndef EIGEN_GLOBAL_FUNCTIONS_H
12
+ #define EIGEN_GLOBAL_FUNCTIONS_H
13
+
14
+ #ifdef EIGEN_PARSED_BY_DOXYGEN
15
+
16
+ #define EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(NAME, FUNCTOR, DOC_OP, DOC_DETAILS) \
17
+ /** \returns an expression of the coefficient-wise DOC_OP of \a x \
18
+ \ \
19
+ DOC_DETAILS \
20
+ \ \
21
+ \sa <a href="group__CoeffwiseMathFunctions.html#cwisetable_##NAME">Math functions</a>, class CwiseUnaryOp \
22
+ */ \
23
+ template <typename Derived> \
24
+ inline const Eigen::CwiseUnaryOp<Eigen::internal::FUNCTOR<typename Derived::Scalar>, const Derived> NAME( \
25
+ const Eigen::ArrayBase<Derived>& x);
26
+
27
+ #else
28
+
29
+ #define EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(NAME, FUNCTOR, DOC_OP, DOC_DETAILS) \
30
+ template <typename Derived> \
31
+ inline const Eigen::CwiseUnaryOp<Eigen::internal::FUNCTOR<typename Derived::Scalar>, const Derived>(NAME)( \
32
+ const Eigen::ArrayBase<Derived>& x) { \
33
+ return Eigen::CwiseUnaryOp<Eigen::internal::FUNCTOR<typename Derived::Scalar>, const Derived>(x.derived()); \
34
+ }
35
+
36
+ #endif // EIGEN_PARSED_BY_DOXYGEN
37
+
38
+ #define EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(NAME, FUNCTOR) \
39
+ \
40
+ template <typename Derived> \
41
+ struct NAME##_retval<ArrayBase<Derived> > { \
42
+ typedef const Eigen::CwiseUnaryOp<Eigen::internal::FUNCTOR<typename Derived::Scalar>, const Derived> type; \
43
+ }; \
44
+ template <typename Derived> \
45
+ struct NAME##_impl<ArrayBase<Derived> > { \
46
+ static inline typename NAME##_retval<ArrayBase<Derived> >::type run(const Eigen::ArrayBase<Derived>& x) { \
47
+ return typename NAME##_retval<ArrayBase<Derived> >::type(x.derived()); \
48
+ } \
49
+ };
50
+
51
+ // IWYU pragma: private
52
+ #include "./InternalHeaderCheck.h"
53
+
54
+ namespace Eigen {
55
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(real, scalar_real_op, real part,\sa ArrayBase::real)
56
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(imag, scalar_imag_op, imaginary part,\sa ArrayBase::imag)
57
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(conj, scalar_conjugate_op, complex conjugate,\sa ArrayBase::conjugate)
58
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(inverse, scalar_inverse_op, inverse,\sa ArrayBase::inverse)
59
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(sin, scalar_sin_op, sine,\sa ArrayBase::sin)
60
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(cos, scalar_cos_op, cosine,\sa ArrayBase::cos)
61
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(tan, scalar_tan_op, tangent,\sa ArrayBase::tan)
62
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(atan, scalar_atan_op, arc - tangent,\sa ArrayBase::atan)
63
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(asin, scalar_asin_op, arc - sine,\sa ArrayBase::asin)
64
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(acos, scalar_acos_op, arc - consine,\sa ArrayBase::acos)
65
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(sinh, scalar_sinh_op, hyperbolic sine,\sa ArrayBase::sinh)
66
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(cosh, scalar_cosh_op, hyperbolic cosine,\sa ArrayBase::cosh)
67
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(tanh, scalar_tanh_op, hyperbolic tangent,\sa ArrayBase::tanh)
68
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(asinh, scalar_asinh_op, inverse hyperbolic sine,\sa ArrayBase::asinh)
69
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(acosh, scalar_acosh_op, inverse hyperbolic cosine,\sa ArrayBase::acosh)
70
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(atanh, scalar_atanh_op, inverse hyperbolic tangent,\sa ArrayBase::atanh)
71
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(logistic, scalar_logistic_op, logistic function,\sa ArrayBase::logistic)
72
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(lgamma, scalar_lgamma_op,
73
+ natural logarithm of the gamma function,\sa ArrayBase::lgamma)
74
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(digamma, scalar_digamma_op, derivative of lgamma,\sa ArrayBase::digamma)
75
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(erf, scalar_erf_op, error function,\sa ArrayBase::erf)
76
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(erfc, scalar_erfc_op, complement error function,\sa ArrayBase::erfc)
77
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(ndtri, scalar_ndtri_op, inverse normal distribution function,\sa ArrayBase::ndtri)
78
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(exp, scalar_exp_op, exponential,\sa ArrayBase::exp)
79
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(expm1, scalar_expm1_op, exponential of a value minus 1,\sa ArrayBase::expm1)
80
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(log, scalar_log_op, natural logarithm,\sa Eigen::log10 DOXCOMMA ArrayBase::log)
81
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(log1p, scalar_log1p_op, natural logarithm of 1 plus the value,\sa ArrayBase::log1p)
82
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(log10, scalar_log10_op, base 10 logarithm,\sa Eigen::log DOXCOMMA ArrayBase::log10)
83
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(log2, scalar_log2_op, base 2 logarithm,\sa Eigen::log DOXCOMMA ArrayBase::log2)
84
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(abs, scalar_abs_op, absolute value,\sa ArrayBase::abs DOXCOMMA MatrixBase::cwiseAbs)
85
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(abs2, scalar_abs2_op,
86
+ squared absolute value,\sa ArrayBase::abs2 DOXCOMMA MatrixBase::cwiseAbs2)
87
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(arg, scalar_arg_op, complex argument,\sa ArrayBase::arg DOXCOMMA MatrixBase::cwiseArg)
88
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(carg, scalar_carg_op,
89
+ complex argument, \sa ArrayBase::carg DOXCOMMA MatrixBase::cwiseCArg)
90
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(sqrt, scalar_sqrt_op, square root,\sa ArrayBase::sqrt DOXCOMMA MatrixBase::cwiseSqrt)
91
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(cbrt, scalar_cbrt_op, cube root,\sa ArrayBase::cbrt DOXCOMMA MatrixBase::cwiseCbrt)
92
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(rsqrt, scalar_rsqrt_op, reciprocal square root,\sa ArrayBase::rsqrt)
93
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(square, scalar_square_op,
94
+ square(power 2),\sa Eigen::abs2 DOXCOMMA Eigen::pow DOXCOMMA ArrayBase::square)
95
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(cube, scalar_cube_op, cube(power 3),\sa Eigen::pow DOXCOMMA ArrayBase::cube)
96
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(rint, scalar_rint_op,
97
+ nearest integer,\sa Eigen::floor DOXCOMMA Eigen::ceil DOXCOMMA ArrayBase::round)
98
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(round, scalar_round_op,
99
+ nearest integer,\sa Eigen::floor DOXCOMMA Eigen::ceil DOXCOMMA ArrayBase::round)
100
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(
101
+ floor, scalar_floor_op, nearest integer not greater than the given value,\sa Eigen::ceil DOXCOMMA ArrayBase::floor)
102
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(
103
+ ceil, scalar_ceil_op, nearest integer not less than the given value,\sa Eigen::floor DOXCOMMA ArrayBase::ceil)
104
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(trunc, scalar_trunc_op,
105
+ nearest integer not greater in magnitude than the given value,\sa Eigen::trunc DOXCOMMA
106
+ ArrayBase::trunc)
107
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(
108
+ isnan, scalar_isnan_op, not -a - number test,\sa Eigen::isinf DOXCOMMA Eigen::isfinite DOXCOMMA ArrayBase::isnan)
109
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(
110
+ isinf, scalar_isinf_op, infinite value test,\sa Eigen::isnan DOXCOMMA Eigen::isfinite DOXCOMMA ArrayBase::isinf)
111
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(isfinite, scalar_isfinite_op,
112
+ finite value test,\sa Eigen::isinf DOXCOMMA Eigen::isnan DOXCOMMA ArrayBase::isfinite)
113
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(sign, scalar_sign_op, sign(or 0),\sa ArrayBase::sign)
114
+
115
+ template <typename Derived, typename ScalarExponent>
116
+ using GlobalUnaryPowReturnType = std::enable_if_t<
117
+ !internal::is_arithmetic<typename NumTraits<Derived>::Real>::value &&
118
+ internal::is_arithmetic<typename NumTraits<ScalarExponent>::Real>::value,
119
+ CwiseUnaryOp<internal::scalar_unary_pow_op<typename Derived::Scalar, ScalarExponent>, const Derived> >;
120
+
121
+ /** \returns an expression of the coefficient-wise power of \a x to the given constant \a exponent.
122
+ *
123
+ * \tparam ScalarExponent is the scalar type of \a exponent. It must be compatible with the scalar type of the given
124
+ * expression (\c Derived::Scalar).
125
+ *
126
+ * \sa ArrayBase::pow()
127
+ *
128
+ * \relates ArrayBase
129
+ */
130
+ #ifdef EIGEN_PARSED_BY_DOXYGEN
131
+ template <typename Derived, typename ScalarExponent>
132
+ EIGEN_DEVICE_FUNC inline const GlobalUnaryPowReturnType<Derived, ScalarExponent> pow(const Eigen::ArrayBase<Derived>& x,
133
+ const ScalarExponent& exponent);
134
+ #else
135
+ template <typename Derived, typename ScalarExponent>
136
+ EIGEN_DEVICE_FUNC inline const GlobalUnaryPowReturnType<Derived, ScalarExponent> pow(const Eigen::ArrayBase<Derived>& x,
137
+ const ScalarExponent& exponent) {
138
+ return GlobalUnaryPowReturnType<Derived, ScalarExponent>(
139
+ x.derived(), internal::scalar_unary_pow_op<typename Derived::Scalar, ScalarExponent>(exponent));
140
+ }
141
+ #endif
142
+
143
+ /** \returns an expression of the coefficient-wise power of \a x to the given array of \a exponents.
144
+ *
145
+ * This function computes the coefficient-wise power.
146
+ *
147
+ * Example: \include Cwise_array_power_array.cpp
148
+ * Output: \verbinclude Cwise_array_power_array.out
149
+ *
150
+ * \sa ArrayBase::pow()
151
+ *
152
+ * \relates ArrayBase
153
+ */
154
+ template <typename Derived, typename ExponentDerived>
155
+ inline const Eigen::CwiseBinaryOp<
156
+ Eigen::internal::scalar_pow_op<typename Derived::Scalar, typename ExponentDerived::Scalar>, const Derived,
157
+ const ExponentDerived>
158
+ pow(const Eigen::ArrayBase<Derived>& x, const Eigen::ArrayBase<ExponentDerived>& exponents) {
159
+ return Eigen::CwiseBinaryOp<
160
+ Eigen::internal::scalar_pow_op<typename Derived::Scalar, typename ExponentDerived::Scalar>, const Derived,
161
+ const ExponentDerived>(x.derived(), exponents.derived());
162
+ }
163
+
164
+ /** \returns an expression of the coefficient-wise power of the scalar \a x to the given array of \a exponents.
165
+ *
166
+ * This function computes the coefficient-wise power between a scalar and an array of exponents.
167
+ *
168
+ * \tparam Scalar is the scalar type of \a x. It must be compatible with the scalar type of the given array expression
169
+ * (\c Derived::Scalar).
170
+ *
171
+ * Example: \include Cwise_scalar_power_array.cpp
172
+ * Output: \verbinclude Cwise_scalar_power_array.out
173
+ *
174
+ * \sa ArrayBase::pow()
175
+ *
176
+ * \relates ArrayBase
177
+ */
178
+ #ifdef EIGEN_PARSED_BY_DOXYGEN
179
+ template <typename Scalar, typename Derived>
180
+ inline const CwiseBinaryOp<internal::scalar_pow_op<Scalar, Derived::Scalar>, Constant<Scalar>, Derived> pow(
181
+ const Scalar& x, const Eigen::ArrayBase<Derived>& x);
182
+ #else
183
+ template <typename Scalar, typename Derived>
184
+ EIGEN_DEVICE_FUNC inline const EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(
185
+ typename internal::promote_scalar_arg<typename Derived::Scalar EIGEN_COMMA Scalar EIGEN_COMMA
186
+ EIGEN_SCALAR_BINARY_SUPPORTED(pow, Scalar,
187
+ typename Derived::Scalar)>::type,
188
+ Derived, pow) pow(const Scalar& x, const Eigen::ArrayBase<Derived>& exponents) {
189
+ typedef
190
+ typename internal::promote_scalar_arg<typename Derived::Scalar, Scalar,
191
+ EIGEN_SCALAR_BINARY_SUPPORTED(pow, Scalar, typename Derived::Scalar)>::type
192
+ PromotedScalar;
193
+ return EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(PromotedScalar, Derived, pow)(
194
+ typename internal::plain_constant_type<Derived, PromotedScalar>::type(
195
+ exponents.derived().rows(), exponents.derived().cols(), internal::scalar_constant_op<PromotedScalar>(x)),
196
+ exponents.derived());
197
+ }
198
+ #endif
199
+
200
+ /** \returns an expression of the coefficient-wise atan2(\a x, \a y). \a x and \a y must be of the same type.
201
+ *
202
+ * This function computes the coefficient-wise atan2().
203
+ *
204
+ * \sa ArrayBase::atan2()
205
+ *
206
+ * \relates ArrayBase
207
+ */
208
+ template <typename LhsDerived, typename RhsDerived>
209
+ inline const std::enable_if_t<
210
+ std::is_same<typename LhsDerived::Scalar, typename RhsDerived::Scalar>::value,
211
+ Eigen::CwiseBinaryOp<Eigen::internal::scalar_atan2_op<typename LhsDerived::Scalar, typename RhsDerived::Scalar>,
212
+ const LhsDerived, const RhsDerived> >
213
+ atan2(const Eigen::ArrayBase<LhsDerived>& x, const Eigen::ArrayBase<RhsDerived>& exponents) {
214
+ return Eigen::CwiseBinaryOp<
215
+ Eigen::internal::scalar_atan2_op<typename LhsDerived::Scalar, typename RhsDerived::Scalar>, const LhsDerived,
216
+ const RhsDerived>(x.derived(), exponents.derived());
217
+ }
218
+
219
+ namespace internal {
220
+ EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(real, scalar_real_op)
221
+ EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(imag, scalar_imag_op)
222
+ EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(abs2, scalar_abs2_op)
223
+ } // namespace internal
224
+ } // namespace Eigen
225
+
226
+ // TODO: cleanly disable those functions that are not supported on Array (numext::real_ref, internal::random,
227
+ // internal::isApprox...)
228
+
229
+ #endif // EIGEN_GLOBAL_FUNCTIONS_H
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/IO.h ADDED
@@ -0,0 +1,233 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
5
+ // Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
6
+ //
7
+ // This Source Code Form is subject to the terms of the Mozilla
8
+ // Public License v. 2.0. If a copy of the MPL was not distributed
9
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
10
+
11
+ #ifndef EIGEN_IO_H
12
+ #define EIGEN_IO_H
13
+
14
+ // IWYU pragma: private
15
+ #include "./InternalHeaderCheck.h"
16
+
17
+ namespace Eigen {
18
+
19
+ enum { DontAlignCols = 1 };
20
+ enum { StreamPrecision = -1, FullPrecision = -2 };
21
+
22
+ namespace internal {
23
+ template <typename Derived>
24
+ std::ostream& print_matrix(std::ostream& s, const Derived& _m, const IOFormat& fmt);
25
+ }
26
+
27
+ /** \class IOFormat
28
+ * \ingroup Core_Module
29
+ *
30
+ * \brief Stores a set of parameters controlling the way matrices are printed
31
+ *
32
+ * List of available parameters:
33
+ * - \b precision number of digits for floating point values, or one of the special constants \c StreamPrecision and \c
34
+ * FullPrecision. The default is the special value \c StreamPrecision which means to use the stream's own precision
35
+ * setting, as set for instance using \c cout.precision(3). The other special value \c FullPrecision means that the
36
+ * number of digits will be computed to match the full precision of each floating-point type.
37
+ * - \b flags an OR-ed combination of flags, the default value is 0, the only currently available flag is \c
38
+ * DontAlignCols which allows to disable the alignment of columns, resulting in faster code.
39
+ * - \b coeffSeparator string printed between two coefficients of the same row
40
+ * - \b rowSeparator string printed between two rows
41
+ * - \b rowPrefix string printed at the beginning of each row
42
+ * - \b rowSuffix string printed at the end of each row
43
+ * - \b matPrefix string printed at the beginning of the matrix
44
+ * - \b matSuffix string printed at the end of the matrix
45
+ * - \b fill character printed to fill the empty space in aligned columns
46
+ *
47
+ * Example: \include IOFormat.cpp
48
+ * Output: \verbinclude IOFormat.out
49
+ *
50
+ * \sa DenseBase::format(), class WithFormat
51
+ */
52
+ struct IOFormat {
53
+ /** Default constructor, see class IOFormat for the meaning of the parameters */
54
+ IOFormat(int _precision = StreamPrecision, int _flags = 0, const std::string& _coeffSeparator = " ",
55
+ const std::string& _rowSeparator = "\n", const std::string& _rowPrefix = "",
56
+ const std::string& _rowSuffix = "", const std::string& _matPrefix = "", const std::string& _matSuffix = "",
57
+ const char _fill = ' ')
58
+ : matPrefix(_matPrefix),
59
+ matSuffix(_matSuffix),
60
+ rowPrefix(_rowPrefix),
61
+ rowSuffix(_rowSuffix),
62
+ rowSeparator(_rowSeparator),
63
+ rowSpacer(""),
64
+ coeffSeparator(_coeffSeparator),
65
+ fill(_fill),
66
+ precision(_precision),
67
+ flags(_flags) {
68
+ // TODO check if rowPrefix, rowSuffix or rowSeparator contains a newline
69
+ // don't add rowSpacer if columns are not to be aligned
70
+ if ((flags & DontAlignCols)) return;
71
+ int i = int(matSuffix.length()) - 1;
72
+ while (i >= 0 && matSuffix[i] != '\n') {
73
+ rowSpacer += ' ';
74
+ i--;
75
+ }
76
+ }
77
+ std::string matPrefix, matSuffix;
78
+ std::string rowPrefix, rowSuffix, rowSeparator, rowSpacer;
79
+ std::string coeffSeparator;
80
+ char fill;
81
+ int precision;
82
+ int flags;
83
+ };
84
+
85
+ /** \class WithFormat
86
+ * \ingroup Core_Module
87
+ *
88
+ * \brief Pseudo expression providing matrix output with given format
89
+ *
90
+ * \tparam ExpressionType the type of the object on which IO stream operations are performed
91
+ *
92
+ * This class represents an expression with stream operators controlled by a given IOFormat.
93
+ * It is the return type of DenseBase::format()
94
+ * and most of the time this is the only way it is used.
95
+ *
96
+ * See class IOFormat for some examples.
97
+ *
98
+ * \sa DenseBase::format(), class IOFormat
99
+ */
100
template <typename ExpressionType>
class WithFormat {
 public:
  // Stores (by Nested, i.e. reference or value as chosen by the expression type)
  // the expression and a copy of the formatting options.
  WithFormat(const ExpressionType& matrix, const IOFormat& format) : m_matrix(matrix), m_format(format) {}

  // Evaluates the wrapped expression once, then prints it with the stored format.
  friend std::ostream& operator<<(std::ostream& s, const WithFormat& wf) {
    return internal::print_matrix(s, wf.m_matrix.eval(), wf.m_format);
  }

 protected:
  typename ExpressionType::Nested m_matrix;
  IOFormat m_format;
};
113
+
114
+ namespace internal {
115
+
116
// NOTE: This helper is kept for backward compatibility with previous code specializing
// this internal::significant_decimals_impl structure. In the future we should directly
// call max_digits10().
template <typename Scalar>
struct significant_decimals_impl {
  // max_digits10 is the number of decimal digits needed to round-trip the type.
  static inline int run() { return NumTraits<Scalar>::max_digits10(); }
};
123
+
124
/** \internal
 * print the matrix \a _m to the output stream \a s using the output format \a fmt */
template <typename Derived>
std::ostream& print_matrix(std::ostream& s, const Derived& _m, const IOFormat& fmt) {
  using internal::is_same;

  // Empty matrices print only the prefix/suffix pair.
  if (_m.size() == 0) {
    s << fmt.matPrefix << fmt.matSuffix;
    return s;
  }

  typename Derived::Nested m = _m;
  typedef typename Derived::Scalar Scalar;
  // PrintType promotes 8-bit integer scalars (and their complex versions) to int,
  // so they print as numbers instead of raw characters.
  typedef std::conditional_t<is_same<Scalar, char>::value || is_same<Scalar, unsigned char>::value ||
                                 is_same<Scalar, numext::int8_t>::value || is_same<Scalar, numext::uint8_t>::value,
                             int,
                             std::conditional_t<is_same<Scalar, std::complex<char> >::value ||
                                                    is_same<Scalar, std::complex<unsigned char> >::value ||
                                                    is_same<Scalar, std::complex<numext::int8_t> >::value ||
                                                    is_same<Scalar, std::complex<numext::uint8_t> >::value,
                                                std::complex<int>, const Scalar&> >
      PrintType;

  Index width = 0;

  // Resolve the requested precision: 0 means "leave the stream's precision alone".
  std::streamsize explicit_precision;
  if (fmt.precision == StreamPrecision) {
    explicit_precision = 0;
  } else if (fmt.precision == FullPrecision) {
    if (NumTraits<Scalar>::IsInteger) {
      explicit_precision = 0;
    } else {
      explicit_precision = significant_decimals_impl<Scalar>::run();
    }
  } else {
    explicit_precision = fmt.precision;
  }

  std::streamsize old_precision = 0;
  if (explicit_precision) old_precision = s.precision(explicit_precision);

  bool align_cols = !(fmt.flags & DontAlignCols);
  if (align_cols) {
    // compute the largest width
    // First pass: format every coefficient into a scratch stream (copying the
    // caller's stream flags) to measure the widest printed value.
    for (Index j = 0; j < m.cols(); ++j)
      for (Index i = 0; i < m.rows(); ++i) {
        std::stringstream sstr;
        sstr.copyfmt(s);
        sstr << static_cast<PrintType>(m.coeff(i, j));
        width = std::max<Index>(width, Index(sstr.str().length()));
      }
  }
  // Save stream state so it can be restored before returning.
  std::streamsize old_width = s.width();
  char old_fill_character = s.fill();
  s << fmt.matPrefix;
  for (Index i = 0; i < m.rows(); ++i) {
    if (i) s << fmt.rowSpacer;
    s << fmt.rowPrefix;
    // width/fill must be re-set before every coefficient: operator<< resets them.
    if (width) {
      s.fill(fmt.fill);
      s.width(width);
    }
    s << static_cast<PrintType>(m.coeff(i, 0));
    for (Index j = 1; j < m.cols(); ++j) {
      s << fmt.coeffSeparator;
      if (width) {
        s.fill(fmt.fill);
        s.width(width);
      }
      s << static_cast<PrintType>(m.coeff(i, j));
    }
    s << fmt.rowSuffix;
    if (i < m.rows() - 1) s << fmt.rowSeparator;
  }
  s << fmt.matSuffix;
  // Restore the caller's precision, fill and width.
  if (explicit_precision) s.precision(old_precision);
  if (width) {
    s.fill(old_fill_character);
    s.width(old_width);
  }
  return s;
}
206
+
207
+ } // end namespace internal
208
+
209
+ /** \relates DenseBase
210
+ *
211
+ * Outputs the matrix, to the given stream.
212
+ *
213
+ * If you wish to print the matrix with a format different than the default, use DenseBase::format().
214
+ *
215
+ * It is also possible to change the default format by defining EIGEN_DEFAULT_IO_FORMAT before including Eigen headers.
216
+ * If not defined, this will automatically be defined to Eigen::IOFormat(), that is the Eigen::IOFormat with default
217
+ * parameters.
218
+ *
219
+ * \sa DenseBase::format()
220
+ */
221
template <typename Derived>
std::ostream& operator<<(std::ostream& s, const DenseBase<Derived>& m) {
  // Evaluate the expression once, then print with the default (or user-defined) format.
  return internal::print_matrix(s, m.eval(), EIGEN_DEFAULT_IO_FORMAT);
}

// Overload for diagonal expressions; note that, unlike the dense overload above,
// the expression is passed through derived() without an eval() step.
template <typename Derived>
std::ostream& operator<<(std::ostream& s, const DiagonalBase<Derived>& m) {
  return internal::print_matrix(s, m.derived(), EIGEN_DEFAULT_IO_FORMAT);
}
230
+
231
+ } // end namespace Eigen
232
+
233
+ #endif // EIGEN_IO_H
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/InternalHeaderCheck.h ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
// Guard against direct inclusion of internal headers: EIGEN_CORE_MODULE_H is
// defined by the public Eigen/Core umbrella header, which must be included first.
#ifndef EIGEN_CORE_MODULE_H
#error "Please include Eigen/Core instead of including headers inside the src directory directly."
#endif
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/Inverse.h ADDED
@@ -0,0 +1,108 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2014-2019 Gael Guennebaud <gael.guennebaud@inria.fr>
5
+ //
6
+ // This Source Code Form is subject to the terms of the Mozilla
7
+ // Public License v. 2.0. If a copy of the MPL was not distributed
8
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9
+
10
+ #ifndef EIGEN_INVERSE_H
11
+ #define EIGEN_INVERSE_H
12
+
13
+ // IWYU pragma: private
14
+ #include "./InternalHeaderCheck.h"
15
+
16
+ namespace Eigen {
17
+
18
+ template <typename XprType, typename StorageKind>
19
+ class InverseImpl;
20
+
21
+ namespace internal {
22
+
23
// The Inverse expression inherits the traits of its plain (evaluated) result;
// only the storage-order bit is propagated into Flags.
template <typename XprType>
struct traits<Inverse<XprType> > : traits<typename XprType::PlainObject> {
  typedef typename XprType::PlainObject PlainObject;
  typedef traits<PlainObject> BaseTraits;
  enum { Flags = BaseTraits::Flags & RowMajorBit };
};
29
+
30
+ } // end namespace internal
31
+
32
+ /** \class Inverse
33
+ *
34
+ * \brief Expression of the inverse of another expression
35
+ *
36
+ * \tparam XprType the type of the expression we are taking the inverse
37
+ *
38
+ * This class represents an abstract expression of A.inverse()
39
+ * and most of the time this is the only way it is used.
40
+ *
41
+ */
42
template <typename XprType>
class Inverse : public InverseImpl<XprType, typename internal::traits<XprType>::StorageKind> {
 public:
  typedef typename XprType::StorageIndex StorageIndex;
  typedef typename XprType::Scalar Scalar;
  typedef typename internal::ref_selector<XprType>::type XprTypeNested;
  typedef internal::remove_all_t<XprTypeNested> XprTypeNestedCleaned;
  typedef typename internal::ref_selector<Inverse>::type Nested;
  typedef internal::remove_all_t<XprType> NestedExpression;

  explicit EIGEN_DEVICE_FUNC Inverse(const XprType& xpr) : m_xpr(xpr) {}

  // Dimensions are swapped relative to the nested expression (rows() returns
  // m_xpr.cols() and vice versa).
  EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index rows() const EIGEN_NOEXCEPT { return m_xpr.cols(); }
  EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index cols() const EIGEN_NOEXCEPT { return m_xpr.rows(); }

  // Access to the expression whose inverse is being represented.
  EIGEN_DEVICE_FUNC const XprTypeNestedCleaned& nestedExpression() const { return m_xpr; }

 protected:
  XprTypeNested m_xpr;
};
62
+
63
// Generic API dispatcher
template <typename XprType, typename StorageKind>
class InverseImpl : public internal::generic_xpr_base<Inverse<XprType> >::type {
 public:
  typedef typename internal::generic_xpr_base<Inverse<XprType> >::type Base;
  typedef typename XprType::Scalar Scalar;

 private:
  // Declared private and left undefined on purpose: an Inverse expression has no
  // per-coefficient access, so any attempt to use coeff() fails at compile time.
  Scalar coeff(Index row, Index col) const;
  Scalar coeff(Index i) const;
};
74
+
75
+ namespace internal {
76
+
77
+ /** \internal
78
+ * \brief Default evaluator for Inverse expression.
79
+ *
80
+ * This default evaluator for Inverse expression simply evaluate the inverse into a temporary
81
+ * by a call to internal::call_assignment_no_alias.
82
+ * Therefore, inverse implementers only have to specialize Assignment<Dst,Inverse<...>, ...> for
83
+ * there own nested expression.
84
+ *
85
+ * \sa class Inverse
86
+ */
87
template <typename ArgType>
struct unary_evaluator<Inverse<ArgType> > : public evaluator<typename Inverse<ArgType>::PlainObject> {
  typedef Inverse<ArgType> InverseType;
  typedef typename InverseType::PlainObject PlainObject;
  typedef evaluator<PlainObject> Base;

  enum { Flags = Base::Flags | EvalBeforeNestingBit };

  // Sizes m_result first (member init list), re-constructs the base evaluator over
  // it, then evaluates the inverse into m_result via call_assignment_no_alias —
  // this dispatches to whatever Assignment<Dst, Inverse<...>> specialization applies.
  unary_evaluator(const InverseType& inv_xpr) : m_result(inv_xpr.rows(), inv_xpr.cols()) {
    internal::construct_at<Base>(this, m_result);
    internal::call_assignment_no_alias(m_result, inv_xpr);
  }

 protected:
  PlainObject m_result;
};
103
+
104
+ } // end namespace internal
105
+
106
+ } // end namespace Eigen
107
+
108
+ #endif // EIGEN_INVERSE_H
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/Map.h ADDED
@@ -0,0 +1,153 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2007-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
5
+ // Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
6
+ //
7
+ // This Source Code Form is subject to the terms of the Mozilla
8
+ // Public License v. 2.0. If a copy of the MPL was not distributed
9
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
10
+
11
+ #ifndef EIGEN_MAP_H
12
+ #define EIGEN_MAP_H
13
+
14
+ // IWYU pragma: private
15
+ #include "./InternalHeaderCheck.h"
16
+
17
+ namespace Eigen {
18
+
19
+ namespace internal {
20
template <typename PlainObjectType, int MapOptions, typename StrideType>
struct traits<Map<PlainObjectType, MapOptions, StrideType> > : public traits<PlainObjectType> {
  typedef traits<PlainObjectType> TraitsBase;
  enum {
    // Inner dimension of the mapped type: cols for row-major, rows for column-major.
    PlainObjectTypeInnerSize = ((traits<PlainObjectType>::Flags & RowMajorBit) == RowMajorBit)
                                   ? PlainObjectType::ColsAtCompileTime
                                   : PlainObjectType::RowsAtCompileTime,

    // A stride of 0 in StrideType means "use the natural stride of the mapped type".
    InnerStrideAtCompileTime = StrideType::InnerStrideAtCompileTime == 0
                                   ? int(PlainObjectType::InnerStrideAtCompileTime)
                                   : int(StrideType::InnerStrideAtCompileTime),
    OuterStrideAtCompileTime = StrideType::OuterStrideAtCompileTime == 0
                                   ? (InnerStrideAtCompileTime == Dynamic || PlainObjectTypeInnerSize == Dynamic
                                          ? Dynamic
                                          : int(InnerStrideAtCompileTime) * int(PlainObjectTypeInnerSize))
                                   : int(StrideType::OuterStrideAtCompileTime),
    Alignment = int(MapOptions) & int(AlignedMask),
    Flags0 = TraitsBase::Flags & (~NestByRefBit),
    // Drop the LvalueBit when mapping a const object, so writes are rejected.
    Flags = is_lvalue<PlainObjectType>::value ? int(Flags0) : (int(Flags0) & ~LvalueBit)
  };

 private:
  enum { Options };  // Expressions don't have Options
};
44
+ } // namespace internal
45
+
46
+ /** \class Map
47
+ * \ingroup Core_Module
48
+ *
49
+ * \brief A matrix or vector expression mapping an existing array of data.
50
+ *
51
+ * \tparam PlainObjectType the equivalent matrix type of the mapped data
52
+ * \tparam MapOptions specifies the pointer alignment in bytes. It can be: \c #Aligned128, \c #Aligned64, \c #Aligned32,
53
+ * \c #Aligned16, \c #Aligned8 or \c #Unaligned. The default is \c #Unaligned. \tparam StrideType optionally specifies
54
+ * strides. By default, Map assumes the memory layout of an ordinary, contiguous array. This can be overridden by
55
+ * specifying strides. The type passed here must be a specialization of the Stride template, see examples below.
56
+ *
57
+ * This class represents a matrix or vector expression mapping an existing array of data.
58
+ * It can be used to let Eigen interface without any overhead with non-Eigen data structures,
59
+ * such as plain C arrays or structures from other libraries. By default, it assumes that the
60
+ * data is laid out contiguously in memory. You can however override this by explicitly specifying
61
+ * inner and outer strides.
62
+ *
63
+ * Here's an example of simply mapping a contiguous array as a \ref TopicStorageOrders "column-major" matrix:
64
+ * \include Map_simple.cpp
65
+ * Output: \verbinclude Map_simple.out
66
+ *
67
+ * If you need to map non-contiguous arrays, you can do so by specifying strides:
68
+ *
69
+ * Here's an example of mapping an array as a vector, specifying an inner stride, that is, the pointer
70
+ * increment between two consecutive coefficients. Here, we're specifying the inner stride as a compile-time
71
+ * fixed value.
72
+ * \include Map_inner_stride.cpp
73
+ * Output: \verbinclude Map_inner_stride.out
74
+ *
75
+ * Here's an example of mapping an array while specifying an outer stride. Here, since we're mapping
76
+ * as a column-major matrix, 'outer stride' means the pointer increment between two consecutive columns.
77
+ * Here, we're specifying the outer stride as a runtime parameter. Note that here \c OuterStride<> is
78
+ * a short version of \c OuterStride<Dynamic> because the default template parameter of OuterStride
79
+ * is \c Dynamic
80
+ * \include Map_outer_stride.cpp
81
+ * Output: \verbinclude Map_outer_stride.out
82
+ *
83
+ * For more details and for an example of specifying both an inner and an outer stride, see class Stride.
84
+ *
85
+ * \b Tip: to change the array of data mapped by a Map object, you can use the C++
86
+ * placement new syntax:
87
+ *
88
+ * Example: \include Map_placement_new.cpp
89
+ * Output: \verbinclude Map_placement_new.out
90
+ *
91
+ * This class is the return type of PlainObjectBase::Map() but can also be used directly.
92
+ *
93
+ * \sa PlainObjectBase::Map(), \ref TopicStorageOrders
94
+ */
95
template <typename PlainObjectType, int MapOptions, typename StrideType>
class Map : public MapBase<Map<PlainObjectType, MapOptions, StrideType> > {
 public:
  typedef MapBase<Map> Base;
  EIGEN_DENSE_PUBLIC_INTERFACE(Map)

  typedef typename Base::PointerType PointerType;
  typedef PointerType PointerArgType;
  EIGEN_DEVICE_FUNC inline PointerType cast_to_pointer_type(PointerArgType ptr) { return ptr; }

  // A compile-time stride of 0 means "default": inner stride 1, contiguous storage.
  EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index innerStride() const {
    return StrideType::InnerStrideAtCompileTime != 0 ? m_stride.inner() : 1;
  }

  // Outer stride resolution order: explicit StrideType value, then the compile-time
  // value derived in traits<Map>, then a run-time fallback computed from the sizes.
  EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index outerStride() const {
    return StrideType::OuterStrideAtCompileTime != 0 ? m_stride.outer()
           : internal::traits<Map>::OuterStrideAtCompileTime != Dynamic
               ? Index(internal::traits<Map>::OuterStrideAtCompileTime)
           : IsVectorAtCompileTime ? (this->size() * innerStride())
           : int(Flags) & RowMajorBit ? (this->cols() * innerStride())
                                      : (this->rows() * innerStride());
  }

  /** Constructor in the fixed-size case.
   *
   * \param dataPtr pointer to the array to map
   * \param stride optional Stride object, passing the strides.
   */
  EIGEN_DEVICE_FUNC explicit inline Map(PointerArgType dataPtr, const StrideType& stride = StrideType())
      : Base(cast_to_pointer_type(dataPtr)), m_stride(stride) {}

  /** Constructor in the dynamic-size vector case.
   *
   * \param dataPtr pointer to the array to map
   * \param size the size of the vector expression
   * \param stride optional Stride object, passing the strides.
   */
  EIGEN_DEVICE_FUNC inline Map(PointerArgType dataPtr, Index size, const StrideType& stride = StrideType())
      : Base(cast_to_pointer_type(dataPtr), size), m_stride(stride) {}

  /** Constructor in the dynamic-size matrix case.
   *
   * \param dataPtr pointer to the array to map
   * \param rows the number of rows of the matrix expression
   * \param cols the number of columns of the matrix expression
   * \param stride optional Stride object, passing the strides.
   */
  EIGEN_DEVICE_FUNC inline Map(PointerArgType dataPtr, Index rows, Index cols, const StrideType& stride = StrideType())
      : Base(cast_to_pointer_type(dataPtr), rows, cols), m_stride(stride) {}

  EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Map)

 protected:
  StrideType m_stride;
};
150
+
151
+ } // end namespace Eigen
152
+
153
+ #endif // EIGEN_MAP_H
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/MapBase.h ADDED
@@ -0,0 +1,283 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2007-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
5
+ // Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
6
+ //
7
+ // This Source Code Form is subject to the terms of the Mozilla
8
+ // Public License v. 2.0. If a copy of the MPL was not distributed
9
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
10
+
11
+ #ifndef EIGEN_MAPBASE_H
12
+ #define EIGEN_MAPBASE_H
13
+
14
+ #define EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived) \
15
+ EIGEN_STATIC_ASSERT((int(internal::evaluator<Derived>::Flags) & LinearAccessBit) || Derived::IsVectorAtCompileTime, \
16
+ YOU_ARE_TRYING_TO_USE_AN_INDEX_BASED_ACCESSOR_ON_AN_EXPRESSION_THAT_DOES_NOT_SUPPORT_THAT)
17
+
18
+ // IWYU pragma: private
19
+ #include "./InternalHeaderCheck.h"
20
+
21
+ namespace Eigen {
22
+
23
+ /** \ingroup Core_Module
24
+ *
25
+ * \brief Base class for dense Map and Block expression with direct access
26
+ *
27
+ * This base class provides the const low-level accessors (e.g. coeff, coeffRef) of dense
28
+ * Map and Block objects with direct access.
29
+ * Typical users do not have to directly deal with this class.
30
+ *
31
+ * This class can be extended by through the macro plugin \c EIGEN_MAPBASE_PLUGIN.
32
+ * See \link TopicCustomizing_Plugins customizing Eigen \endlink for details.
33
+ *
34
+ * The \c Derived class has to provide the following two methods describing the memory layout:
35
+ * \code Index innerStride() const; \endcode
36
+ * \code Index outerStride() const; \endcode
37
+ *
38
+ * \sa class Map, class Block
39
+ */
40
template <typename Derived>
class MapBase<Derived, ReadOnlyAccessors> : public internal::dense_xpr_base<Derived>::type {
 public:
  typedef typename internal::dense_xpr_base<Derived>::type Base;
  enum {
    RowsAtCompileTime = internal::traits<Derived>::RowsAtCompileTime,
    ColsAtCompileTime = internal::traits<Derived>::ColsAtCompileTime,
    InnerStrideAtCompileTime = internal::traits<Derived>::InnerStrideAtCompileTime,
    SizeAtCompileTime = Base::SizeAtCompileTime
  };

  typedef typename internal::traits<Derived>::StorageKind StorageKind;
  typedef typename internal::traits<Derived>::Scalar Scalar;
  typedef typename internal::packet_traits<Scalar>::type PacketScalar;
  typedef typename NumTraits<Scalar>::Real RealScalar;
  // Const pointer unless the mapped expression is an lvalue.
  typedef std::conditional_t<bool(internal::is_lvalue<Derived>::value), Scalar*, const Scalar*> PointerType;

  using Base::derived;
  // using Base::RowsAtCompileTime;
  // using Base::ColsAtCompileTime;
  // using Base::SizeAtCompileTime;
  using Base::Flags;
  using Base::IsRowMajor;
  using Base::IsVectorAtCompileTime;
  using Base::MaxColsAtCompileTime;
  using Base::MaxRowsAtCompileTime;
  using Base::MaxSizeAtCompileTime;

  using Base::coeff;
  using Base::coeffRef;
  using Base::cols;
  using Base::eval;
  using Base::lazyAssign;
  using Base::rows;
  using Base::size;

  using Base::colStride;
  using Base::innerStride;
  using Base::outerStride;
  using Base::rowStride;

  // bug 217 - compile error on ICC 11.1
  using Base::operator=;

  typedef typename Base::CoeffReturnType CoeffReturnType;

  /** \copydoc DenseBase::rows() */
  EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index rows() const EIGEN_NOEXCEPT { return m_rows.value(); }
  /** \copydoc DenseBase::cols() */
  EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index cols() const EIGEN_NOEXCEPT { return m_cols.value(); }

  /** Returns a pointer to the first coefficient of the matrix or vector.
   *
   * \note When addressing this data, make sure to honor the strides returned by innerStride() and outerStride().
   *
   * \sa innerStride(), outerStride()
   */
  EIGEN_DEVICE_FUNC inline const Scalar* data() const { return m_data; }

  /** \copydoc PlainObjectBase::coeff(Index,Index) const */
  EIGEN_DEVICE_FUNC inline const Scalar& coeff(Index rowId, Index colId) const {
    // Strided 2D addressing: offset = colId * colStride() + rowId * rowStride().
    return m_data[colId * colStride() + rowId * rowStride()];
  }

  /** \copydoc PlainObjectBase::coeff(Index) const */
  EIGEN_DEVICE_FUNC inline const Scalar& coeff(Index index) const {
    // Linear access requires LinearAccessBit or a vector expression.
    EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived)
    return m_data[index * innerStride()];
  }

  /** \copydoc PlainObjectBase::coeffRef(Index,Index) const */
  EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index rowId, Index colId) const {
    return this->m_data[colId * colStride() + rowId * rowStride()];
  }

  /** \copydoc PlainObjectBase::coeffRef(Index) const */
  EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index index) const {
    EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived)
    return this->m_data[index * innerStride()];
  }

  /** \internal */
  template <int LoadMode>
  inline PacketScalar packet(Index rowId, Index colId) const {
    return internal::ploadt<PacketScalar, LoadMode>(m_data + (colId * colStride() + rowId * rowStride()));
  }

  /** \internal */
  template <int LoadMode>
  inline PacketScalar packet(Index index) const {
    EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived)
    return internal::ploadt<PacketScalar, LoadMode>(m_data + index * innerStride());
  }

  /** \internal Constructor for fixed size matrices or vectors */
  EIGEN_DEVICE_FUNC explicit inline MapBase(PointerType dataPtr)
      : m_data(dataPtr), m_rows(RowsAtCompileTime), m_cols(ColsAtCompileTime) {
    EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived)
    checkSanity<Derived>();
  }

  /** \internal Constructor for dynamically sized vectors */
  EIGEN_DEVICE_FUNC inline MapBase(PointerType dataPtr, Index vecSize)
      : m_data(dataPtr),
        m_rows(RowsAtCompileTime == Dynamic ? vecSize : Index(RowsAtCompileTime)),
        m_cols(ColsAtCompileTime == Dynamic ? vecSize : Index(ColsAtCompileTime)) {
    EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
    eigen_assert(vecSize >= 0);
    // A null pointer is tolerated here; only the size consistency is checked.
    eigen_assert(dataPtr == 0 || SizeAtCompileTime == Dynamic || SizeAtCompileTime == vecSize);
    checkSanity<Derived>();
  }

  /** \internal Constructor for dynamically sized matrices */
  EIGEN_DEVICE_FUNC inline MapBase(PointerType dataPtr, Index rows, Index cols)
      : m_data(dataPtr), m_rows(rows), m_cols(cols) {
    eigen_assert((dataPtr == 0) || (rows >= 0 && (RowsAtCompileTime == Dynamic || RowsAtCompileTime == rows) &&
                                    cols >= 0 && (ColsAtCompileTime == Dynamic || ColsAtCompileTime == cols)));
    checkSanity<Derived>();
  }

#ifdef EIGEN_MAPBASE_PLUGIN
#include EIGEN_MAPBASE_PLUGIN
#endif

 protected:
  EIGEN_DEFAULT_COPY_CONSTRUCTOR(MapBase)
  EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(MapBase)

  // Overload selected when the traits request a non-trivial alignment.
  template <typename T>
  EIGEN_DEVICE_FUNC void checkSanity(std::enable_if_t<(internal::traits<T>::Alignment > 0), void*> = 0) const {
    // Temporary macro to allow scalars to not be properly aligned. This is while we sort out failures
    // in TensorFlow Lite that are currently relying on this UB.
#ifndef EIGEN_ALLOW_UNALIGNED_SCALARS
    // Pointer must be aligned to the Scalar type, otherwise we get UB.
    eigen_assert((std::uintptr_t(m_data) % alignof(Scalar) == 0) && "data is not scalar-aligned");
#endif
#if EIGEN_MAX_ALIGN_BYTES > 0
    // innerStride() is not set yet when this function is called, so we optimistically assume the lowest plausible
    // value:
    const Index minInnerStride = InnerStrideAtCompileTime == Dynamic ? 1 : Index(InnerStrideAtCompileTime);
    EIGEN_ONLY_USED_FOR_DEBUG(minInnerStride);
    eigen_assert((((std::uintptr_t(m_data) % internal::traits<Derived>::Alignment) == 0) ||
                  (cols() * rows() * minInnerStride * sizeof(Scalar)) < internal::traits<Derived>::Alignment) &&
                 "data is not aligned");
#endif
  }

  // Overload selected when no special alignment is required; only scalar alignment is checked.
  template <typename T>
  EIGEN_DEVICE_FUNC void checkSanity(std::enable_if_t<internal::traits<T>::Alignment == 0, void*> = 0) const {
#ifndef EIGEN_ALLOW_UNALIGNED_SCALARS
    // Pointer must be aligned to the Scalar type, otherwise we get UB.
    eigen_assert((std::uintptr_t(m_data) % alignof(Scalar) == 0) && "data is not scalar-aligned");
#endif
  }

  PointerType m_data;
  // Run-time sizes are stored only when the compile-time size is Dynamic.
  const internal::variable_if_dynamic<Index, RowsAtCompileTime> m_rows;
  const internal::variable_if_dynamic<Index, ColsAtCompileTime> m_cols;
};
199
+
200
+ /** \ingroup Core_Module
201
+ *
202
+ * \brief Base class for non-const dense Map and Block expression with direct access
203
+ *
204
+ * This base class provides the non-const low-level accessors (e.g. coeff and coeffRef) of
205
+ * dense Map and Block objects with direct access.
206
+ * It inherits MapBase<Derived, ReadOnlyAccessors> which defines the const variant for reading specific entries.
207
+ *
208
+ * \sa class Map, class Block
209
+ */
210
template <typename Derived>
class MapBase<Derived, WriteAccessors> : public MapBase<Derived, ReadOnlyAccessors> {
  typedef MapBase<Derived, ReadOnlyAccessors> ReadOnlyMapBase;

 public:
  typedef MapBase<Derived, ReadOnlyAccessors> Base;

  typedef typename Base::Scalar Scalar;
  typedef typename Base::PacketScalar PacketScalar;
  typedef typename Base::StorageIndex StorageIndex;
  typedef typename Base::PointerType PointerType;

  using Base::coeff;
  using Base::coeffRef;
  using Base::cols;
  using Base::derived;
  using Base::rows;
  using Base::size;

  using Base::colStride;
  using Base::innerStride;
  using Base::outerStride;
  using Base::rowStride;

  // Resolves to const Scalar when the mapped expression is not an lvalue,
  // so writes through a non-lvalue map fail to compile.
  typedef std::conditional_t<internal::is_lvalue<Derived>::value, Scalar, const Scalar> ScalarWithConstIfNotLvalue;

  EIGEN_DEVICE_FUNC inline const Scalar* data() const { return this->m_data; }
  EIGEN_DEVICE_FUNC inline ScalarWithConstIfNotLvalue* data() {
    return this->m_data;
  }  // no const-cast here so non-const-correct code will give a compile error

  EIGEN_DEVICE_FUNC inline ScalarWithConstIfNotLvalue& coeffRef(Index row, Index col) {
    // Strided 2D addressing, same layout as the read-only accessors.
    return this->m_data[col * colStride() + row * rowStride()];
  }

  EIGEN_DEVICE_FUNC inline ScalarWithConstIfNotLvalue& coeffRef(Index index) {
    EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived)
    return this->m_data[index * innerStride()];
  }

  template <int StoreMode>
  inline void writePacket(Index row, Index col, const PacketScalar& val) {
    internal::pstoret<Scalar, PacketScalar, StoreMode>(this->m_data + (col * colStride() + row * rowStride()), val);
  }

  template <int StoreMode>
  inline void writePacket(Index index, const PacketScalar& val) {
    EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived)
    internal::pstoret<Scalar, PacketScalar, StoreMode>(this->m_data + index * innerStride(), val);
  }

  EIGEN_DEVICE_FUNC explicit inline MapBase(PointerType dataPtr) : Base(dataPtr) {}
  EIGEN_DEVICE_FUNC inline MapBase(PointerType dataPtr, Index vecSize) : Base(dataPtr, vecSize) {}
  EIGEN_DEVICE_FUNC inline MapBase(PointerType dataPtr, Index rows, Index cols) : Base(dataPtr, rows, cols) {}

  // Assignment copies the coefficients, not the mapping.
  EIGEN_DEVICE_FUNC Derived& operator=(const MapBase& other) {
    ReadOnlyMapBase::Base::operator=(other);
    return derived();
  }

  // In theory we could simply refer to Base:Base::operator=, but MSVC does not like Base::Base,
  // see bugs 821 and 920.
  using ReadOnlyMapBase::Base::operator=;

 protected:
  EIGEN_DEFAULT_COPY_CONSTRUCTOR(MapBase)
  EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(MapBase)
};
278
+
279
+ #undef EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS
280
+
281
+ } // end namespace Eigen
282
+
283
+ #endif // EIGEN_MAPBASE_H
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/MathFunctionsImpl.h ADDED
@@ -0,0 +1,262 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2014 Pedro Gonnet (pedro.gonnet@gmail.com)
5
+ // Copyright (C) 2016 Gael Guennebaud <gael.guennebaud@inria.fr>
6
+ //
7
+ // This Source Code Form is subject to the terms of the Mozilla
8
+ // Public License v. 2.0. If a copy of the MPL was not distributed
9
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
10
+
11
+ #ifndef EIGEN_MATHFUNCTIONSIMPL_H
12
+ #define EIGEN_MATHFUNCTIONSIMPL_H
13
+
14
+ // IWYU pragma: private
15
+ #include "./InternalHeaderCheck.h"
16
+
17
+ namespace Eigen {
18
+
19
+ namespace internal {
20
+
21
+ /** \internal Fast reciprocal using Newton-Raphson's method.
22
+
23
+ Preconditions:
24
+ 1. The starting guess provided in approx_a_recip must have at least half
25
+ the leading mantissa bits in the correct result, such that a single
26
+ Newton-Raphson step is sufficient to get within 1-2 ulps of the currect
27
+ result.
28
+ 2. If a is zero, approx_a_recip must be infinite with the same sign as a.
29
+ 3. If a is infinite, approx_a_recip must be zero with the same sign as a.
30
+
31
+ If the preconditions are satisfied, which they are for for the _*_rcp_ps
32
+ instructions on x86, the result has a maximum relative error of 2 ulps,
33
+ and correctly handles reciprocals of zero, infinity, and NaN.
34
+ */
35
+ template <typename Packet, int Steps>
36
+ struct generic_reciprocal_newton_step {
37
+ static_assert(Steps > 0, "Steps must be at least 1.");
38
+ EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Packet run(const Packet& a, const Packet& approx_a_recip) {
39
+ using Scalar = typename unpacket_traits<Packet>::type;
40
+ const Packet two = pset1<Packet>(Scalar(2));
41
+ // Refine the approximation using one Newton-Raphson step:
42
+ // x_{i} = x_{i-1} * (2 - a * x_{i-1})
43
+ const Packet x = generic_reciprocal_newton_step<Packet, Steps - 1>::run(a, approx_a_recip);
44
+ const Packet tmp = pnmadd(a, x, two);
45
+ // If tmp is NaN, it means that a is either +/-0 or +/-Inf.
46
+ // In this case return the approximation directly.
47
+ const Packet is_not_nan = pcmp_eq(tmp, tmp);
48
+ return pselect(is_not_nan, pmul(x, tmp), x);
49
+ }
50
+ };
51
+
52
+ template <typename Packet>
53
+ struct generic_reciprocal_newton_step<Packet, 0> {
54
+ EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Packet run(const Packet& /*unused*/, const Packet& approx_rsqrt) {
55
+ return approx_rsqrt;
56
+ }
57
+ };
58
+
59
+ /** \internal Fast reciprocal sqrt using Newton-Raphson's method.
60
+
61
+ Preconditions:
62
+ 1. The starting guess provided in approx_a_recip must have at least half
63
+ the leading mantissa bits in the correct result, such that a single
64
+ Newton-Raphson step is sufficient to get within 1-2 ulps of the currect
65
+ result.
66
+ 2. If a is zero, approx_a_recip must be infinite with the same sign as a.
67
+ 3. If a is infinite, approx_a_recip must be zero with the same sign as a.
68
+
69
+ If the preconditions are satisfied, which they are for for the _*_rcp_ps
70
+ instructions on x86, the result has a maximum relative error of 2 ulps,
71
+ and correctly handles zero, infinity, and NaN. Positive denormals are
72
+ treated as zero.
73
+ */
74
+ template <typename Packet, int Steps>
75
+ struct generic_rsqrt_newton_step {
76
+ static_assert(Steps > 0, "Steps must be at least 1.");
77
+ using Scalar = typename unpacket_traits<Packet>::type;
78
+ EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Packet run(const Packet& a, const Packet& approx_rsqrt) {
79
+ constexpr Scalar kMinusHalf = Scalar(-1) / Scalar(2);
80
+ const Packet cst_minus_half = pset1<Packet>(kMinusHalf);
81
+ const Packet cst_minus_one = pset1<Packet>(Scalar(-1));
82
+
83
+ Packet inv_sqrt = approx_rsqrt;
84
+ for (int step = 0; step < Steps; ++step) {
85
+ // Refine the approximation using one Newton-Raphson step:
86
+ // h_n = (x * inv_sqrt) * inv_sqrt - 1 (so that h_n is nearly 0).
87
+ // inv_sqrt = inv_sqrt - 0.5 * inv_sqrt * h_n
88
+ Packet r2 = pmul(a, inv_sqrt);
89
+ Packet half_r = pmul(inv_sqrt, cst_minus_half);
90
+ Packet h_n = pmadd(r2, inv_sqrt, cst_minus_one);
91
+ inv_sqrt = pmadd(half_r, h_n, inv_sqrt);
92
+ }
93
+
94
+ // If x is NaN, then either:
95
+ // 1) the input is NaN
96
+ // 2) zero and infinity were multiplied
97
+ // In either of these cases, return approx_rsqrt
98
+ return pselect(pisnan(inv_sqrt), approx_rsqrt, inv_sqrt);
99
+ }
100
+ };
101
+
102
+ template <typename Packet>
103
+ struct generic_rsqrt_newton_step<Packet, 0> {
104
+ EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Packet run(const Packet& /*unused*/, const Packet& approx_rsqrt) {
105
+ return approx_rsqrt;
106
+ }
107
+ };
108
+
109
+ /** \internal Fast sqrt using Newton-Raphson's method.
110
+
111
+ Preconditions:
112
+ 1. The starting guess for the reciprocal sqrt provided in approx_rsqrt must
113
+ have at least half the leading mantissa bits in the correct result, such
114
+ that a single Newton-Raphson step is sufficient to get within 1-2 ulps of
115
+ the currect result.
116
+ 2. If a is zero, approx_rsqrt must be infinite.
117
+ 3. If a is infinite, approx_rsqrt must be zero.
118
+
119
+ If the preconditions are satisfied, which they are for for the _*_rsqrt_ps
120
+ instructions on x86, the result has a maximum relative error of 2 ulps,
121
+ and correctly handles zero and infinity, and NaN. Positive denormal inputs
122
+ are treated as zero.
123
+ */
124
+ template <typename Packet, int Steps = 1>
125
+ struct generic_sqrt_newton_step {
126
+ static_assert(Steps > 0, "Steps must be at least 1.");
127
+
128
+ EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Packet run(const Packet& a, const Packet& approx_rsqrt) {
129
+ using Scalar = typename unpacket_traits<Packet>::type;
130
+ const Packet one_point_five = pset1<Packet>(Scalar(1.5));
131
+ const Packet minus_half = pset1<Packet>(Scalar(-0.5));
132
+ // If a is inf or zero, return a directly.
133
+ const Packet inf_mask = pcmp_eq(a, pset1<Packet>(NumTraits<Scalar>::infinity()));
134
+ const Packet return_a = por(pcmp_eq(a, pzero(a)), inf_mask);
135
+ // Do a single step of Newton's iteration for reciprocal square root:
136
+ // x_{n+1} = x_n * (1.5 + (-0.5 * x_n) * (a * x_n))).
137
+ // The Newton's step is computed this way to avoid over/under-flows.
138
+ Packet rsqrt = pmul(approx_rsqrt, pmadd(pmul(minus_half, approx_rsqrt), pmul(a, approx_rsqrt), one_point_five));
139
+ for (int step = 1; step < Steps; ++step) {
140
+ rsqrt = pmul(rsqrt, pmadd(pmul(minus_half, rsqrt), pmul(a, rsqrt), one_point_five));
141
+ }
142
+
143
+ // Return sqrt(x) = x * rsqrt(x) for non-zero finite positive arguments.
144
+ // Return a itself for 0 or +inf, NaN for negative arguments.
145
+ return pselect(return_a, a, pmul(a, rsqrt));
146
+ }
147
+ };
148
+
149
+ template <typename RealScalar>
150
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE RealScalar positive_real_hypot(const RealScalar& x, const RealScalar& y) {
151
+ // IEEE IEC 6059 special cases.
152
+ if ((numext::isinf)(x) || (numext::isinf)(y)) return NumTraits<RealScalar>::infinity();
153
+ if ((numext::isnan)(x) || (numext::isnan)(y)) return NumTraits<RealScalar>::quiet_NaN();
154
+
155
+ EIGEN_USING_STD(sqrt);
156
+ RealScalar p, qp;
157
+ p = numext::maxi(x, y);
158
+ if (numext::is_exactly_zero(p)) return RealScalar(0);
159
+ qp = numext::mini(y, x) / p;
160
+ return p * sqrt(RealScalar(1) + qp * qp);
161
+ }
162
+
163
+ template <typename Scalar>
164
+ struct hypot_impl {
165
+ typedef typename NumTraits<Scalar>::Real RealScalar;
166
+ static EIGEN_DEVICE_FUNC inline RealScalar run(const Scalar& x, const Scalar& y) {
167
+ EIGEN_USING_STD(abs);
168
+ return positive_real_hypot<RealScalar>(abs(x), abs(y));
169
+ }
170
+ };
171
+
172
+ // Generic complex sqrt implementation that correctly handles corner cases
173
+ // according to https://en.cppreference.com/w/cpp/numeric/complex/sqrt
174
+ template <typename T>
175
+ EIGEN_DEVICE_FUNC std::complex<T> complex_sqrt(const std::complex<T>& z) {
176
+ // Computes the principal sqrt of the input.
177
+ //
178
+ // For a complex square root of the number x + i*y. We want to find real
179
+ // numbers u and v such that
180
+ // (u + i*v)^2 = x + i*y <=>
181
+ // u^2 - v^2 + i*2*u*v = x + i*v.
182
+ // By equating the real and imaginary parts we get:
183
+ // u^2 - v^2 = x
184
+ // 2*u*v = y.
185
+ //
186
+ // For x >= 0, this has the numerically stable solution
187
+ // u = sqrt(0.5 * (x + sqrt(x^2 + y^2)))
188
+ // v = y / (2 * u)
189
+ // and for x < 0,
190
+ // v = sign(y) * sqrt(0.5 * (-x + sqrt(x^2 + y^2)))
191
+ // u = y / (2 * v)
192
+ //
193
+ // Letting w = sqrt(0.5 * (|x| + |z|)),
194
+ // if x == 0: u = w, v = sign(y) * w
195
+ // if x > 0: u = w, v = y / (2 * w)
196
+ // if x < 0: u = |y| / (2 * w), v = sign(y) * w
197
+
198
+ const T x = numext::real(z);
199
+ const T y = numext::imag(z);
200
+ const T zero = T(0);
201
+ const T w = numext::sqrt(T(0.5) * (numext::abs(x) + numext::hypot(x, y)));
202
+
203
+ return (numext::isinf)(y) ? std::complex<T>(NumTraits<T>::infinity(), y)
204
+ : numext::is_exactly_zero(x) ? std::complex<T>(w, y < zero ? -w : w)
205
+ : x > zero ? std::complex<T>(w, y / (2 * w))
206
+ : std::complex<T>(numext::abs(y) / (2 * w), y < zero ? -w : w);
207
+ }
208
+
209
+ // Generic complex rsqrt implementation.
210
+ template <typename T>
211
+ EIGEN_DEVICE_FUNC std::complex<T> complex_rsqrt(const std::complex<T>& z) {
212
+ // Computes the principal reciprocal sqrt of the input.
213
+ //
214
+ // For a complex reciprocal square root of the number z = x + i*y. We want to
215
+ // find real numbers u and v such that
216
+ // (u + i*v)^2 = 1 / (x + i*y) <=>
217
+ // u^2 - v^2 + i*2*u*v = x/|z|^2 - i*v/|z|^2.
218
+ // By equating the real and imaginary parts we get:
219
+ // u^2 - v^2 = x/|z|^2
220
+ // 2*u*v = y/|z|^2.
221
+ //
222
+ // For x >= 0, this has the numerically stable solution
223
+ // u = sqrt(0.5 * (x + |z|)) / |z|
224
+ // v = -y / (2 * u * |z|)
225
+ // and for x < 0,
226
+ // v = -sign(y) * sqrt(0.5 * (-x + |z|)) / |z|
227
+ // u = -y / (2 * v * |z|)
228
+ //
229
+ // Letting w = sqrt(0.5 * (|x| + |z|)),
230
+ // if x == 0: u = w / |z|, v = -sign(y) * w / |z|
231
+ // if x > 0: u = w / |z|, v = -y / (2 * w * |z|)
232
+ // if x < 0: u = |y| / (2 * w * |z|), v = -sign(y) * w / |z|
233
+
234
+ const T x = numext::real(z);
235
+ const T y = numext::imag(z);
236
+ const T zero = T(0);
237
+
238
+ const T abs_z = numext::hypot(x, y);
239
+ const T w = numext::sqrt(T(0.5) * (numext::abs(x) + abs_z));
240
+ const T woz = w / abs_z;
241
+ // Corner cases consistent with 1/sqrt(z) on gcc/clang.
242
+ return numext::is_exactly_zero(abs_z) ? std::complex<T>(NumTraits<T>::infinity(), NumTraits<T>::quiet_NaN())
243
+ : ((numext::isinf)(x) || (numext::isinf)(y)) ? std::complex<T>(zero, zero)
244
+ : numext::is_exactly_zero(x) ? std::complex<T>(woz, y < zero ? woz : -woz)
245
+ : x > zero ? std::complex<T>(woz, -y / (2 * w * abs_z))
246
+ : std::complex<T>(numext::abs(y) / (2 * w * abs_z), y < zero ? woz : -woz);
247
+ }
248
+
249
+ template <typename T>
250
+ EIGEN_DEVICE_FUNC std::complex<T> complex_log(const std::complex<T>& z) {
251
+ // Computes complex log.
252
+ T a = numext::abs(z);
253
+ EIGEN_USING_STD(atan2);
254
+ T b = atan2(z.imag(), z.real());
255
+ return std::complex<T>(numext::log(a), b);
256
+ }
257
+
258
+ } // end namespace internal
259
+
260
+ } // end namespace Eigen
261
+
262
+ #endif // EIGEN_MATHFUNCTIONSIMPL_H
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/Matrix.h ADDED
@@ -0,0 +1,528 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2006-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
5
+ // Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
6
+ //
7
+ // This Source Code Form is subject to the terms of the Mozilla
8
+ // Public License v. 2.0. If a copy of the MPL was not distributed
9
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
10
+
11
+ #ifndef EIGEN_MATRIX_H
12
+ #define EIGEN_MATRIX_H
13
+
14
+ // IWYU pragma: private
15
+ #include "./InternalHeaderCheck.h"
16
+
17
+ namespace Eigen {
18
+
19
+ namespace internal {
20
+ template <typename Scalar_, int Rows_, int Cols_, int Options_, int MaxRows_, int MaxCols_>
21
+ struct traits<Matrix<Scalar_, Rows_, Cols_, Options_, MaxRows_, MaxCols_>> {
22
+ private:
23
+ constexpr static int size = internal::size_at_compile_time(Rows_, Cols_);
24
+ typedef typename find_best_packet<Scalar_, size>::type PacketScalar;
25
+ enum {
26
+ row_major_bit = Options_ & RowMajor ? RowMajorBit : 0,
27
+ is_dynamic_size_storage = MaxRows_ == Dynamic || MaxCols_ == Dynamic,
28
+ max_size = is_dynamic_size_storage ? Dynamic : MaxRows_ * MaxCols_,
29
+ default_alignment = compute_default_alignment<Scalar_, max_size>::value,
30
+ actual_alignment = ((Options_ & DontAlign) == 0) ? default_alignment : 0,
31
+ required_alignment = unpacket_traits<PacketScalar>::alignment,
32
+ packet_access_bit = (packet_traits<Scalar_>::Vectorizable &&
33
+ (EIGEN_UNALIGNED_VECTORIZE || (int(actual_alignment) >= int(required_alignment))))
34
+ ? PacketAccessBit
35
+ : 0
36
+ };
37
+
38
+ public:
39
+ typedef Scalar_ Scalar;
40
+ typedef Dense StorageKind;
41
+ typedef Eigen::Index StorageIndex;
42
+ typedef MatrixXpr XprKind;
43
+ enum {
44
+ RowsAtCompileTime = Rows_,
45
+ ColsAtCompileTime = Cols_,
46
+ MaxRowsAtCompileTime = MaxRows_,
47
+ MaxColsAtCompileTime = MaxCols_,
48
+ Flags = compute_matrix_flags(Options_),
49
+ Options = Options_,
50
+ InnerStrideAtCompileTime = 1,
51
+ OuterStrideAtCompileTime = (int(Options) & int(RowMajor)) ? ColsAtCompileTime : RowsAtCompileTime,
52
+
53
+ // FIXME, the following flag in only used to define NeedsToAlign in PlainObjectBase
54
+ EvaluatorFlags = LinearAccessBit | DirectAccessBit | packet_access_bit | row_major_bit,
55
+ Alignment = actual_alignment
56
+ };
57
+ };
58
+ } // namespace internal
59
+
60
+ /** \class Matrix
61
+ * \ingroup Core_Module
62
+ *
63
+ * \brief The matrix class, also used for vectors and row-vectors
64
+ *
65
+ * The %Matrix class is the work-horse for all \em dense (\ref dense "note") matrices and vectors within Eigen.
66
+ * Vectors are matrices with one column, and row-vectors are matrices with one row.
67
+ *
68
+ * The %Matrix class encompasses \em both fixed-size and dynamic-size objects (\ref fixedsize "note").
69
+ *
70
+ * The first three template parameters are required:
71
+ * \tparam Scalar_ Numeric type, e.g. float, double, int or std::complex<float>.
72
+ * User defined scalar types are supported as well (see \ref user_defined_scalars "here").
73
+ * \tparam Rows_ Number of rows, or \b Dynamic
74
+ * \tparam Cols_ Number of columns, or \b Dynamic
75
+ *
76
+ * The remaining template parameters are optional -- in most cases you don't have to worry about them.
77
+ * \tparam Options_ A combination of either \b #RowMajor or \b #ColMajor, and of either
78
+ * \b #AutoAlign or \b #DontAlign.
79
+ * The former controls \ref TopicStorageOrders "storage order", and defaults to column-major. The latter
80
+ * controls alignment, which is required for vectorization. It defaults to aligning matrices except for fixed sizes that
81
+ * aren't a multiple of the packet size. \tparam MaxRows_ Maximum number of rows. Defaults to \a Rows_ (\ref maxrows
82
+ * "note"). \tparam MaxCols_ Maximum number of columns. Defaults to \a Cols_ (\ref maxrows "note").
83
+ *
84
+ * Eigen provides a number of typedefs covering the usual cases. Here are some examples:
85
+ *
86
+ * \li \c Matrix2d is a 2x2 square matrix of doubles (\c Matrix<double, 2, 2>)
87
+ * \li \c Vector4f is a vector of 4 floats (\c Matrix<float, 4, 1>)
88
+ * \li \c RowVector3i is a row-vector of 3 ints (\c Matrix<int, 1, 3>)
89
+ *
90
+ * \li \c MatrixXf is a dynamic-size matrix of floats (\c Matrix<float, Dynamic, Dynamic>)
91
+ * \li \c VectorXf is a dynamic-size vector of floats (\c Matrix<float, Dynamic, 1>)
92
+ *
93
+ * \li \c Matrix2Xf is a partially fixed-size (dynamic-size) matrix of floats (\c Matrix<float, 2, Dynamic>)
94
+ * \li \c MatrixX3d is a partially dynamic-size (fixed-size) matrix of double (\c Matrix<double, Dynamic, 3>)
95
+ *
96
+ * See \link matrixtypedefs this page \endlink for a complete list of predefined \em %Matrix and \em Vector typedefs.
97
+ *
98
+ * You can access elements of vectors and matrices using normal subscripting:
99
+ *
100
+ * \code
101
+ * Eigen::VectorXd v(10);
102
+ * v[0] = 0.1;
103
+ * v[1] = 0.2;
104
+ * v(0) = 0.3;
105
+ * v(1) = 0.4;
106
+ *
107
+ * Eigen::MatrixXi m(10, 10);
108
+ * m(0, 1) = 1;
109
+ * m(0, 2) = 2;
110
+ * m(0, 3) = 3;
111
+ * \endcode
112
+ *
113
+ * This class can be extended with the help of the plugin mechanism described on the page
114
+ * \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_MATRIX_PLUGIN.
115
+ *
116
+ * <i><b>Some notes:</b></i>
117
+ *
118
+ * <dl>
119
+ * <dt><b>\anchor dense Dense versus sparse:</b></dt>
120
+ * <dd>This %Matrix class handles dense, not sparse matrices and vectors. For sparse matrices and vectors, see the
121
+ * Sparse module.
122
+ *
123
+ * Dense matrices and vectors are plain usual arrays of coefficients. All the coefficients are stored, in an ordinary
124
+ * contiguous array. This is unlike Sparse matrices and vectors where the coefficients are stored as a list of nonzero
125
+ * coefficients.</dd>
126
+ *
127
+ * <dt><b>\anchor fixedsize Fixed-size versus dynamic-size:</b></dt>
128
+ * <dd>Fixed-size means that the numbers of rows and columns are known at compile-time. In this case, Eigen allocates
129
+ * the array of coefficients as a fixed-size array, as a class member. This makes sense for very small matrices,
130
+ * typically up to 4x4, sometimes up to 16x16. Larger matrices should be declared as dynamic-size even if one happens to
131
+ * know their size at compile-time.
132
+ *
133
+ * Dynamic-size means that the numbers of rows or columns are not necessarily known at compile-time. In this case they
134
+ * are runtime variables, and the array of coefficients is allocated dynamically on the heap.
135
+ *
136
+ * Note that \em dense matrices, be they Fixed-size or Dynamic-size, <em>do not</em> expand dynamically in the sense of
137
+ * a std::map. If you want this behavior, see the Sparse module.</dd>
138
+ *
139
+ * <dt><b>\anchor maxrows MaxRows_ and MaxCols_:</b></dt>
140
+ * <dd>In most cases, one just leaves these parameters to the default values.
141
+ * These parameters mean the maximum size of rows and columns that the matrix may have. They are useful in cases
142
+ * when the exact numbers of rows and columns are not known at compile-time, but it is known at compile-time that they
143
+ * cannot exceed a certain value. This happens when taking dynamic-size blocks inside fixed-size matrices: in this case
144
+ * MaxRows_ and MaxCols_ are the dimensions of the original matrix, while Rows_ and Cols_ are Dynamic.</dd>
145
+ * </dl>
146
+ *
147
+ * <i><b>ABI and storage layout</b></i>
148
+ *
149
+ * The table below summarizes the ABI of some possible Matrix instances which is fixed thorough the lifetime of Eigen 3.
150
+ * <table class="manual">
151
+ * <tr><th>Matrix type</th><th>Equivalent C structure</th></tr>
152
+ * <tr><td>\code Matrix<T,Dynamic,Dynamic> \endcode</td><td>\code
153
+ * struct {
154
+ * T *data; // with (size_t(data)%EIGEN_MAX_ALIGN_BYTES)==0
155
+ * Eigen::Index rows, cols;
156
+ * };
157
+ * \endcode</td></tr>
158
+ * <tr class="alt"><td>\code
159
+ * Matrix<T,Dynamic,1>
160
+ * Matrix<T,1,Dynamic> \endcode</td><td>\code
161
+ * struct {
162
+ * T *data; // with (size_t(data)%EIGEN_MAX_ALIGN_BYTES)==0
163
+ * Eigen::Index size;
164
+ * };
165
+ * \endcode</td></tr>
166
+ * <tr><td>\code Matrix<T,Rows,Cols> \endcode</td><td>\code
167
+ * struct {
168
+ * T data[Rows*Cols]; // with (size_t(data)%A(Rows*Cols*sizeof(T)))==0
169
+ * };
170
+ * \endcode</td></tr>
171
+ * <tr class="alt"><td>\code Matrix<T,Dynamic,Dynamic,0,MaxRows,MaxCols> \endcode</td><td>\code
172
+ * struct {
173
+ * T data[MaxRows*MaxCols]; // with (size_t(data)%A(MaxRows*MaxCols*sizeof(T)))==0
174
+ * Eigen::Index rows, cols;
175
+ * };
176
+ * \endcode</td></tr>
177
+ * </table>
178
+ * Note that in this table Rows, Cols, MaxRows and MaxCols are all positive integers. A(S) is defined to the largest
179
+ * possible power-of-two smaller to EIGEN_MAX_STATIC_ALIGN_BYTES.
180
+ *
181
+ * \see MatrixBase for the majority of the API methods for matrices, \ref TopicClassHierarchy,
182
+ * \ref TopicStorageOrders
183
+ */
184
+
185
+ template <typename Scalar_, int Rows_, int Cols_, int Options_, int MaxRows_, int MaxCols_>
186
+ class Matrix : public PlainObjectBase<Matrix<Scalar_, Rows_, Cols_, Options_, MaxRows_, MaxCols_>> {
187
+ public:
188
+ /** \brief Base class typedef.
189
+ * \sa PlainObjectBase
190
+ */
191
+ typedef PlainObjectBase<Matrix> Base;
192
+
193
+ enum { Options = Options_ };
194
+
195
+ EIGEN_DENSE_PUBLIC_INTERFACE(Matrix)
196
+
197
+ typedef typename Base::PlainObject PlainObject;
198
+
199
+ using Base::base;
200
+ using Base::coeffRef;
201
+
202
+ /**
203
+ * \brief Assigns matrices to each other.
204
+ *
205
+ * \note This is a special case of the templated operator=. Its purpose is
206
+ * to prevent a default operator= from hiding the templated operator=.
207
+ *
208
+ * \callgraph
209
+ */
210
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Matrix& operator=(const Matrix& other) { return Base::_set(other); }
211
+
212
+ /** \internal
213
+ * \brief Copies the value of the expression \a other into \c *this with automatic resizing.
214
+ *
215
+ * *this might be resized to match the dimensions of \a other. If *this was a null matrix (not already initialized),
216
+ * it will be initialized.
217
+ *
218
+ * Note that copying a row-vector into a vector (and conversely) is allowed.
219
+ * The resizing, if any, is then done in the appropriate way so that row-vectors
220
+ * remain row-vectors and vectors remain vectors.
221
+ */
222
+ template <typename OtherDerived>
223
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Matrix& operator=(const DenseBase<OtherDerived>& other) {
224
+ return Base::_set(other);
225
+ }
226
+
227
+ /* Here, doxygen failed to copy the brief information when using \copydoc */
228
+
229
+ /**
230
+ * \brief Copies the generic expression \a other into *this.
231
+ * \copydetails DenseBase::operator=(const EigenBase<OtherDerived> &other)
232
+ */
233
+ template <typename OtherDerived>
234
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Matrix& operator=(const EigenBase<OtherDerived>& other) {
235
+ return Base::operator=(other);
236
+ }
237
+
238
+ template <typename OtherDerived>
239
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Matrix& operator=(const ReturnByValue<OtherDerived>& func) {
240
+ return Base::operator=(func);
241
+ }
242
+
243
+ /** \brief Default constructor.
244
+ *
245
+ * For fixed-size matrices, does nothing.
246
+ *
247
+ * For dynamic-size matrices, creates an empty matrix of size 0. Does not allocate any array. Such a matrix
248
+ * is called a null matrix. This constructor is the unique way to create null matrices: resizing
249
+ * a matrix to 0 is not supported.
250
+ *
251
+ * \sa resize(Index,Index)
252
+ */
253
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Matrix()
254
+ : Base(){EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED}
255
+
256
+ // FIXME is it still needed
257
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr explicit Matrix(
258
+ internal::constructor_without_unaligned_array_assert)
259
+ : Base(internal::constructor_without_unaligned_array_assert()){EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED}
260
+
261
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Matrix(Matrix && other)
262
+ EIGEN_NOEXCEPT_IF(std::is_nothrow_move_constructible<Scalar>::value)
263
+ : Base(std::move(other)) {}
264
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE constexpr Matrix& operator=(Matrix&& other)
265
+ EIGEN_NOEXCEPT_IF(std::is_nothrow_move_assignable<Scalar>::value) {
266
+ Base::operator=(std::move(other));
267
+ return *this;
268
+ }
269
+
270
+ /** \copydoc PlainObjectBase(const Scalar&, const Scalar&, const Scalar&, const Scalar&, const ArgTypes&... args)
271
+ *
272
+ * Example: \include Matrix_variadic_ctor_cxx11.cpp
273
+ * Output: \verbinclude Matrix_variadic_ctor_cxx11.out
274
+ *
275
+ * \sa Matrix(const std::initializer_list<std::initializer_list<Scalar>>&)
276
+ */
277
+ template <typename... ArgTypes>
278
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Matrix(const Scalar& a0, const Scalar& a1, const Scalar& a2, const Scalar& a3,
279
+ const ArgTypes&... args)
280
+ : Base(a0, a1, a2, a3, args...) {}
281
+
282
+ /** \brief Constructs a Matrix and initializes it from the coefficients given as initializer-lists grouped by row.
283
+ * \cpp11
284
+ *
285
+ * In the general case, the constructor takes a list of rows, each row being represented as a list of coefficients:
286
+ *
287
+ * Example: \include Matrix_initializer_list_23_cxx11.cpp
288
+ * Output: \verbinclude Matrix_initializer_list_23_cxx11.out
289
+ *
290
+ * Each of the inner initializer lists must contain the exact same number of elements, otherwise an assertion is
291
+ * triggered.
292
+ *
293
+ * In the case of a compile-time column vector, implicit transposition from a single row is allowed.
294
+ * Therefore <code>VectorXd{{1,2,3,4,5}}</code> is legal and the more verbose syntax
295
+ * <code>RowVectorXd{{1},{2},{3},{4},{5}}</code> can be avoided:
296
+ *
297
+ * Example: \include Matrix_initializer_list_vector_cxx11.cpp
298
+ * Output: \verbinclude Matrix_initializer_list_vector_cxx11.out
299
+ *
300
+ * In the case of fixed-sized matrices, the initializer list sizes must exactly match the matrix sizes,
301
+ * and implicit transposition is allowed for compile-time vectors only.
302
+ *
303
+ * \sa Matrix(const Scalar& a0, const Scalar& a1, const Scalar& a2, const Scalar& a3, const ArgTypes&... args)
304
+ */
305
+ EIGEN_DEVICE_FUNC explicit constexpr EIGEN_STRONG_INLINE Matrix(
306
+ const std::initializer_list<std::initializer_list<Scalar>>& list)
307
+ : Base(list) {}
308
+
309
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
310
+
311
+ // This constructor is for both 1x1 matrices and dynamic vectors
312
+ template <typename T>
313
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit Matrix(const T& x) {
314
+ Base::template _init1<T>(x);
315
+ }
316
+
317
+ template <typename T0, typename T1>
318
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Matrix(const T0& x, const T1& y) {
319
+ Base::template _init2<T0, T1>(x, y);
320
+ }
321
+
322
+ #else
323
+ /** \brief Constructs a fixed-sized matrix initialized with coefficients starting at \a data */
324
+ EIGEN_DEVICE_FUNC explicit Matrix(const Scalar* data);
325
+
326
+ /** \brief Constructs a vector or row-vector with given dimension. \only_for_vectors
327
+ *
328
+ * This is useful for dynamic-size vectors. For fixed-size vectors,
329
+ * it is redundant to pass these parameters, so one should use the default constructor
330
+ * Matrix() instead.
331
+ *
332
+ * \warning This constructor is disabled for fixed-size \c 1x1 matrices. For instance,
333
+ * calling Matrix<double,1,1>(1) will call the initialization constructor: Matrix(const Scalar&).
334
+ * For fixed-size \c 1x1 matrices it is therefore recommended to use the default
335
+ * constructor Matrix() instead, especially when using one of the non standard
336
+ * \c EIGEN_INITIALIZE_MATRICES_BY_{ZERO,\c NAN} macros (see \ref TopicPreprocessorDirectives).
337
+ */
338
+ EIGEN_STRONG_INLINE explicit Matrix(Index dim);
339
+ /** \brief Constructs an initialized 1x1 matrix with the given coefficient
340
+ * \sa Matrix(const Scalar&, const Scalar&, const Scalar&, const Scalar&, const ArgTypes&...) */
341
+ Matrix(const Scalar& x);
342
+ /** \brief Constructs an uninitialized matrix with \a rows rows and \a cols columns.
343
+ *
344
+ * This is useful for dynamic-size matrices. For fixed-size matrices,
345
+ * it is redundant to pass these parameters, so one should use the default constructor
346
+ * Matrix() instead.
347
+ *
348
+ * \warning This constructor is disabled for fixed-size \c 1x2 and \c 2x1 vectors. For instance,
349
+ * calling Matrix2f(2,1) will call the initialization constructor: Matrix(const Scalar& x, const Scalar& y).
350
+ * For fixed-size \c 1x2 or \c 2x1 vectors it is therefore recommended to use the default
351
+ * constructor Matrix() instead, especially when using one of the non standard
352
+ * \c EIGEN_INITIALIZE_MATRICES_BY_{ZERO,\c NAN} macros (see \ref TopicPreprocessorDirectives).
353
+ */
354
+ EIGEN_DEVICE_FUNC Matrix(Index rows, Index cols);
355
+
356
+ /** \brief Constructs an initialized 2D vector with given coefficients
357
+ * \sa Matrix(const Scalar&, const Scalar&, const Scalar&, const Scalar&, const ArgTypes&...) */
358
+ Matrix(const Scalar& x, const Scalar& y);
359
+ #endif // end EIGEN_PARSED_BY_DOXYGEN
360
+
361
+ /** \brief Constructs an initialized 3D vector with given coefficients
362
+ * \sa Matrix(const Scalar&, const Scalar&, const Scalar&, const Scalar&, const ArgTypes&...)
363
+ */
364
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Matrix(const Scalar& x, const Scalar& y, const Scalar& z) {
365
+ EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Matrix, 3)
366
+ m_storage.data()[0] = x;
367
+ m_storage.data()[1] = y;
368
+ m_storage.data()[2] = z;
369
+ }
370
+ /** \brief Constructs an initialized 4D vector with given coefficients
371
+ * \sa Matrix(const Scalar&, const Scalar&, const Scalar&, const Scalar&, const ArgTypes&...)
372
+ */
373
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Matrix(const Scalar& x, const Scalar& y, const Scalar& z, const Scalar& w) {
374
+ EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Matrix, 4)
375
+ m_storage.data()[0] = x;
376
+ m_storage.data()[1] = y;
377
+ m_storage.data()[2] = z;
378
+ m_storage.data()[3] = w;
379
+ }
380
+
381
+ /** \brief Copy constructor */
382
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Matrix(const Matrix& other) : Base(other) {}
383
+
384
+ /** \brief Copy constructor for generic expressions.
385
+ * \sa MatrixBase::operator=(const EigenBase<OtherDerived>&)
386
+ */
387
+ template <typename OtherDerived>
388
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Matrix(const EigenBase<OtherDerived>& other) : Base(other.derived()) {}
389
+
390
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index innerStride() const EIGEN_NOEXCEPT { return 1; }
391
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index outerStride() const EIGEN_NOEXCEPT { return this->innerSize(); }
392
+
393
+ /////////// Geometry module ///////////
394
+
395
+ template <typename OtherDerived>
396
+ EIGEN_DEVICE_FUNC explicit Matrix(const RotationBase<OtherDerived, ColsAtCompileTime>& r);
397
+ template <typename OtherDerived>
398
+ EIGEN_DEVICE_FUNC Matrix& operator=(const RotationBase<OtherDerived, ColsAtCompileTime>& r);
399
+
400
+ // allow to extend Matrix outside Eigen
401
+ #ifdef EIGEN_MATRIX_PLUGIN
402
+ #include EIGEN_MATRIX_PLUGIN
403
+ #endif
404
+
405
+ protected:
406
+ template <typename Derived, typename OtherDerived, bool IsVector>
407
+ friend struct internal::conservative_resize_like_impl;
408
+
409
+ using Base::m_storage;
410
+ };
411
+
412
+ /** \defgroup matrixtypedefs Global matrix typedefs
413
+ *
414
+ * \ingroup Core_Module
415
+ *
416
+ * %Eigen defines several typedef shortcuts for most common matrix and vector types.
417
+ *
418
+ * The general patterns are the following:
419
+ *
420
+ * \c MatrixSizeType where \c Size can be \c 2,\c 3,\c 4 for fixed size square matrices or \c X for dynamic size,
421
+ * and where \c Type can be \c i for integer, \c f for float, \c d for double, \c cf for complex float, \c cd
422
+ * for complex double.
423
+ *
424
+ * For example, \c Matrix3d is a fixed-size 3x3 matrix type of doubles, and \c MatrixXf is a dynamic-size matrix of
425
+ * floats.
426
+ *
427
+ * There are also \c VectorSizeType and \c RowVectorSizeType which are self-explanatory. For example, \c Vector4cf is
428
+ * a fixed-size vector of 4 complex floats.
429
+ *
430
+ * With \cpp11, template alias are also defined for common sizes.
431
+ * They follow the same pattern as above except that the scalar type suffix is replaced by a
432
+ * template parameter, i.e.:
433
+ * - `MatrixSize<Type>` where `Size` can be \c 2,\c 3,\c 4 for fixed size square matrices or \c X for dynamic size.
434
+ * - `MatrixXSize<Type>` and `MatrixSizeX<Type>` where `Size` can be \c 2,\c 3,\c 4 for hybrid dynamic/fixed matrices.
435
+ * - `VectorSize<Type>` and `RowVectorSize<Type>` for column and row vectors.
436
+ *
437
+ * With \cpp11, you can also use fully generic column and row vector types: `Vector<Type,Size>` and
438
+ * `RowVector<Type,Size>`.
439
+ *
440
+ * \sa class Matrix
441
+ */
442
+
443
+ #define EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, Size, SizeSuffix) \
444
+ /** \ingroup matrixtypedefs */ \
445
+ /** \brief `Size`&times;`Size` matrix of type `Type`. */ \
446
+ typedef Matrix<Type, Size, Size> Matrix##SizeSuffix##TypeSuffix; \
447
+ /** \ingroup matrixtypedefs */ \
448
+ /** \brief `Size`&times;`1` vector of type `Type`. */ \
449
+ typedef Matrix<Type, Size, 1> Vector##SizeSuffix##TypeSuffix; \
450
+ /** \ingroup matrixtypedefs */ \
451
+ /** \brief `1`&times;`Size` vector of type `Type`. */ \
452
+ typedef Matrix<Type, 1, Size> RowVector##SizeSuffix##TypeSuffix;
453
+
454
+ #define EIGEN_MAKE_FIXED_TYPEDEFS(Type, TypeSuffix, Size) \
455
+ /** \ingroup matrixtypedefs */ \
456
+ /** \brief `Size`&times;`Dynamic` matrix of type `Type`. */ \
457
+ typedef Matrix<Type, Size, Dynamic> Matrix##Size##X##TypeSuffix; \
458
+ /** \ingroup matrixtypedefs */ \
459
+ /** \brief `Dynamic`&times;`Size` matrix of type `Type`. */ \
460
+ typedef Matrix<Type, Dynamic, Size> Matrix##X##Size##TypeSuffix;
461
+
462
+ #define EIGEN_MAKE_TYPEDEFS_ALL_SIZES(Type, TypeSuffix) \
463
+ EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 2, 2) \
464
+ EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 3, 3) \
465
+ EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 4, 4) \
466
+ EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, Dynamic, X) \
467
+ EIGEN_MAKE_FIXED_TYPEDEFS(Type, TypeSuffix, 2) \
468
+ EIGEN_MAKE_FIXED_TYPEDEFS(Type, TypeSuffix, 3) \
469
+ EIGEN_MAKE_FIXED_TYPEDEFS(Type, TypeSuffix, 4)
470
+
471
+ EIGEN_MAKE_TYPEDEFS_ALL_SIZES(int, i)
472
+ EIGEN_MAKE_TYPEDEFS_ALL_SIZES(float, f)
473
+ EIGEN_MAKE_TYPEDEFS_ALL_SIZES(double, d)
474
+ EIGEN_MAKE_TYPEDEFS_ALL_SIZES(std::complex<float>, cf)
475
+ EIGEN_MAKE_TYPEDEFS_ALL_SIZES(std::complex<double>, cd)
476
+
477
+ #undef EIGEN_MAKE_TYPEDEFS_ALL_SIZES
478
+ #undef EIGEN_MAKE_TYPEDEFS
479
+ #undef EIGEN_MAKE_FIXED_TYPEDEFS
480
+
481
+ #define EIGEN_MAKE_TYPEDEFS(Size, SizeSuffix) \
482
+ /** \ingroup matrixtypedefs */ \
483
+ /** \brief \cpp11 `Size`&times;`Size` matrix of type `Type`.*/ \
484
+ template <typename Type> \
485
+ using Matrix##SizeSuffix = Matrix<Type, Size, Size>; \
486
+ /** \ingroup matrixtypedefs */ \
487
+ /** \brief \cpp11 `Size`&times;`1` vector of type `Type`.*/ \
488
+ template <typename Type> \
489
+ using Vector##SizeSuffix = Matrix<Type, Size, 1>; \
490
+ /** \ingroup matrixtypedefs */ \
491
+ /** \brief \cpp11 `1`&times;`Size` vector of type `Type`.*/ \
492
+ template <typename Type> \
493
+ using RowVector##SizeSuffix = Matrix<Type, 1, Size>;
494
+
495
+ #define EIGEN_MAKE_FIXED_TYPEDEFS(Size) \
496
+ /** \ingroup matrixtypedefs */ \
497
+ /** \brief \cpp11 `Size`&times;`Dynamic` matrix of type `Type` */ \
498
+ template <typename Type> \
499
+ using Matrix##Size##X = Matrix<Type, Size, Dynamic>; \
500
+ /** \ingroup matrixtypedefs */ \
501
+ /** \brief \cpp11 `Dynamic`&times;`Size` matrix of type `Type`. */ \
502
+ template <typename Type> \
503
+ using Matrix##X##Size = Matrix<Type, Dynamic, Size>;
504
+
505
+ EIGEN_MAKE_TYPEDEFS(2, 2)
506
+ EIGEN_MAKE_TYPEDEFS(3, 3)
507
+ EIGEN_MAKE_TYPEDEFS(4, 4)
508
+ EIGEN_MAKE_TYPEDEFS(Dynamic, X)
509
+ EIGEN_MAKE_FIXED_TYPEDEFS(2)
510
+ EIGEN_MAKE_FIXED_TYPEDEFS(3)
511
+ EIGEN_MAKE_FIXED_TYPEDEFS(4)
512
+
513
+ /** \ingroup matrixtypedefs
514
+ * \brief \cpp11 `Size`&times;`1` vector of type `Type`. */
515
+ template <typename Type, int Size>
516
+ using Vector = Matrix<Type, Size, 1>;
517
+
518
+ /** \ingroup matrixtypedefs
519
+ * \brief \cpp11 `1`&times;`Size` vector of type `Type`. */
520
+ template <typename Type, int Size>
521
+ using RowVector = Matrix<Type, 1, Size>;
522
+
523
+ #undef EIGEN_MAKE_TYPEDEFS
524
+ #undef EIGEN_MAKE_FIXED_TYPEDEFS
525
+
526
+ } // end namespace Eigen
527
+
528
+ #endif // EIGEN_MATRIX_H
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/MatrixBase.h ADDED
@@ -0,0 +1,542 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2006-2009 Benoit Jacob <jacob.benoit.1@gmail.com>
5
+ // Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
6
+ //
7
+ // This Source Code Form is subject to the terms of the Mozilla
8
+ // Public License v. 2.0. If a copy of the MPL was not distributed
9
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
10
+
11
+ #ifndef EIGEN_MATRIXBASE_H
12
+ #define EIGEN_MATRIXBASE_H
13
+
14
+ // IWYU pragma: private
15
+ #include "./InternalHeaderCheck.h"
16
+
17
+ namespace Eigen {
18
+
19
+ /** \class MatrixBase
20
+ * \ingroup Core_Module
21
+ *
22
+ * \brief Base class for all dense matrices, vectors, and expressions
23
+ *
24
+ * This class is the base that is inherited by all matrix, vector, and related expression
25
+ * types. Most of the Eigen API is contained in this class, and its base classes. Other important
26
+ * classes for the Eigen API are Matrix, and VectorwiseOp.
27
+ *
28
+ * Note that some methods are defined in other modules such as the \ref LU_Module LU module
29
+ * for all functions related to matrix inversions.
30
+ *
31
+ * \tparam Derived is the derived type, e.g. a matrix type, or an expression, etc.
32
+ *
33
+ * When writing a function taking Eigen objects as argument, if you want your function
34
+ * to take as argument any matrix, vector, or expression, just let it take a
35
+ * MatrixBase argument. As an example, here is a function printFirstRow which, given
36
+ * a matrix, vector, or expression \a x, prints the first row of \a x.
37
+ *
38
+ * \code
39
+ template<typename Derived>
40
+ void printFirstRow(const Eigen::MatrixBase<Derived>& x)
41
+ {
42
+ cout << x.row(0) << endl;
43
+ }
44
+ * \endcode
45
+ *
46
+ * This class can be extended with the help of the plugin mechanism described on the page
47
+ * \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_MATRIXBASE_PLUGIN.
48
+ *
49
+ * \sa \blank \ref TopicClassHierarchy
50
+ */
51
+ template <typename Derived>
52
+ class MatrixBase : public DenseBase<Derived> {
53
+ public:
54
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
55
+ typedef MatrixBase StorageBaseType;
56
+ typedef typename internal::traits<Derived>::StorageKind StorageKind;
57
+ typedef typename internal::traits<Derived>::StorageIndex StorageIndex;
58
+ typedef typename internal::traits<Derived>::Scalar Scalar;
59
+ typedef typename internal::packet_traits<Scalar>::type PacketScalar;
60
+ typedef typename NumTraits<Scalar>::Real RealScalar;
61
+
62
+ typedef DenseBase<Derived> Base;
63
+ using Base::ColsAtCompileTime;
64
+ using Base::Flags;
65
+ using Base::IsVectorAtCompileTime;
66
+ using Base::MaxColsAtCompileTime;
67
+ using Base::MaxRowsAtCompileTime;
68
+ using Base::MaxSizeAtCompileTime;
69
+ using Base::RowsAtCompileTime;
70
+ using Base::SizeAtCompileTime;
71
+
72
+ using Base::coeff;
73
+ using Base::coeffRef;
74
+ using Base::cols;
75
+ using Base::const_cast_derived;
76
+ using Base::derived;
77
+ using Base::eval;
78
+ using Base::lazyAssign;
79
+ using Base::rows;
80
+ using Base::size;
81
+ using Base::operator-;
82
+ using Base::operator+=;
83
+ using Base::operator-=;
84
+ using Base::operator*=;
85
+ using Base::operator/=;
86
+
87
+ typedef typename Base::CoeffReturnType CoeffReturnType;
88
+ typedef typename Base::ConstTransposeReturnType ConstTransposeReturnType;
89
+ typedef typename Base::RowXpr RowXpr;
90
+ typedef typename Base::ColXpr ColXpr;
91
+ #endif // not EIGEN_PARSED_BY_DOXYGEN
92
+
93
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
94
+ /** type of the equivalent square matrix */
95
+ typedef Matrix<Scalar, internal::max_size_prefer_dynamic(RowsAtCompileTime, ColsAtCompileTime),
96
+ internal::max_size_prefer_dynamic(RowsAtCompileTime, ColsAtCompileTime)>
97
+ SquareMatrixType;
98
+ #endif // not EIGEN_PARSED_BY_DOXYGEN
99
+
100
+ /** \returns the size of the main diagonal, which is min(rows(),cols()).
101
+ * \sa rows(), cols(), SizeAtCompileTime. */
102
+ EIGEN_DEVICE_FUNC inline Index diagonalSize() const { return (numext::mini)(rows(), cols()); }
103
+
104
+ typedef typename Base::PlainObject PlainObject;
105
+
106
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
107
+ /** \internal Represents a matrix with all coefficients equal to one another*/
108
+ typedef CwiseNullaryOp<internal::scalar_constant_op<Scalar>, PlainObject> ConstantReturnType;
109
+ /** \internal the return type of MatrixBase::adjoint() */
110
+ typedef std::conditional_t<NumTraits<Scalar>::IsComplex,
111
+ CwiseUnaryOp<internal::scalar_conjugate_op<Scalar>, ConstTransposeReturnType>,
112
+ ConstTransposeReturnType>
113
+ AdjointReturnType;
114
+ /** \internal Return type of eigenvalues() */
115
+ typedef Matrix<std::complex<RealScalar>, internal::traits<Derived>::ColsAtCompileTime, 1, ColMajor>
116
+ EigenvaluesReturnType;
117
+ /** \internal the return type of identity */
118
+ typedef CwiseNullaryOp<internal::scalar_identity_op<Scalar>, PlainObject> IdentityReturnType;
119
+ /** \internal the return type of unit vectors */
120
+ typedef Block<const CwiseNullaryOp<internal::scalar_identity_op<Scalar>, SquareMatrixType>,
121
+ internal::traits<Derived>::RowsAtCompileTime, internal::traits<Derived>::ColsAtCompileTime>
122
+ BasisReturnType;
123
+ #endif // not EIGEN_PARSED_BY_DOXYGEN
124
+
125
+ #define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::MatrixBase
126
+ #define EIGEN_DOC_UNARY_ADDONS(X, Y)
127
+ #include "../plugins/CommonCwiseBinaryOps.inc"
128
+ #include "../plugins/MatrixCwiseUnaryOps.inc"
129
+ #include "../plugins/MatrixCwiseBinaryOps.inc"
130
+ #ifdef EIGEN_MATRIXBASE_PLUGIN
131
+ #include EIGEN_MATRIXBASE_PLUGIN
132
+ #endif
133
+ #undef EIGEN_CURRENT_STORAGE_BASE_CLASS
134
+ #undef EIGEN_DOC_UNARY_ADDONS
135
+
136
+ /** Special case of the template operator=, in order to prevent the compiler
137
+ * from generating a default operator= (issue hit with g++ 4.1)
138
+ */
139
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const MatrixBase& other);
140
+
141
+ // We cannot inherit here via Base::operator= since it is causing
142
+ // trouble with MSVC.
143
+
144
+ template <typename OtherDerived>
145
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const DenseBase<OtherDerived>& other);
146
+
147
+ template <typename OtherDerived>
148
+ EIGEN_DEVICE_FUNC Derived& operator=(const EigenBase<OtherDerived>& other);
149
+
150
+ template <typename OtherDerived>
151
+ EIGEN_DEVICE_FUNC Derived& operator=(const ReturnByValue<OtherDerived>& other);
152
+
153
+ template <typename OtherDerived>
154
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator+=(const MatrixBase<OtherDerived>& other);
155
+ template <typename OtherDerived>
156
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator-=(const MatrixBase<OtherDerived>& other);
157
+
158
+ template <typename OtherDerived>
159
+ EIGEN_DEVICE_FUNC const Product<Derived, OtherDerived> operator*(const MatrixBase<OtherDerived>& other) const;
160
+
161
+ template <typename OtherDerived>
162
+ EIGEN_DEVICE_FUNC const Product<Derived, OtherDerived, LazyProduct> lazyProduct(
163
+ const MatrixBase<OtherDerived>& other) const;
164
+
165
+ template <typename OtherDerived>
166
+ Derived& operator*=(const EigenBase<OtherDerived>& other);
167
+
168
+ template <typename OtherDerived>
169
+ void applyOnTheLeft(const EigenBase<OtherDerived>& other);
170
+
171
+ template <typename OtherDerived>
172
+ void applyOnTheRight(const EigenBase<OtherDerived>& other);
173
+
174
+ template <typename DiagonalDerived>
175
+ EIGEN_DEVICE_FUNC const Product<Derived, DiagonalDerived, LazyProduct> operator*(
176
+ const DiagonalBase<DiagonalDerived>& diagonal) const;
177
+
178
+ template <typename SkewDerived>
179
+ EIGEN_DEVICE_FUNC const Product<Derived, SkewDerived, LazyProduct> operator*(
180
+ const SkewSymmetricBase<SkewDerived>& skew) const;
181
+
182
+ template <typename OtherDerived>
183
+ EIGEN_DEVICE_FUNC typename ScalarBinaryOpTraits<typename internal::traits<Derived>::Scalar,
184
+ typename internal::traits<OtherDerived>::Scalar>::ReturnType
185
+ dot(const MatrixBase<OtherDerived>& other) const;
186
+
187
+ EIGEN_DEVICE_FUNC RealScalar squaredNorm() const;
188
+ EIGEN_DEVICE_FUNC RealScalar norm() const;
189
+ RealScalar stableNorm() const;
190
+ RealScalar blueNorm() const;
191
+ RealScalar hypotNorm() const;
192
+ EIGEN_DEVICE_FUNC const PlainObject normalized() const;
193
+ EIGEN_DEVICE_FUNC const PlainObject stableNormalized() const;
194
+ EIGEN_DEVICE_FUNC void normalize();
195
+ EIGEN_DEVICE_FUNC void stableNormalize();
196
+
197
+ EIGEN_DEVICE_FUNC const AdjointReturnType adjoint() const;
198
+ EIGEN_DEVICE_FUNC void adjointInPlace();
199
+
200
+ typedef Diagonal<Derived> DiagonalReturnType;
201
+ EIGEN_DEVICE_FUNC DiagonalReturnType diagonal();
202
+
203
+ typedef Diagonal<const Derived> ConstDiagonalReturnType;
204
+ EIGEN_DEVICE_FUNC const ConstDiagonalReturnType diagonal() const;
205
+
206
+ template <int Index>
207
+ EIGEN_DEVICE_FUNC Diagonal<Derived, Index> diagonal();
208
+
209
+ template <int Index>
210
+ EIGEN_DEVICE_FUNC const Diagonal<const Derived, Index> diagonal() const;
211
+
212
+ EIGEN_DEVICE_FUNC Diagonal<Derived, DynamicIndex> diagonal(Index index);
213
+ EIGEN_DEVICE_FUNC const Diagonal<const Derived, DynamicIndex> diagonal(Index index) const;
214
+
215
+ template <unsigned int Mode>
216
+ struct TriangularViewReturnType {
217
+ typedef TriangularView<Derived, Mode> Type;
218
+ };
219
+ template <unsigned int Mode>
220
+ struct ConstTriangularViewReturnType {
221
+ typedef const TriangularView<const Derived, Mode> Type;
222
+ };
223
+
224
+ template <unsigned int Mode>
225
+ EIGEN_DEVICE_FUNC typename TriangularViewReturnType<Mode>::Type triangularView();
226
+ template <unsigned int Mode>
227
+ EIGEN_DEVICE_FUNC typename ConstTriangularViewReturnType<Mode>::Type triangularView() const;
228
+
229
+ template <unsigned int UpLo>
230
+ struct SelfAdjointViewReturnType {
231
+ typedef SelfAdjointView<Derived, UpLo> Type;
232
+ };
233
+ template <unsigned int UpLo>
234
+ struct ConstSelfAdjointViewReturnType {
235
+ typedef const SelfAdjointView<const Derived, UpLo> Type;
236
+ };
237
+
238
+ template <unsigned int UpLo>
239
+ EIGEN_DEVICE_FUNC typename SelfAdjointViewReturnType<UpLo>::Type selfadjointView();
240
+ template <unsigned int UpLo>
241
+ EIGEN_DEVICE_FUNC typename ConstSelfAdjointViewReturnType<UpLo>::Type selfadjointView() const;
242
+
243
+ const SparseView<Derived> sparseView(
244
+ const Scalar& m_reference = Scalar(0),
245
+ const typename NumTraits<Scalar>::Real& m_epsilon = NumTraits<Scalar>::dummy_precision()) const;
246
+ EIGEN_DEVICE_FUNC static const IdentityReturnType Identity();
247
+ EIGEN_DEVICE_FUNC static const IdentityReturnType Identity(Index rows, Index cols);
248
+ EIGEN_DEVICE_FUNC static const BasisReturnType Unit(Index size, Index i);
249
+ EIGEN_DEVICE_FUNC static const BasisReturnType Unit(Index i);
250
+ EIGEN_DEVICE_FUNC static const BasisReturnType UnitX();
251
+ EIGEN_DEVICE_FUNC static const BasisReturnType UnitY();
252
+ EIGEN_DEVICE_FUNC static const BasisReturnType UnitZ();
253
+ EIGEN_DEVICE_FUNC static const BasisReturnType UnitW();
254
+
255
+ EIGEN_DEVICE_FUNC const DiagonalWrapper<const Derived> asDiagonal() const;
256
+ const PermutationWrapper<const Derived> asPermutation() const;
257
+ EIGEN_DEVICE_FUNC const SkewSymmetricWrapper<const Derived> asSkewSymmetric() const;
258
+
259
+ EIGEN_DEVICE_FUNC Derived& setIdentity();
260
+ EIGEN_DEVICE_FUNC Derived& setIdentity(Index rows, Index cols);
261
+ EIGEN_DEVICE_FUNC Derived& setUnit(Index i);
262
+ EIGEN_DEVICE_FUNC Derived& setUnit(Index newSize, Index i);
263
+
264
+ bool isIdentity(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
265
+ bool isDiagonal(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
266
+
267
+ bool isUpperTriangular(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
268
+ bool isLowerTriangular(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
269
+
270
+ bool isSkewSymmetric(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
271
+
272
+ template <typename OtherDerived>
273
+ bool isOrthogonal(const MatrixBase<OtherDerived>& other,
274
+ const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
275
+ bool isUnitary(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
276
+
277
+ /** \returns true if each coefficients of \c *this and \a other are all exactly equal.
278
+ * \warning When using floating point scalar values you probably should rather use a
279
+ * fuzzy comparison such as isApprox()
280
+ * \sa isApprox(), operator!= */
281
+ template <typename OtherDerived>
282
+ EIGEN_DEVICE_FUNC inline bool operator==(const MatrixBase<OtherDerived>& other) const {
283
+ return cwiseEqual(other).all();
284
+ }
285
+
286
+ /** \returns true if at least one pair of coefficients of \c *this and \a other are not exactly equal to each other.
287
+ * \warning When using floating point scalar values you probably should rather use a
288
+ * fuzzy comparison such as isApprox()
289
+ * \sa isApprox(), operator== */
290
+ template <typename OtherDerived>
291
+ EIGEN_DEVICE_FUNC inline bool operator!=(const MatrixBase<OtherDerived>& other) const {
292
+ return cwiseNotEqual(other).any();
293
+ }
294
+
295
+ NoAlias<Derived, Eigen::MatrixBase> EIGEN_DEVICE_FUNC noalias();
296
+
297
+ // TODO forceAlignedAccess is temporarily disabled
298
+ // Need to find a nicer workaround.
299
+ inline const Derived& forceAlignedAccess() const { return derived(); }
300
+ inline Derived& forceAlignedAccess() { return derived(); }
301
+ template <bool Enable>
302
+ inline const Derived& forceAlignedAccessIf() const {
303
+ return derived();
304
+ }
305
+ template <bool Enable>
306
+ inline Derived& forceAlignedAccessIf() {
307
+ return derived();
308
+ }
309
+
310
+ EIGEN_DEVICE_FUNC Scalar trace() const;
311
+
312
+ template <int p>
313
+ EIGEN_DEVICE_FUNC RealScalar lpNorm() const;
314
+
315
+ EIGEN_DEVICE_FUNC MatrixBase<Derived>& matrix() { return *this; }
316
+ EIGEN_DEVICE_FUNC const MatrixBase<Derived>& matrix() const { return *this; }
317
+
318
+ /** \returns an \link Eigen::ArrayBase Array \endlink expression of this matrix
319
+ * \sa ArrayBase::matrix() */
320
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ArrayWrapper<Derived> array() { return ArrayWrapper<Derived>(derived()); }
321
+ /** \returns a const \link Eigen::ArrayBase Array \endlink expression of this matrix
322
+ * \sa ArrayBase::matrix() */
323
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const ArrayWrapper<const Derived> array() const {
324
+ return ArrayWrapper<const Derived>(derived());
325
+ }
326
+
327
+ /////////// LU module ///////////
328
+
329
+ template <typename PermutationIndex = DefaultPermutationIndex>
330
+ inline const FullPivLU<PlainObject, PermutationIndex> fullPivLu() const;
331
+ template <typename PermutationIndex = DefaultPermutationIndex>
332
+ inline const PartialPivLU<PlainObject, PermutationIndex> partialPivLu() const;
333
+
334
+ template <typename PermutationIndex = DefaultPermutationIndex>
335
+ inline const PartialPivLU<PlainObject, PermutationIndex> lu() const;
336
+
337
+ EIGEN_DEVICE_FUNC inline const Inverse<Derived> inverse() const;
338
+
339
+ template <typename ResultType>
340
+ inline void computeInverseAndDetWithCheck(
341
+ ResultType& inverse, typename ResultType::Scalar& determinant, bool& invertible,
342
+ const RealScalar& absDeterminantThreshold = NumTraits<Scalar>::dummy_precision()) const;
343
+
344
+ template <typename ResultType>
345
+ inline void computeInverseWithCheck(
346
+ ResultType& inverse, bool& invertible,
347
+ const RealScalar& absDeterminantThreshold = NumTraits<Scalar>::dummy_precision()) const;
348
+
349
+ EIGEN_DEVICE_FUNC Scalar determinant() const;
350
+
351
+ /////////// Cholesky module ///////////
352
+
353
+ inline const LLT<PlainObject> llt() const;
354
+ inline const LDLT<PlainObject> ldlt() const;
355
+
356
+ /////////// QR module ///////////
357
+
358
+ inline const HouseholderQR<PlainObject> householderQr() const;
359
+ template <typename PermutationIndex = DefaultPermutationIndex>
360
+ inline const ColPivHouseholderQR<PlainObject, PermutationIndex> colPivHouseholderQr() const;
361
+ template <typename PermutationIndex = DefaultPermutationIndex>
362
+ inline const FullPivHouseholderQR<PlainObject, PermutationIndex> fullPivHouseholderQr() const;
363
+ template <typename PermutationIndex = DefaultPermutationIndex>
364
+ inline const CompleteOrthogonalDecomposition<PlainObject, PermutationIndex> completeOrthogonalDecomposition() const;
365
+
366
+ /////////// Eigenvalues module ///////////
367
+
368
+ inline EigenvaluesReturnType eigenvalues() const;
369
+ inline RealScalar operatorNorm() const;
370
+
371
+ /////////// SVD module ///////////
372
+
373
+ template <int Options = 0>
374
+ inline JacobiSVD<PlainObject, Options> jacobiSvd() const;
375
+ template <int Options = 0>
376
+ EIGEN_DEPRECATED inline JacobiSVD<PlainObject, Options> jacobiSvd(unsigned int computationOptions) const;
377
+
378
+ template <int Options = 0>
379
+ inline BDCSVD<PlainObject, Options> bdcSvd() const;
380
+ template <int Options = 0>
381
+ EIGEN_DEPRECATED inline BDCSVD<PlainObject, Options> bdcSvd(unsigned int computationOptions) const;
382
+
383
+ /////////// Geometry module ///////////
384
+
385
+ template <typename OtherDerived>
386
+ EIGEN_DEVICE_FUNC inline typename internal::cross_impl<Derived, OtherDerived>::return_type cross(
387
+ const MatrixBase<OtherDerived>& other) const;
388
+
389
+ template <typename OtherDerived>
390
+ EIGEN_DEVICE_FUNC inline PlainObject cross3(const MatrixBase<OtherDerived>& other) const;
391
+
392
+ EIGEN_DEVICE_FUNC inline PlainObject unitOrthogonal(void) const;
393
+
394
+ EIGEN_DEPRECATED EIGEN_DEVICE_FUNC inline Matrix<Scalar, 3, 1> eulerAngles(Index a0, Index a1, Index a2) const;
395
+
396
+ EIGEN_DEVICE_FUNC inline Matrix<Scalar, 3, 1> canonicalEulerAngles(Index a0, Index a1, Index a2) const;
397
+
398
+ // put this as separate enum value to work around possible GCC 4.3 bug (?)
399
+ enum {
400
+ HomogeneousReturnTypeDirection =
401
+ ColsAtCompileTime == 1 && RowsAtCompileTime == 1
402
+ ? ((internal::traits<Derived>::Flags & RowMajorBit) == RowMajorBit ? Horizontal : Vertical)
403
+ : ColsAtCompileTime == 1 ? Vertical
404
+ : Horizontal
405
+ };
406
+ typedef Homogeneous<Derived, HomogeneousReturnTypeDirection> HomogeneousReturnType;
407
+ EIGEN_DEVICE_FUNC inline HomogeneousReturnType homogeneous() const;
408
+
409
+ enum { SizeMinusOne = SizeAtCompileTime == Dynamic ? Dynamic : SizeAtCompileTime - 1 };
410
+ typedef Block<const Derived, internal::traits<Derived>::ColsAtCompileTime == 1 ? SizeMinusOne : 1,
411
+ internal::traits<Derived>::ColsAtCompileTime == 1 ? 1 : SizeMinusOne>
412
+ ConstStartMinusOne;
413
+ typedef EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(ConstStartMinusOne, Scalar, quotient) HNormalizedReturnType;
414
+ EIGEN_DEVICE_FUNC inline const HNormalizedReturnType hnormalized() const;
415
+
416
+ ////////// Householder module ///////////
417
+
418
+ EIGEN_DEVICE_FUNC void makeHouseholderInPlace(Scalar& tau, RealScalar& beta);
419
+ template <typename EssentialPart>
420
+ EIGEN_DEVICE_FUNC void makeHouseholder(EssentialPart& essential, Scalar& tau, RealScalar& beta) const;
421
+ template <typename EssentialPart>
422
+ EIGEN_DEVICE_FUNC void applyHouseholderOnTheLeft(const EssentialPart& essential, const Scalar& tau,
423
+ Scalar* workspace);
424
+ template <typename EssentialPart>
425
+ EIGEN_DEVICE_FUNC void applyHouseholderOnTheRight(const EssentialPart& essential, const Scalar& tau,
426
+ Scalar* workspace);
427
+
428
+ ///////// Jacobi module /////////
429
+
430
+ template <typename OtherScalar>
431
+ EIGEN_DEVICE_FUNC void applyOnTheLeft(Index p, Index q, const JacobiRotation<OtherScalar>& j);
432
+ template <typename OtherScalar>
433
+ EIGEN_DEVICE_FUNC void applyOnTheRight(Index p, Index q, const JacobiRotation<OtherScalar>& j);
434
+
435
+ ///////// SparseCore module /////////
436
+
437
+ template <typename OtherDerived>
438
+ EIGEN_STRONG_INLINE const typename SparseMatrixBase<OtherDerived>::template CwiseProductDenseReturnType<Derived>::Type
439
+ cwiseProduct(const SparseMatrixBase<OtherDerived>& other) const {
440
+ return other.cwiseProduct(derived());
441
+ }
442
+
443
+ ///////// MatrixFunctions module /////////
444
+
445
+ typedef typename internal::stem_function<Scalar>::type StemFunction;
446
+ #define EIGEN_MATRIX_FUNCTION(ReturnType, Name, Description) \
447
+ /** \returns an expression of the matrix Description of \c *this. \brief This function requires the <a \
448
+ * href="unsupported/group__MatrixFunctions__Module.html"> unsupported MatrixFunctions module</a>. To compute the \
449
+ * coefficient-wise Description use ArrayBase::##Name . */ \
450
+ const ReturnType<Derived> Name() const;
451
+ #define EIGEN_MATRIX_FUNCTION_1(ReturnType, Name, Description, Argument) \
452
+ /** \returns an expression of the matrix Description of \c *this. \brief This function requires the <a \
453
+ * href="unsupported/group__MatrixFunctions__Module.html"> unsupported MatrixFunctions module</a>. To compute the \
454
+ * coefficient-wise Description use ArrayBase::##Name . */ \
455
+ const ReturnType<Derived> Name(Argument) const;
456
+
457
+ EIGEN_MATRIX_FUNCTION(MatrixExponentialReturnValue, exp, exponential)
458
+ /** \brief Helper function for the <a href="unsupported/group__MatrixFunctions__Module.html"> unsupported
459
+ * MatrixFunctions module</a>.*/
460
+ const MatrixFunctionReturnValue<Derived> matrixFunction(StemFunction f) const;
461
+ EIGEN_MATRIX_FUNCTION(MatrixFunctionReturnValue, cosh, hyperbolic cosine)
462
+ EIGEN_MATRIX_FUNCTION(MatrixFunctionReturnValue, sinh, hyperbolic sine)
463
+ EIGEN_MATRIX_FUNCTION(MatrixFunctionReturnValue, atanh, inverse hyperbolic tangent)
464
+ EIGEN_MATRIX_FUNCTION(MatrixFunctionReturnValue, acosh, inverse hyperbolic cosine)
465
+ EIGEN_MATRIX_FUNCTION(MatrixFunctionReturnValue, asinh, inverse hyperbolic sine)
466
+ EIGEN_MATRIX_FUNCTION(MatrixFunctionReturnValue, cos, cosine)
467
+ EIGEN_MATRIX_FUNCTION(MatrixFunctionReturnValue, sin, sine)
468
+ EIGEN_MATRIX_FUNCTION(MatrixSquareRootReturnValue, sqrt, square root)
469
+ EIGEN_MATRIX_FUNCTION(MatrixLogarithmReturnValue, log, logarithm)
470
+ EIGEN_MATRIX_FUNCTION_1(MatrixPowerReturnValue, pow, power to \c p, const RealScalar& p)
471
+ EIGEN_MATRIX_FUNCTION_1(MatrixComplexPowerReturnValue, pow, power to \c p, const std::complex<RealScalar>& p)
472
+
473
+ protected:
474
+ EIGEN_DEFAULT_COPY_CONSTRUCTOR(MatrixBase)
475
+ EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(MatrixBase)
476
+
477
+ private:
478
+ EIGEN_DEVICE_FUNC explicit MatrixBase(int);
479
+ EIGEN_DEVICE_FUNC MatrixBase(int, int);
480
+ template <typename OtherDerived>
481
+ EIGEN_DEVICE_FUNC explicit MatrixBase(const MatrixBase<OtherDerived>&);
482
+
483
+ protected:
484
+ // mixing arrays and matrices is not legal
485
+ template <typename OtherDerived>
486
+ Derived& operator+=(const ArrayBase<OtherDerived>&) {
487
+ EIGEN_STATIC_ASSERT(std::ptrdiff_t(sizeof(typename OtherDerived::Scalar)) == -1,
488
+ YOU_CANNOT_MIX_ARRAYS_AND_MATRICES);
489
+ return *this;
490
+ }
491
+ // mixing arrays and matrices is not legal
492
+ template <typename OtherDerived>
493
+ Derived& operator-=(const ArrayBase<OtherDerived>&) {
494
+ EIGEN_STATIC_ASSERT(std::ptrdiff_t(sizeof(typename OtherDerived::Scalar)) == -1,
495
+ YOU_CANNOT_MIX_ARRAYS_AND_MATRICES);
496
+ return *this;
497
+ }
498
+ };
499
+
500
+ /***************************************************************************
501
+ * Implementation of matrix base methods
502
+ ***************************************************************************/
503
+
504
+ /** replaces \c *this by \c *this * \a other.
505
+ *
506
+ * \returns a reference to \c *this
507
+ *
508
+ * Example: \include MatrixBase_applyOnTheRight.cpp
509
+ * Output: \verbinclude MatrixBase_applyOnTheRight.out
510
+ */
511
+ template <typename Derived>
512
+ template <typename OtherDerived>
513
+ inline Derived& MatrixBase<Derived>::operator*=(const EigenBase<OtherDerived>& other) {
514
+ other.derived().applyThisOnTheRight(derived());
515
+ return derived();
516
+ }
517
+
518
+ /** replaces \c *this by \c *this * \a other. It is equivalent to MatrixBase::operator*=().
519
+ *
520
+ * Example: \include MatrixBase_applyOnTheRight.cpp
521
+ * Output: \verbinclude MatrixBase_applyOnTheRight.out
522
+ */
523
+ template <typename Derived>
524
+ template <typename OtherDerived>
525
+ inline void MatrixBase<Derived>::applyOnTheRight(const EigenBase<OtherDerived>& other) {
526
+ other.derived().applyThisOnTheRight(derived());
527
+ }
528
+
529
+ /** replaces \c *this by \a other * \c *this.
530
+ *
531
+ * Example: \include MatrixBase_applyOnTheLeft.cpp
532
+ * Output: \verbinclude MatrixBase_applyOnTheLeft.out
533
+ */
534
+ template <typename Derived>
535
+ template <typename OtherDerived>
536
+ inline void MatrixBase<Derived>::applyOnTheLeft(const EigenBase<OtherDerived>& other) {
537
+ other.derived().applyThisOnTheLeft(derived());
538
+ }
539
+
540
+ } // end namespace Eigen
541
+
542
+ #endif // EIGEN_MATRIXBASE_H
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/Product.h ADDED
@@ -0,0 +1,307 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2008-2011 Gael Guennebaud <gael.guennebaud@inria.fr>
5
+ //
6
+ // This Source Code Form is subject to the terms of the Mozilla
7
+ // Public License v. 2.0. If a copy of the MPL was not distributed
8
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9
+
10
+ #ifndef EIGEN_PRODUCT_H
11
+ #define EIGEN_PRODUCT_H
12
+
13
+ // IWYU pragma: private
14
+ #include "./InternalHeaderCheck.h"
15
+
16
+ namespace Eigen {
17
+
18
+ template <typename Lhs, typename Rhs, int Option, typename StorageKind>
19
+ class ProductImpl;
20
+
21
+ namespace internal {
22
+
23
+ template <typename Lhs, typename Rhs, int Option>
24
+ struct traits<Product<Lhs, Rhs, Option>> {
25
+ typedef remove_all_t<Lhs> LhsCleaned;
26
+ typedef remove_all_t<Rhs> RhsCleaned;
27
+ typedef traits<LhsCleaned> LhsTraits;
28
+ typedef traits<RhsCleaned> RhsTraits;
29
+
30
+ typedef MatrixXpr XprKind;
31
+
32
+ typedef typename ScalarBinaryOpTraits<typename traits<LhsCleaned>::Scalar,
33
+ typename traits<RhsCleaned>::Scalar>::ReturnType Scalar;
34
+ typedef typename product_promote_storage_type<typename LhsTraits::StorageKind, typename RhsTraits::StorageKind,
35
+ internal::product_type<Lhs, Rhs>::ret>::ret StorageKind;
36
+ typedef typename promote_index_type<typename LhsTraits::StorageIndex, typename RhsTraits::StorageIndex>::type
37
+ StorageIndex;
38
+
39
+ enum {
40
+ RowsAtCompileTime = LhsTraits::RowsAtCompileTime,
41
+ ColsAtCompileTime = RhsTraits::ColsAtCompileTime,
42
+ MaxRowsAtCompileTime = LhsTraits::MaxRowsAtCompileTime,
43
+ MaxColsAtCompileTime = RhsTraits::MaxColsAtCompileTime,
44
+
45
+ // FIXME: only needed by GeneralMatrixMatrixTriangular
46
+ InnerSize = min_size_prefer_fixed(LhsTraits::ColsAtCompileTime, RhsTraits::RowsAtCompileTime),
47
+
48
+ // The storage order is somewhat arbitrary here. The correct one will be determined through the evaluator.
49
+ Flags = (MaxRowsAtCompileTime == 1 && MaxColsAtCompileTime != 1) ? RowMajorBit
50
+ : (MaxColsAtCompileTime == 1 && MaxRowsAtCompileTime != 1) ? 0
51
+ : (((LhsTraits::Flags & NoPreferredStorageOrderBit) && (RhsTraits::Flags & RowMajorBit)) ||
52
+ ((RhsTraits::Flags & NoPreferredStorageOrderBit) && (LhsTraits::Flags & RowMajorBit)))
53
+ ? RowMajorBit
54
+ : NoPreferredStorageOrderBit
55
+ };
56
+ };
57
+
58
+ struct TransposeProductEnum {
59
+ // convenience enumerations to specialize transposed products
60
+ enum : int {
61
+ Default = 0x00,
62
+ Matrix = 0x01,
63
+ Permutation = 0x02,
64
+ MatrixMatrix = (Matrix << 8) | Matrix,
65
+ MatrixPermutation = (Matrix << 8) | Permutation,
66
+ PermutationMatrix = (Permutation << 8) | Matrix
67
+ };
68
+ };
69
+ template <typename Xpr>
70
+ struct TransposeKind {
71
+ static constexpr int Kind = is_matrix_base_xpr<Xpr>::value ? TransposeProductEnum::Matrix
72
+ : is_permutation_base_xpr<Xpr>::value ? TransposeProductEnum::Permutation
73
+ : TransposeProductEnum::Default;
74
+ };
75
+
76
+ template <typename Lhs, typename Rhs>
77
+ struct TransposeProductKind {
78
+ static constexpr int Kind = (TransposeKind<Lhs>::Kind << 8) | TransposeKind<Rhs>::Kind;
79
+ };
80
+
81
+ template <typename Lhs, typename Rhs, int Option, int Kind = TransposeProductKind<Lhs, Rhs>::Kind>
82
+ struct product_transpose_helper {
83
+ // by default, don't optimize the transposed product
84
+ using Derived = Product<Lhs, Rhs, Option>;
85
+ using Scalar = typename Derived::Scalar;
86
+ using TransposeType = Transpose<const Derived>;
87
+ using ConjugateTransposeType = CwiseUnaryOp<scalar_conjugate_op<Scalar>, TransposeType>;
88
+ using AdjointType = std::conditional_t<NumTraits<Scalar>::IsComplex, ConjugateTransposeType, TransposeType>;
89
+
90
+ // return (lhs * rhs)^T
91
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TransposeType run_transpose(const Derived& derived) {
92
+ return TransposeType(derived);
93
+ }
94
+ // return (lhs * rhs)^H
95
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE AdjointType run_adjoint(const Derived& derived) {
96
+ return AdjointType(TransposeType(derived));
97
+ }
98
+ };
99
+
100
+ template <typename Lhs, typename Rhs, int Option>
101
+ struct product_transpose_helper<Lhs, Rhs, Option, TransposeProductEnum::MatrixMatrix> {
102
+ // expand the transposed matrix-matrix product
103
+ using Derived = Product<Lhs, Rhs, Option>;
104
+
105
+ using LhsScalar = typename traits<Lhs>::Scalar;
106
+ using LhsTransposeType = typename DenseBase<Lhs>::ConstTransposeReturnType;
107
+ using LhsConjugateTransposeType = CwiseUnaryOp<scalar_conjugate_op<LhsScalar>, LhsTransposeType>;
108
+ using LhsAdjointType =
109
+ std::conditional_t<NumTraits<LhsScalar>::IsComplex, LhsConjugateTransposeType, LhsTransposeType>;
110
+
111
+ using RhsScalar = typename traits<Rhs>::Scalar;
112
+ using RhsTransposeType = typename DenseBase<Rhs>::ConstTransposeReturnType;
113
+ using RhsConjugateTransposeType = CwiseUnaryOp<scalar_conjugate_op<RhsScalar>, RhsTransposeType>;
114
+ using RhsAdjointType =
115
+ std::conditional_t<NumTraits<RhsScalar>::IsComplex, RhsConjugateTransposeType, RhsTransposeType>;
116
+
117
+ using TransposeType = Product<RhsTransposeType, LhsTransposeType, Option>;
118
+ using AdjointType = Product<RhsAdjointType, LhsAdjointType, Option>;
119
+
120
+ // return rhs^T * lhs^T
121
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TransposeType run_transpose(const Derived& derived) {
122
+ return TransposeType(RhsTransposeType(derived.rhs()), LhsTransposeType(derived.lhs()));
123
+ }
124
+ // return rhs^H * lhs^H
125
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE AdjointType run_adjoint(const Derived& derived) {
126
+ return AdjointType(RhsAdjointType(RhsTransposeType(derived.rhs())),
127
+ LhsAdjointType(LhsTransposeType(derived.lhs())));
128
+ }
129
+ };
130
+ template <typename Lhs, typename Rhs, int Option>
131
+ struct product_transpose_helper<Lhs, Rhs, Option, TransposeProductEnum::PermutationMatrix> {
132
+ // expand the transposed permutation-matrix product
133
+ using Derived = Product<Lhs, Rhs, Option>;
134
+
135
+ using LhsInverseType = typename PermutationBase<Lhs>::InverseReturnType;
136
+
137
+ using RhsScalar = typename traits<Rhs>::Scalar;
138
+ using RhsTransposeType = typename DenseBase<Rhs>::ConstTransposeReturnType;
139
+ using RhsConjugateTransposeType = CwiseUnaryOp<scalar_conjugate_op<RhsScalar>, RhsTransposeType>;
140
+ using RhsAdjointType =
141
+ std::conditional_t<NumTraits<RhsScalar>::IsComplex, RhsConjugateTransposeType, RhsTransposeType>;
142
+
143
+ using TransposeType = Product<RhsTransposeType, LhsInverseType, Option>;
144
+ using AdjointType = Product<RhsAdjointType, LhsInverseType, Option>;
145
+
146
+ // return rhs^T * lhs^-1
147
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TransposeType run_transpose(const Derived& derived) {
148
+ return TransposeType(RhsTransposeType(derived.rhs()), LhsInverseType(derived.lhs()));
149
+ }
150
+ // return rhs^H * lhs^-1
151
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE AdjointType run_adjoint(const Derived& derived) {
152
+ return AdjointType(RhsAdjointType(RhsTransposeType(derived.rhs())), LhsInverseType(derived.lhs()));
153
+ }
154
+ };
155
+ template <typename Lhs, typename Rhs, int Option>
156
+ struct product_transpose_helper<Lhs, Rhs, Option, TransposeProductEnum::MatrixPermutation> {
157
+ // expand the transposed matrix-permutation product
158
+ using Derived = Product<Lhs, Rhs, Option>;
159
+
160
+ using LhsScalar = typename traits<Lhs>::Scalar;
161
+ using LhsTransposeType = typename DenseBase<Lhs>::ConstTransposeReturnType;
162
+ using LhsConjugateTransposeType = CwiseUnaryOp<scalar_conjugate_op<LhsScalar>, LhsTransposeType>;
163
+ using LhsAdjointType =
164
+ std::conditional_t<NumTraits<LhsScalar>::IsComplex, LhsConjugateTransposeType, LhsTransposeType>;
165
+
166
+ using RhsInverseType = typename PermutationBase<Rhs>::InverseReturnType;
167
+
168
+ using TransposeType = Product<RhsInverseType, LhsTransposeType, Option>;
169
+ using AdjointType = Product<RhsInverseType, LhsAdjointType, Option>;
170
+
171
+ // return rhs^-1 * lhs^T
172
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TransposeType run_transpose(const Derived& derived) {
173
+ return TransposeType(RhsInverseType(derived.rhs()), LhsTransposeType(derived.lhs()));
174
+ }
175
+ // return rhs^-1 * lhs^H
176
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE AdjointType run_adjoint(const Derived& derived) {
177
+ return AdjointType(RhsInverseType(derived.rhs()), LhsAdjointType(LhsTransposeType(derived.lhs())));
178
+ }
179
+ };
180
+
181
+ } // end namespace internal
182
+
183
+ /** \class Product
184
+ * \ingroup Core_Module
185
+ *
186
+ * \brief Expression of the product of two arbitrary matrices or vectors
187
+ *
188
+ * \tparam Lhs_ the type of the left-hand side expression
189
+ * \tparam Rhs_ the type of the right-hand side expression
190
+ *
191
+ * This class represents an expression of the product of two arbitrary matrices.
192
+ *
193
+ * The other template parameters are:
194
+ * \tparam Option can be DefaultProduct, AliasFreeProduct, or LazyProduct
195
+ *
196
+ */
197
+ template <typename Lhs_, typename Rhs_, int Option>
198
+ class Product
199
+ : public ProductImpl<Lhs_, Rhs_, Option,
200
+ typename internal::product_promote_storage_type<
201
+ typename internal::traits<Lhs_>::StorageKind, typename internal::traits<Rhs_>::StorageKind,
202
+ internal::product_type<Lhs_, Rhs_>::ret>::ret> {
203
+ public:
204
+ typedef Lhs_ Lhs;
205
+ typedef Rhs_ Rhs;
206
+
207
+ typedef
208
+ typename ProductImpl<Lhs, Rhs, Option,
209
+ typename internal::product_promote_storage_type<
210
+ typename internal::traits<Lhs>::StorageKind, typename internal::traits<Rhs>::StorageKind,
211
+ internal::product_type<Lhs, Rhs>::ret>::ret>::Base Base;
212
+ EIGEN_GENERIC_PUBLIC_INTERFACE(Product)
213
+
214
+ typedef typename internal::ref_selector<Lhs>::type LhsNested;
215
+ typedef typename internal::ref_selector<Rhs>::type RhsNested;
216
+ typedef internal::remove_all_t<LhsNested> LhsNestedCleaned;
217
+ typedef internal::remove_all_t<RhsNested> RhsNestedCleaned;
218
+
219
+ using TransposeReturnType = typename internal::product_transpose_helper<Lhs, Rhs, Option>::TransposeType;
220
+ using AdjointReturnType = typename internal::product_transpose_helper<Lhs, Rhs, Option>::AdjointType;
221
+
222
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Product(const Lhs& lhs, const Rhs& rhs) : m_lhs(lhs), m_rhs(rhs) {
223
+ eigen_assert(lhs.cols() == rhs.rows() && "invalid matrix product" &&
224
+ "if you wanted a coeff-wise or a dot product use the respective explicit functions");
225
+ }
226
+
227
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Index rows() const EIGEN_NOEXCEPT { return m_lhs.rows(); }
228
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR Index cols() const EIGEN_NOEXCEPT { return m_rhs.cols(); }
229
+
230
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const LhsNestedCleaned& lhs() const { return m_lhs; }
231
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const RhsNestedCleaned& rhs() const { return m_rhs; }
232
+
233
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TransposeReturnType transpose() const {
234
+ return internal::product_transpose_helper<Lhs, Rhs, Option>::run_transpose(*this);
235
+ }
236
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE AdjointReturnType adjoint() const {
237
+ return internal::product_transpose_helper<Lhs, Rhs, Option>::run_adjoint(*this);
238
+ }
239
+
240
+ protected:
241
+ LhsNested m_lhs;
242
+ RhsNested m_rhs;
243
+ };
244
+
245
+ namespace internal {
246
+
247
+ template <typename Lhs, typename Rhs, int Option, int ProductTag = internal::product_type<Lhs, Rhs>::ret>
248
+ class dense_product_base : public internal::dense_xpr_base<Product<Lhs, Rhs, Option>>::type {};
249
+
250
+ /** Conversion to scalar for inner-products */
251
+ template <typename Lhs, typename Rhs, int Option>
252
+ class dense_product_base<Lhs, Rhs, Option, InnerProduct>
253
+ : public internal::dense_xpr_base<Product<Lhs, Rhs, Option>>::type {
254
+ typedef Product<Lhs, Rhs, Option> ProductXpr;
255
+ typedef typename internal::dense_xpr_base<ProductXpr>::type Base;
256
+
257
+ public:
258
+ using Base::derived;
259
+ typedef typename Base::Scalar Scalar;
260
+
261
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE operator const Scalar() const {
262
+ return internal::evaluator<ProductXpr>(derived()).coeff(0, 0);
263
+ }
264
+ };
265
+
266
+ } // namespace internal
267
+
268
+ // Generic API dispatcher
269
+ template <typename Lhs, typename Rhs, int Option, typename StorageKind>
270
+ class ProductImpl : public internal::generic_xpr_base<Product<Lhs, Rhs, Option>, MatrixXpr, StorageKind>::type {
271
+ public:
272
+ typedef typename internal::generic_xpr_base<Product<Lhs, Rhs, Option>, MatrixXpr, StorageKind>::type Base;
273
+ };
274
+
275
+ template <typename Lhs, typename Rhs, int Option>
276
+ class ProductImpl<Lhs, Rhs, Option, Dense> : public internal::dense_product_base<Lhs, Rhs, Option> {
277
+ typedef Product<Lhs, Rhs, Option> Derived;
278
+
279
+ public:
280
+ typedef typename internal::dense_product_base<Lhs, Rhs, Option> Base;
281
+ EIGEN_DENSE_PUBLIC_INTERFACE(Derived)
282
+ protected:
283
+ enum {
284
+ IsOneByOne = (RowsAtCompileTime == 1 || RowsAtCompileTime == Dynamic) &&
285
+ (ColsAtCompileTime == 1 || ColsAtCompileTime == Dynamic),
286
+ EnableCoeff = IsOneByOne || Option == LazyProduct
287
+ };
288
+
289
+ public:
290
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar coeff(Index row, Index col) const {
291
+ EIGEN_STATIC_ASSERT(EnableCoeff, THIS_METHOD_IS_ONLY_FOR_INNER_OR_LAZY_PRODUCTS);
292
+ eigen_assert((Option == LazyProduct) || (this->rows() == 1 && this->cols() == 1));
293
+
294
+ return internal::evaluator<Derived>(derived()).coeff(row, col);
295
+ }
296
+
297
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar coeff(Index i) const {
298
+ EIGEN_STATIC_ASSERT(EnableCoeff, THIS_METHOD_IS_ONLY_FOR_INNER_OR_LAZY_PRODUCTS);
299
+ eigen_assert((Option == LazyProduct) || (this->rows() == 1 && this->cols() == 1));
300
+
301
+ return internal::evaluator<Derived>(derived()).coeff(i);
302
+ }
303
+ };
304
+
305
+ } // end namespace Eigen
306
+
307
+ #endif // EIGEN_PRODUCT_H
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/RandomImpl.h ADDED
@@ -0,0 +1,253 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2024 Charles Schlosser <cs.schlosser@gmail.com>
5
+ //
6
+ // This Source Code Form is subject to the terms of the Mozilla
7
+ // Public License v. 2.0. If a copy of the MPL was not distributed
8
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9
+
10
+ #ifndef EIGEN_RANDOM_IMPL_H
11
+ #define EIGEN_RANDOM_IMPL_H
12
+
13
+ // IWYU pragma: private
14
+ #include "./InternalHeaderCheck.h"
15
+
16
+ namespace Eigen {
17
+
18
+ namespace internal {
19
+
20
+ /****************************************************************************
21
+ * Implementation of random *
22
+ ****************************************************************************/
23
+
24
+ template <typename Scalar, bool IsComplex, bool IsInteger>
25
+ struct random_default_impl {};
26
+
27
+ template <typename Scalar>
28
+ struct random_impl : random_default_impl<Scalar, NumTraits<Scalar>::IsComplex, NumTraits<Scalar>::IsInteger> {};
29
+
30
+ template <typename Scalar>
31
+ struct random_retval {
32
+ typedef Scalar type;
33
+ };
34
+
35
+ template <typename Scalar>
36
+ inline EIGEN_MATHFUNC_RETVAL(random, Scalar) random(const Scalar& x, const Scalar& y) {
37
+ return EIGEN_MATHFUNC_IMPL(random, Scalar)::run(x, y);
38
+ }
39
+
40
+ template <typename Scalar>
41
+ inline EIGEN_MATHFUNC_RETVAL(random, Scalar) random() {
42
+ return EIGEN_MATHFUNC_IMPL(random, Scalar)::run();
43
+ }
44
+
45
+ // TODO: replace or provide alternatives to this, e.g. std::random_device
46
+ struct eigen_random_device {
47
+ using ReturnType = int;
48
+ static constexpr int Entropy = meta_floor_log2<(unsigned int)(RAND_MAX) + 1>::value;
49
+ static constexpr ReturnType Highest = RAND_MAX;
50
+ static EIGEN_DEVICE_FUNC inline ReturnType run() { return std::rand(); }
51
+ };
52
+
53
+ // Fill a built-in unsigned integer with numRandomBits beginning with the least significant bit
54
+ template <typename Scalar>
55
+ struct random_bits_impl {
56
+ EIGEN_STATIC_ASSERT(std::is_unsigned<Scalar>::value, SCALAR MUST BE A BUILT - IN UNSIGNED INTEGER)
57
+ using RandomDevice = eigen_random_device;
58
+ using RandomReturnType = typename RandomDevice::ReturnType;
59
+ static constexpr int kEntropy = RandomDevice::Entropy;
60
+ static constexpr int kTotalBits = sizeof(Scalar) * CHAR_BIT;
61
+ // return a Scalar filled with numRandomBits beginning from the least significant bit
62
+ static EIGEN_DEVICE_FUNC inline Scalar run(int numRandomBits) {
63
+ eigen_assert((numRandomBits >= 0) && (numRandomBits <= kTotalBits));
64
+ const Scalar mask = Scalar(-1) >> ((kTotalBits - numRandomBits) & (kTotalBits - 1));
65
+ Scalar randomBits = 0;
66
+ for (int shift = 0; shift < numRandomBits; shift += kEntropy) {
67
+ RandomReturnType r = RandomDevice::run();
68
+ randomBits |= static_cast<Scalar>(r) << shift;
69
+ }
70
+ // clear the excess bits
71
+ randomBits &= mask;
72
+ return randomBits;
73
+ }
74
+ };
75
+
76
+ template <typename BitsType>
77
+ EIGEN_DEVICE_FUNC inline BitsType getRandomBits(int numRandomBits) {
78
+ return random_bits_impl<BitsType>::run(numRandomBits);
79
+ }
80
+
81
+ // random implementation for a built-in floating point type
82
+ template <typename Scalar, bool BuiltIn = std::is_floating_point<Scalar>::value>
83
+ struct random_float_impl {
84
+ using BitsType = typename numext::get_integer_by_size<sizeof(Scalar)>::unsigned_type;
85
+ static constexpr EIGEN_DEVICE_FUNC inline int mantissaBits() {
86
+ const int digits = NumTraits<Scalar>::digits();
87
+ return digits - 1;
88
+ }
89
+ static EIGEN_DEVICE_FUNC inline Scalar run(int numRandomBits) {
90
+ eigen_assert(numRandomBits >= 0 && numRandomBits <= mantissaBits());
91
+ BitsType randomBits = getRandomBits<BitsType>(numRandomBits);
92
+ // if fewer than MantissaBits is requested, shift them to the left
93
+ randomBits <<= (mantissaBits() - numRandomBits);
94
+ // randomBits is in the half-open interval [2,4)
95
+ randomBits |= numext::bit_cast<BitsType>(Scalar(2));
96
+ // result is in the half-open interval [-1,1)
97
+ Scalar result = numext::bit_cast<Scalar>(randomBits) - Scalar(3);
98
+ return result;
99
+ }
100
+ };
101
+ // random implementation for a custom floating point type
102
+ // uses double as the implementation with a mantissa with a size equal to either the target scalar's mantissa or that of
103
+ // double, whichever is smaller
104
+ template <typename Scalar>
105
+ struct random_float_impl<Scalar, false> {
106
+ static EIGEN_DEVICE_FUNC inline int mantissaBits() {
107
+ const int digits = NumTraits<Scalar>::digits();
108
+ constexpr int kDoubleDigits = NumTraits<double>::digits();
109
+ return numext::mini(digits, kDoubleDigits) - 1;
110
+ }
111
+ static EIGEN_DEVICE_FUNC inline Scalar run(int numRandomBits) {
112
+ eigen_assert(numRandomBits >= 0 && numRandomBits <= mantissaBits());
113
+ Scalar result = static_cast<Scalar>(random_float_impl<double>::run(numRandomBits));
114
+ return result;
115
+ }
116
+ };
117
+
118
+ // random implementation for long double
119
+ // this specialization is not compatible with double-double scalars
120
+ template <bool Specialize = (sizeof(long double) == 2 * sizeof(uint64_t)) &&
121
+ ((std::numeric_limits<long double>::digits != (2 * std::numeric_limits<double>::digits)))>
122
+ struct random_longdouble_impl {
123
+ static constexpr int Size = sizeof(long double);
124
+ static constexpr EIGEN_DEVICE_FUNC inline int mantissaBits() { return NumTraits<long double>::digits() - 1; }
125
+ static EIGEN_DEVICE_FUNC inline long double run(int numRandomBits) {
126
+ eigen_assert(numRandomBits >= 0 && numRandomBits <= mantissaBits());
127
+ EIGEN_USING_STD(memcpy);
128
+ int numLowBits = numext::mini(numRandomBits, 64);
129
+ int numHighBits = numext::maxi(numRandomBits - 64, 0);
130
+ uint64_t randomBits[2];
131
+ long double result = 2.0L;
132
+ memcpy(&randomBits, &result, Size);
133
+ randomBits[0] |= getRandomBits<uint64_t>(numLowBits);
134
+ randomBits[1] |= getRandomBits<uint64_t>(numHighBits);
135
+ memcpy(&result, &randomBits, Size);
136
+ result -= 3.0L;
137
+ return result;
138
+ }
139
+ };
140
+ template <>
141
+ struct random_longdouble_impl<false> {
142
+ static constexpr EIGEN_DEVICE_FUNC inline int mantissaBits() { return NumTraits<double>::digits() - 1; }
143
+ static EIGEN_DEVICE_FUNC inline long double run(int numRandomBits) {
144
+ return static_cast<long double>(random_float_impl<double>::run(numRandomBits));
145
+ }
146
+ };
147
+ template <>
148
+ struct random_float_impl<long double> : random_longdouble_impl<> {};
149
+
150
+ template <typename Scalar>
151
+ struct random_default_impl<Scalar, false, false> {
152
+ using Impl = random_float_impl<Scalar>;
153
+ static EIGEN_DEVICE_FUNC inline Scalar run(const Scalar& x, const Scalar& y, int numRandomBits) {
154
+ Scalar half_x = Scalar(0.5) * x;
155
+ Scalar half_y = Scalar(0.5) * y;
156
+ Scalar result = (half_x + half_y) + (half_y - half_x) * run(numRandomBits);
157
+ // result is in the half-open interval [x, y) -- provided that x < y
158
+ return result;
159
+ }
160
+ static EIGEN_DEVICE_FUNC inline Scalar run(const Scalar& x, const Scalar& y) {
161
+ return run(x, y, Impl::mantissaBits());
162
+ }
163
+ static EIGEN_DEVICE_FUNC inline Scalar run(int numRandomBits) { return Impl::run(numRandomBits); }
164
+ static EIGEN_DEVICE_FUNC inline Scalar run() { return run(Impl::mantissaBits()); }
165
+ };
166
+
167
+ template <typename Scalar, bool IsSigned = NumTraits<Scalar>::IsSigned, bool BuiltIn = std::is_integral<Scalar>::value>
168
+ struct random_int_impl;
169
+
170
+ // random implementation for a built-in unsigned integer type
171
+ template <typename Scalar>
172
+ struct random_int_impl<Scalar, false, true> {
173
+ static constexpr int kTotalBits = sizeof(Scalar) * CHAR_BIT;
174
+ static EIGEN_DEVICE_FUNC inline Scalar run(const Scalar& x, const Scalar& y) {
175
+ if (y <= x) return x;
176
+ Scalar range = y - x;
177
+ // handle edge case where [x,y] spans the entire range of Scalar
178
+ if (range == NumTraits<Scalar>::highest()) return run();
179
+ Scalar count = range + 1;
180
+ // calculate the number of random bits needed to fill range
181
+ int numRandomBits = log2_ceil(count);
182
+ Scalar randomBits;
183
+ do {
184
+ randomBits = getRandomBits<Scalar>(numRandomBits);
185
+ // if the random draw is outside [0, range), try again (rejection sampling)
186
+ // in the worst-case scenario, the probability of rejection is: 1/2 - 1/2^numRandomBits < 50%
187
+ } while (randomBits >= count);
188
+ Scalar result = x + randomBits;
189
+ return result;
190
+ }
191
+ static EIGEN_DEVICE_FUNC inline Scalar run() { return getRandomBits<Scalar>(kTotalBits); }
192
+ };
193
+
194
+ // random implementation for a built-in signed integer type
195
+ template <typename Scalar>
196
+ struct random_int_impl<Scalar, true, true> {
197
+ static constexpr int kTotalBits = sizeof(Scalar) * CHAR_BIT;
198
+ using BitsType = typename make_unsigned<Scalar>::type;
199
+ static EIGEN_DEVICE_FUNC inline Scalar run(const Scalar& x, const Scalar& y) {
200
+ if (y <= x) return x;
201
+ // Avoid overflow by representing `range` as an unsigned type
202
+ BitsType range = static_cast<BitsType>(y) - static_cast<BitsType>(x);
203
+ BitsType randomBits = random_int_impl<BitsType>::run(0, range);
204
+ // Avoid overflow in the case where `x` is negative and there is a large range so
205
+ // `randomBits` would also be negative if cast to `Scalar` first.
206
+ Scalar result = static_cast<Scalar>(static_cast<BitsType>(x) + randomBits);
207
+ return result;
208
+ }
209
+ static EIGEN_DEVICE_FUNC inline Scalar run() { return static_cast<Scalar>(getRandomBits<BitsType>(kTotalBits)); }
210
+ };
211
+
212
+ // todo: custom integers
213
+ template <typename Scalar, bool IsSigned>
214
+ struct random_int_impl<Scalar, IsSigned, false> {
215
+ static EIGEN_DEVICE_FUNC inline Scalar run(const Scalar&, const Scalar&) { return run(); }
216
+ static EIGEN_DEVICE_FUNC inline Scalar run() {
217
+ eigen_assert(std::false_type::value && "RANDOM FOR CUSTOM INTEGERS NOT YET SUPPORTED");
218
+ return Scalar(0);
219
+ }
220
+ };
221
+
222
+ template <typename Scalar>
223
+ struct random_default_impl<Scalar, false, true> : random_int_impl<Scalar> {};
224
+
225
+ template <>
226
+ struct random_impl<bool> {
227
+ static EIGEN_DEVICE_FUNC inline bool run(const bool& x, const bool& y) {
228
+ if (y <= x) return x;
229
+ return run();
230
+ }
231
+ static EIGEN_DEVICE_FUNC inline bool run() { return getRandomBits<unsigned>(1) ? true : false; }
232
+ };
233
+
234
+ template <typename Scalar>
235
+ struct random_default_impl<Scalar, true, false> {
236
+ typedef typename NumTraits<Scalar>::Real RealScalar;
237
+ using Impl = random_impl<RealScalar>;
238
+ static EIGEN_DEVICE_FUNC inline Scalar run(const Scalar& x, const Scalar& y, int numRandomBits) {
239
+ return Scalar(Impl::run(x.real(), y.real(), numRandomBits), Impl::run(x.imag(), y.imag(), numRandomBits));
240
+ }
241
+ static EIGEN_DEVICE_FUNC inline Scalar run(const Scalar& x, const Scalar& y) {
242
+ return Scalar(Impl::run(x.real(), y.real()), Impl::run(x.imag(), y.imag()));
243
+ }
244
+ static EIGEN_DEVICE_FUNC inline Scalar run(int numRandomBits) {
245
+ return Scalar(Impl::run(numRandomBits), Impl::run(numRandomBits));
246
+ }
247
+ static EIGEN_DEVICE_FUNC inline Scalar run() { return Scalar(Impl::run(), Impl::run()); }
248
+ };
249
+
250
+ } // namespace internal
251
+ } // namespace Eigen
252
+
253
+ #endif // EIGEN_RANDOM_IMPL_H
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/Redux.h ADDED
@@ -0,0 +1,528 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
5
+ // Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
6
+ //
7
+ // This Source Code Form is subject to the terms of the Mozilla
8
+ // Public License v. 2.0. If a copy of the MPL was not distributed
9
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
10
+
11
+ #ifndef EIGEN_REDUX_H
12
+ #define EIGEN_REDUX_H
13
+
14
+ // IWYU pragma: private
15
+ #include "./InternalHeaderCheck.h"
16
+
17
+ namespace Eigen {
18
+
19
+ namespace internal {
20
+
21
+ // TODO
22
+ // * implement other kind of vectorization
23
+ // * factorize code
24
+
25
+ /***************************************************************************
26
+ * Part 1 : the logic deciding a strategy for vectorization and unrolling
27
+ ***************************************************************************/
28
+
29
+ template <typename Func, typename Evaluator>
30
+ struct redux_traits {
31
+ public:
32
+ typedef typename find_best_packet<typename Evaluator::Scalar, Evaluator::SizeAtCompileTime>::type PacketType;
33
+ enum {
34
+ PacketSize = unpacket_traits<PacketType>::size,
35
+ InnerMaxSize = int(Evaluator::IsRowMajor) ? Evaluator::MaxColsAtCompileTime : Evaluator::MaxRowsAtCompileTime,
36
+ OuterMaxSize = int(Evaluator::IsRowMajor) ? Evaluator::MaxRowsAtCompileTime : Evaluator::MaxColsAtCompileTime,
37
+ SliceVectorizedWork = int(InnerMaxSize) == Dynamic ? Dynamic
38
+ : int(OuterMaxSize) == Dynamic ? (int(InnerMaxSize) >= int(PacketSize) ? Dynamic : 0)
39
+ : (int(InnerMaxSize) / int(PacketSize)) * int(OuterMaxSize)
40
+ };
41
+
42
+ enum {
43
+ MayLinearize = (int(Evaluator::Flags) & LinearAccessBit),
44
+ MightVectorize = (int(Evaluator::Flags) & ActualPacketAccessBit) && (functor_traits<Func>::PacketAccess),
45
+ MayLinearVectorize = bool(MightVectorize) && bool(MayLinearize),
46
+ MaySliceVectorize = bool(MightVectorize) && (int(SliceVectorizedWork) == Dynamic || int(SliceVectorizedWork) >= 3)
47
+ };
48
+
49
+ public:
50
+ enum {
51
+ Traversal = int(MayLinearVectorize) ? int(LinearVectorizedTraversal)
52
+ : int(MaySliceVectorize) ? int(SliceVectorizedTraversal)
53
+ : int(MayLinearize) ? int(LinearTraversal)
54
+ : int(DefaultTraversal)
55
+ };
56
+
57
+ public:
58
+ enum {
59
+ Cost = Evaluator::SizeAtCompileTime == Dynamic
60
+ ? HugeCost
61
+ : int(Evaluator::SizeAtCompileTime) * int(Evaluator::CoeffReadCost) +
62
+ (Evaluator::SizeAtCompileTime - 1) * functor_traits<Func>::Cost,
63
+ UnrollingLimit = EIGEN_UNROLLING_LIMIT * (int(Traversal) == int(DefaultTraversal) ? 1 : int(PacketSize))
64
+ };
65
+
66
+ public:
67
+ enum { Unrolling = Cost <= UnrollingLimit ? CompleteUnrolling : NoUnrolling };
68
+
69
+ #ifdef EIGEN_DEBUG_ASSIGN
70
+ static void debug() {
71
+ std::cerr << "Xpr: " << typeid(typename Evaluator::XprType).name() << std::endl;
72
+ std::cerr.setf(std::ios::hex, std::ios::basefield);
73
+ EIGEN_DEBUG_VAR(Evaluator::Flags)
74
+ std::cerr.unsetf(std::ios::hex);
75
+ EIGEN_DEBUG_VAR(InnerMaxSize)
76
+ EIGEN_DEBUG_VAR(OuterMaxSize)
77
+ EIGEN_DEBUG_VAR(SliceVectorizedWork)
78
+ EIGEN_DEBUG_VAR(PacketSize)
79
+ EIGEN_DEBUG_VAR(MightVectorize)
80
+ EIGEN_DEBUG_VAR(MayLinearVectorize)
81
+ EIGEN_DEBUG_VAR(MaySliceVectorize)
82
+ std::cerr << "Traversal"
83
+ << " = " << Traversal << " (" << demangle_traversal(Traversal) << ")" << std::endl;
84
+ EIGEN_DEBUG_VAR(UnrollingLimit)
85
+ std::cerr << "Unrolling"
86
+ << " = " << Unrolling << " (" << demangle_unrolling(Unrolling) << ")" << std::endl;
87
+ std::cerr << std::endl;
88
+ }
89
+ #endif
90
+ };
91
+
92
+ /***************************************************************************
93
+ * Part 2 : unrollers
94
+ ***************************************************************************/
95
+
96
+ /*** no vectorization ***/
97
+
98
+ template <typename Func, typename Evaluator, Index Start, Index Length>
99
+ struct redux_novec_unroller {
100
+ static constexpr Index HalfLength = Length / 2;
101
+
102
+ typedef typename Evaluator::Scalar Scalar;
103
+
104
+ EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Evaluator& eval, const Func& func) {
105
+ return func(redux_novec_unroller<Func, Evaluator, Start, HalfLength>::run(eval, func),
106
+ redux_novec_unroller<Func, Evaluator, Start + HalfLength, Length - HalfLength>::run(eval, func));
107
+ }
108
+ };
109
+
110
+ template <typename Func, typename Evaluator, Index Start>
111
+ struct redux_novec_unroller<Func, Evaluator, Start, 1> {
112
+ static constexpr Index outer = Start / Evaluator::InnerSizeAtCompileTime;
113
+ static constexpr Index inner = Start % Evaluator::InnerSizeAtCompileTime;
114
+
115
+ typedef typename Evaluator::Scalar Scalar;
116
+
117
+ EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Evaluator& eval, const Func&) {
118
+ return eval.coeffByOuterInner(outer, inner);
119
+ }
120
+ };
121
+
122
+ // This is actually dead code and will never be called. It is required
123
+ // to prevent false warnings regarding failed inlining though
124
+ // for 0 length run() will never be called at all.
125
+ template <typename Func, typename Evaluator, Index Start>
126
+ struct redux_novec_unroller<Func, Evaluator, Start, 0> {
127
+ typedef typename Evaluator::Scalar Scalar;
128
+ EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Evaluator&, const Func&) { return Scalar(); }
129
+ };
130
+
131
+ template <typename Func, typename Evaluator, Index Start, Index Length>
132
+ struct redux_novec_linear_unroller {
133
+ static constexpr Index HalfLength = Length / 2;
134
+
135
+ typedef typename Evaluator::Scalar Scalar;
136
+
137
+ EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Evaluator& eval, const Func& func) {
138
+ return func(redux_novec_linear_unroller<Func, Evaluator, Start, HalfLength>::run(eval, func),
139
+ redux_novec_linear_unroller<Func, Evaluator, Start + HalfLength, Length - HalfLength>::run(eval, func));
140
+ }
141
+ };
142
+
143
+ template <typename Func, typename Evaluator, Index Start>
144
+ struct redux_novec_linear_unroller<Func, Evaluator, Start, 1> {
145
+ typedef typename Evaluator::Scalar Scalar;
146
+
147
+ EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Evaluator& eval, const Func&) {
148
+ return eval.coeff(Start);
149
+ }
150
+ };
151
+
152
+ // This is actually dead code and will never be called. It is required
153
+ // to prevent false warnings regarding failed inlining though
154
+ // for 0 length run() will never be called at all.
155
+ template <typename Func, typename Evaluator, Index Start>
156
+ struct redux_novec_linear_unroller<Func, Evaluator, Start, 0> {
157
+ typedef typename Evaluator::Scalar Scalar;
158
+ EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Evaluator&, const Func&) { return Scalar(); }
159
+ };
160
+
161
+ /*** vectorization ***/
162
+
163
+ template <typename Func, typename Evaluator, Index Start, Index Length>
164
+ struct redux_vec_unroller {
165
+ template <typename PacketType>
166
+ EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE PacketType run(const Evaluator& eval, const Func& func) {
167
+ constexpr Index HalfLength = Length / 2;
168
+
169
+ return func.packetOp(
170
+ redux_vec_unroller<Func, Evaluator, Start, HalfLength>::template run<PacketType>(eval, func),
171
+ redux_vec_unroller<Func, Evaluator, Start + HalfLength, Length - HalfLength>::template run<PacketType>(eval,
172
+ func));
173
+ }
174
+ };
175
+
176
+ template <typename Func, typename Evaluator, Index Start>
177
+ struct redux_vec_unroller<Func, Evaluator, Start, 1> {
178
+ template <typename PacketType>
179
+ EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE PacketType run(const Evaluator& eval, const Func&) {
180
+ constexpr Index PacketSize = unpacket_traits<PacketType>::size;
181
+ constexpr Index index = Start * PacketSize;
182
+ constexpr Index outer = index / int(Evaluator::InnerSizeAtCompileTime);
183
+ constexpr Index inner = index % int(Evaluator::InnerSizeAtCompileTime);
184
+ constexpr int alignment = Evaluator::Alignment;
185
+
186
+ return eval.template packetByOuterInner<alignment, PacketType>(outer, inner);
187
+ }
188
+ };
189
+
190
+ template <typename Func, typename Evaluator, Index Start, Index Length>
191
+ struct redux_vec_linear_unroller {
192
+ template <typename PacketType>
193
+ EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE PacketType run(const Evaluator& eval, const Func& func) {
194
+ constexpr Index HalfLength = Length / 2;
195
+
196
+ return func.packetOp(
197
+ redux_vec_linear_unroller<Func, Evaluator, Start, HalfLength>::template run<PacketType>(eval, func),
198
+ redux_vec_linear_unroller<Func, Evaluator, Start + HalfLength, Length - HalfLength>::template run<PacketType>(
199
+ eval, func));
200
+ }
201
+ };
202
+
203
+ template <typename Func, typename Evaluator, Index Start>
204
+ struct redux_vec_linear_unroller<Func, Evaluator, Start, 1> {
205
+ template <typename PacketType>
206
+ EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE PacketType run(const Evaluator& eval, const Func&) {
207
+ constexpr Index PacketSize = unpacket_traits<PacketType>::size;
208
+ constexpr Index index = (Start * PacketSize);
209
+ constexpr int alignment = Evaluator::Alignment;
210
+ return eval.template packet<alignment, PacketType>(index);
211
+ }
212
+ };
213
+
214
+ /***************************************************************************
215
+ * Part 3 : implementation of all cases
216
+ ***************************************************************************/
217
+
218
+ template <typename Func, typename Evaluator, int Traversal = redux_traits<Func, Evaluator>::Traversal,
219
+ int Unrolling = redux_traits<Func, Evaluator>::Unrolling>
220
+ struct redux_impl;
221
+
222
+ template <typename Func, typename Evaluator>
223
+ struct redux_impl<Func, Evaluator, DefaultTraversal, NoUnrolling> {
224
+ typedef typename Evaluator::Scalar Scalar;
225
+
226
+ template <typename XprType>
227
+ EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Evaluator& eval, const Func& func, const XprType& xpr) {
228
+ eigen_assert(xpr.rows() > 0 && xpr.cols() > 0 && "you are using an empty matrix");
229
+ Scalar res = eval.coeffByOuterInner(0, 0);
230
+ for (Index i = 1; i < xpr.innerSize(); ++i) res = func(res, eval.coeffByOuterInner(0, i));
231
+ for (Index i = 1; i < xpr.outerSize(); ++i)
232
+ for (Index j = 0; j < xpr.innerSize(); ++j) res = func(res, eval.coeffByOuterInner(i, j));
233
+ return res;
234
+ }
235
+ };
236
+
237
+ template <typename Func, typename Evaluator>
238
+ struct redux_impl<Func, Evaluator, LinearTraversal, NoUnrolling> {
239
+ typedef typename Evaluator::Scalar Scalar;
240
+
241
+ template <typename XprType>
242
+ EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Evaluator& eval, const Func& func, const XprType& xpr) {
243
+ eigen_assert(xpr.size() > 0 && "you are using an empty matrix");
244
+ Scalar res = eval.coeff(0);
245
+ for (Index k = 1; k < xpr.size(); ++k) res = func(res, eval.coeff(k));
246
+ return res;
247
+ }
248
+ };
249
+
250
+ template <typename Func, typename Evaluator>
251
+ struct redux_impl<Func, Evaluator, DefaultTraversal, CompleteUnrolling>
252
+ : redux_novec_unroller<Func, Evaluator, 0, Evaluator::SizeAtCompileTime> {
253
+ typedef redux_novec_unroller<Func, Evaluator, 0, Evaluator::SizeAtCompileTime> Base;
254
+ typedef typename Evaluator::Scalar Scalar;
255
+ template <typename XprType>
256
+ EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Evaluator& eval, const Func& func,
257
+ const XprType& /*xpr*/) {
258
+ return Base::run(eval, func);
259
+ }
260
+ };
261
+
262
+ template <typename Func, typename Evaluator>
263
+ struct redux_impl<Func, Evaluator, LinearTraversal, CompleteUnrolling>
264
+ : redux_novec_linear_unroller<Func, Evaluator, 0, Evaluator::SizeAtCompileTime> {
265
+ typedef redux_novec_linear_unroller<Func, Evaluator, 0, Evaluator::SizeAtCompileTime> Base;
266
+ typedef typename Evaluator::Scalar Scalar;
267
+ template <typename XprType>
268
+ EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Evaluator& eval, const Func& func,
269
+ const XprType& /*xpr*/) {
270
+ return Base::run(eval, func);
271
+ }
272
+ };
273
+
274
+ template <typename Func, typename Evaluator>
275
+ struct redux_impl<Func, Evaluator, LinearVectorizedTraversal, NoUnrolling> {
276
+ typedef typename Evaluator::Scalar Scalar;
277
+ typedef typename redux_traits<Func, Evaluator>::PacketType PacketScalar;
278
+
279
+ template <typename XprType>
280
+ static Scalar run(const Evaluator& eval, const Func& func, const XprType& xpr) {
281
+ const Index size = xpr.size();
282
+
283
+ constexpr Index packetSize = redux_traits<Func, Evaluator>::PacketSize;
284
+ constexpr int packetAlignment = unpacket_traits<PacketScalar>::alignment;
285
+ constexpr int alignment0 =
286
+ (bool(Evaluator::Flags & DirectAccessBit) && bool(packet_traits<Scalar>::AlignedOnScalar))
287
+ ? int(packetAlignment)
288
+ : int(Unaligned);
289
+ constexpr int alignment = plain_enum_max(alignment0, Evaluator::Alignment);
290
+ const Index alignedStart = internal::first_default_aligned(xpr);
291
+ const Index alignedSize2 = ((size - alignedStart) / (2 * packetSize)) * (2 * packetSize);
292
+ const Index alignedSize = ((size - alignedStart) / (packetSize)) * (packetSize);
293
+ const Index alignedEnd2 = alignedStart + alignedSize2;
294
+ const Index alignedEnd = alignedStart + alignedSize;
295
+ Scalar res;
296
+ if (alignedSize) {
297
+ PacketScalar packet_res0 = eval.template packet<alignment, PacketScalar>(alignedStart);
298
+ if (alignedSize > packetSize) // we have at least two packets to partly unroll the loop
299
+ {
300
+ PacketScalar packet_res1 = eval.template packet<alignment, PacketScalar>(alignedStart + packetSize);
301
+ for (Index index = alignedStart + 2 * packetSize; index < alignedEnd2; index += 2 * packetSize) {
302
+ packet_res0 = func.packetOp(packet_res0, eval.template packet<alignment, PacketScalar>(index));
303
+ packet_res1 = func.packetOp(packet_res1, eval.template packet<alignment, PacketScalar>(index + packetSize));
304
+ }
305
+
306
+ packet_res0 = func.packetOp(packet_res0, packet_res1);
307
+ if (alignedEnd > alignedEnd2)
308
+ packet_res0 = func.packetOp(packet_res0, eval.template packet<alignment, PacketScalar>(alignedEnd2));
309
+ }
310
+ res = func.predux(packet_res0);
311
+
312
+ for (Index index = 0; index < alignedStart; ++index) res = func(res, eval.coeff(index));
313
+
314
+ for (Index index = alignedEnd; index < size; ++index) res = func(res, eval.coeff(index));
315
+ } else // too small to vectorize anything.
316
+ // since this is dynamic-size hence inefficient anyway for such small sizes, don't try to optimize.
317
+ {
318
+ res = eval.coeff(0);
319
+ for (Index index = 1; index < size; ++index) res = func(res, eval.coeff(index));
320
+ }
321
+
322
+ return res;
323
+ }
324
+ };
325
+
326
+ // NOTE: for SliceVectorizedTraversal we simply bypass unrolling
327
+ template <typename Func, typename Evaluator, int Unrolling>
328
+ struct redux_impl<Func, Evaluator, SliceVectorizedTraversal, Unrolling> {
329
+ typedef typename Evaluator::Scalar Scalar;
330
+ typedef typename redux_traits<Func, Evaluator>::PacketType PacketType;
331
+
332
+ template <typename XprType>
333
+ EIGEN_DEVICE_FUNC static Scalar run(const Evaluator& eval, const Func& func, const XprType& xpr) {
334
+ eigen_assert(xpr.rows() > 0 && xpr.cols() > 0 && "you are using an empty matrix");
335
+ constexpr Index packetSize = redux_traits<Func, Evaluator>::PacketSize;
336
+ const Index innerSize = xpr.innerSize();
337
+ const Index outerSize = xpr.outerSize();
338
+ const Index packetedInnerSize = ((innerSize) / packetSize) * packetSize;
339
+ Scalar res;
340
+ if (packetedInnerSize) {
341
+ PacketType packet_res = eval.template packet<Unaligned, PacketType>(0, 0);
342
+ for (Index j = 0; j < outerSize; ++j)
343
+ for (Index i = (j == 0 ? packetSize : 0); i < packetedInnerSize; i += Index(packetSize))
344
+ packet_res = func.packetOp(packet_res, eval.template packetByOuterInner<Unaligned, PacketType>(j, i));
345
+
346
+ res = func.predux(packet_res);
347
+ for (Index j = 0; j < outerSize; ++j)
348
+ for (Index i = packetedInnerSize; i < innerSize; ++i) res = func(res, eval.coeffByOuterInner(j, i));
349
+ } else // too small to vectorize anything.
350
+ // since this is dynamic-size hence inefficient anyway for such small sizes, don't try to optimize.
351
+ {
352
+ res = redux_impl<Func, Evaluator, DefaultTraversal, NoUnrolling>::run(eval, func, xpr);
353
+ }
354
+
355
+ return res;
356
+ }
357
+ };
358
+
359
+ template <typename Func, typename Evaluator>
360
+ struct redux_impl<Func, Evaluator, LinearVectorizedTraversal, CompleteUnrolling> {
361
+ typedef typename Evaluator::Scalar Scalar;
362
+
363
+ typedef typename redux_traits<Func, Evaluator>::PacketType PacketType;
364
+ static constexpr Index PacketSize = redux_traits<Func, Evaluator>::PacketSize;
365
+ static constexpr Index Size = Evaluator::SizeAtCompileTime;
366
+ static constexpr Index VectorizedSize = (int(Size) / int(PacketSize)) * int(PacketSize);
367
+
368
+ template <typename XprType>
369
+ EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Evaluator& eval, const Func& func, const XprType& xpr) {
370
+ EIGEN_ONLY_USED_FOR_DEBUG(xpr)
371
+ eigen_assert(xpr.rows() > 0 && xpr.cols() > 0 && "you are using an empty matrix");
372
+ if (VectorizedSize > 0) {
373
+ Scalar res = func.predux(
374
+ redux_vec_linear_unroller<Func, Evaluator, 0, Size / PacketSize>::template run<PacketType>(eval, func));
375
+ if (VectorizedSize != Size)
376
+ res = func(
377
+ res, redux_novec_linear_unroller<Func, Evaluator, VectorizedSize, Size - VectorizedSize>::run(eval, func));
378
+ return res;
379
+ } else {
380
+ return redux_novec_linear_unroller<Func, Evaluator, 0, Size>::run(eval, func);
381
+ }
382
+ }
383
+ };
384
+
385
+ // evaluator adaptor
386
+ template <typename XprType_>
387
+ class redux_evaluator : public internal::evaluator<XprType_> {
388
+ typedef internal::evaluator<XprType_> Base;
389
+
390
+ public:
391
+ typedef XprType_ XprType;
392
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit redux_evaluator(const XprType& xpr) : Base(xpr) {}
393
+
394
+ typedef typename XprType::Scalar Scalar;
395
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
396
+ typedef typename XprType::PacketScalar PacketScalar;
397
+
398
+ enum {
399
+ MaxRowsAtCompileTime = XprType::MaxRowsAtCompileTime,
400
+ MaxColsAtCompileTime = XprType::MaxColsAtCompileTime,
401
+ // TODO we should not remove DirectAccessBit and rather find an elegant way to query the alignment offset at runtime
402
+ // from the evaluator
403
+ Flags = Base::Flags & ~DirectAccessBit,
404
+ IsRowMajor = XprType::IsRowMajor,
405
+ SizeAtCompileTime = XprType::SizeAtCompileTime,
406
+ InnerSizeAtCompileTime = XprType::InnerSizeAtCompileTime
407
+ };
408
+
409
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeffByOuterInner(Index outer, Index inner) const {
410
+ return Base::coeff(IsRowMajor ? outer : inner, IsRowMajor ? inner : outer);
411
+ }
412
+
413
+ template <int LoadMode, typename PacketType>
414
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketType packetByOuterInner(Index outer, Index inner) const {
415
+ return Base::template packet<LoadMode, PacketType>(IsRowMajor ? outer : inner, IsRowMajor ? inner : outer);
416
+ }
417
+ };
418
+
419
+ } // end namespace internal
420
+
421
+ /***************************************************************************
422
+ * Part 4 : public API
423
+ ***************************************************************************/
424
+
425
+ /** \returns the result of a full redux operation on the whole matrix or vector using \a func
426
+ *
427
+ * The template parameter \a BinaryOp is the type of the functor \a func which must be
428
+ * an associative operator. Both current C++98 and C++11 functor styles are handled.
429
+ *
430
+ * \warning the matrix must be not empty, otherwise an assertion is triggered.
431
+ *
432
+ * \sa DenseBase::sum(), DenseBase::minCoeff(), DenseBase::maxCoeff(), MatrixBase::colwise(), MatrixBase::rowwise()
433
+ */
434
+ template <typename Derived>
435
+ template <typename Func>
436
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar DenseBase<Derived>::redux(
437
+ const Func& func) const {
438
+ eigen_assert(this->rows() > 0 && this->cols() > 0 && "you are using an empty matrix");
439
+
440
+ typedef typename internal::redux_evaluator<Derived> ThisEvaluator;
441
+ ThisEvaluator thisEval(derived());
442
+
443
+ // The initial expression is passed to the reducer as an additional argument instead of
444
+ // passing it as a member of redux_evaluator to help
445
+ return internal::redux_impl<Func, ThisEvaluator>::run(thisEval, func, derived());
446
+ }
447
+
448
+ /** \returns the minimum of all coefficients of \c *this.
449
+ * In case \c *this contains NaN, NaNPropagation determines the behavior:
450
+ * NaNPropagation == PropagateFast : undefined
451
+ * NaNPropagation == PropagateNaN : result is NaN
452
+ * NaNPropagation == PropagateNumbers : result is minimum of elements that are not NaN
453
+ * \warning the matrix must be not empty, otherwise an assertion is triggered.
454
+ */
455
+ template <typename Derived>
456
+ template <int NaNPropagation>
457
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar DenseBase<Derived>::minCoeff() const {
458
+ return derived().redux(Eigen::internal::scalar_min_op<Scalar, Scalar, NaNPropagation>());
459
+ }
460
+
461
+ /** \returns the maximum of all coefficients of \c *this.
462
+ * In case \c *this contains NaN, NaNPropagation determines the behavior:
463
+ * NaNPropagation == PropagateFast : undefined
464
+ * NaNPropagation == PropagateNaN : result is NaN
465
+ * NaNPropagation == PropagateNumbers : result is maximum of elements that are not NaN
466
+ * \warning the matrix must be not empty, otherwise an assertion is triggered.
467
+ */
468
+ template <typename Derived>
469
+ template <int NaNPropagation>
470
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar DenseBase<Derived>::maxCoeff() const {
471
+ return derived().redux(Eigen::internal::scalar_max_op<Scalar, Scalar, NaNPropagation>());
472
+ }
473
+
474
+ /** \returns the sum of all coefficients of \c *this
475
+ *
476
+ * If \c *this is empty, then the value 0 is returned.
477
+ *
478
+ * \sa trace(), prod(), mean()
479
+ */
480
+ template <typename Derived>
481
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar DenseBase<Derived>::sum() const {
482
+ if (SizeAtCompileTime == 0 || (SizeAtCompileTime == Dynamic && size() == 0)) return Scalar(0);
483
+ return derived().redux(Eigen::internal::scalar_sum_op<Scalar, Scalar>());
484
+ }
485
+
486
+ /** \returns the mean of all coefficients of *this
487
+ *
488
+ * \sa trace(), prod(), sum()
489
+ */
490
+ template <typename Derived>
491
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar DenseBase<Derived>::mean() const {
492
+ #ifdef __INTEL_COMPILER
493
+ #pragma warning push
494
+ #pragma warning(disable : 2259)
495
+ #endif
496
+ return Scalar(derived().redux(Eigen::internal::scalar_sum_op<Scalar, Scalar>())) / Scalar(this->size());
497
+ #ifdef __INTEL_COMPILER
498
+ #pragma warning pop
499
+ #endif
500
+ }
501
+
502
+ /** \returns the product of all coefficients of *this
503
+ *
504
+ * Example: \include MatrixBase_prod.cpp
505
+ * Output: \verbinclude MatrixBase_prod.out
506
+ *
507
+ * \sa sum(), mean(), trace()
508
+ */
509
+ template <typename Derived>
510
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar DenseBase<Derived>::prod() const {
511
+ if (SizeAtCompileTime == 0 || (SizeAtCompileTime == Dynamic && size() == 0)) return Scalar(1);
512
+ return derived().redux(Eigen::internal::scalar_product_op<Scalar>());
513
+ }
514
+
515
+ /** \returns the trace of \c *this, i.e. the sum of the coefficients on the main diagonal.
516
+ *
517
+ * \c *this can be any matrix, not necessarily square.
518
+ *
519
+ * \sa diagonal(), sum()
520
+ */
521
+ template <typename Derived>
522
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar MatrixBase<Derived>::trace() const {
523
+ return derived().diagonal().sum();
524
+ }
525
+
526
+ } // end namespace Eigen
527
+
528
+ #endif // EIGEN_REDUX_H
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/Ref.h ADDED
@@ -0,0 +1,383 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2012 Gael Guennebaud <gael.guennebaud@inria.fr>
5
+ //
6
+ // This Source Code Form is subject to the terms of the Mozilla
7
+ // Public License v. 2.0. If a copy of the MPL was not distributed
8
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9
+
10
+ #ifndef EIGEN_REF_H
11
+ #define EIGEN_REF_H
12
+
13
+ // IWYU pragma: private
14
+ #include "./InternalHeaderCheck.h"
15
+
16
+ namespace Eigen {
17
+
18
+ namespace internal {
19
+
20
+ template <typename PlainObjectType_, int Options_, typename StrideType_>
21
+ struct traits<Ref<PlainObjectType_, Options_, StrideType_> >
22
+ : public traits<Map<PlainObjectType_, Options_, StrideType_> > {
23
+ typedef PlainObjectType_ PlainObjectType;
24
+ typedef StrideType_ StrideType;
25
+ enum {
26
+ Options = Options_,
27
+ Flags = traits<Map<PlainObjectType_, Options_, StrideType_> >::Flags | NestByRefBit,
28
+ Alignment = traits<Map<PlainObjectType_, Options_, StrideType_> >::Alignment,
29
+ InnerStrideAtCompileTime = traits<Map<PlainObjectType_, Options_, StrideType_> >::InnerStrideAtCompileTime,
30
+ OuterStrideAtCompileTime = traits<Map<PlainObjectType_, Options_, StrideType_> >::OuterStrideAtCompileTime
31
+ };
32
+
33
+ template <typename Derived>
34
+ struct match {
35
+ enum {
36
+ IsVectorAtCompileTime = PlainObjectType::IsVectorAtCompileTime || Derived::IsVectorAtCompileTime,
37
+ HasDirectAccess = internal::has_direct_access<Derived>::ret,
38
+ StorageOrderMatch =
39
+ IsVectorAtCompileTime || ((PlainObjectType::Flags & RowMajorBit) == (Derived::Flags & RowMajorBit)),
40
+ InnerStrideMatch = int(InnerStrideAtCompileTime) == int(Dynamic) ||
41
+ int(InnerStrideAtCompileTime) == int(Derived::InnerStrideAtCompileTime) ||
42
+ (int(InnerStrideAtCompileTime) == 0 && int(Derived::InnerStrideAtCompileTime) == 1),
43
+ OuterStrideMatch = IsVectorAtCompileTime || int(OuterStrideAtCompileTime) == int(Dynamic) ||
44
+ int(OuterStrideAtCompileTime) == int(Derived::OuterStrideAtCompileTime),
45
+ // NOTE, this indirection of evaluator<Derived>::Alignment is needed
46
+ // to workaround a very strange bug in MSVC related to the instantiation
47
+ // of has_*ary_operator in evaluator<CwiseNullaryOp>.
48
+ // This line is surprisingly very sensitive. For instance, simply adding parenthesis
49
+ // as "DerivedAlignment = (int(evaluator<Derived>::Alignment))," will make MSVC fail...
50
+ DerivedAlignment = int(evaluator<Derived>::Alignment),
51
+ AlignmentMatch = (int(traits<PlainObjectType>::Alignment) == int(Unaligned)) ||
52
+ (DerivedAlignment >= int(Alignment)), // FIXME the first condition is not very clear, it should
53
+ // be replaced by the required alignment
54
+ ScalarTypeMatch = internal::is_same<typename PlainObjectType::Scalar, typename Derived::Scalar>::value,
55
+ MatchAtCompileTime = HasDirectAccess && StorageOrderMatch && InnerStrideMatch && OuterStrideMatch &&
56
+ AlignmentMatch && ScalarTypeMatch
57
+ };
58
+ typedef std::conditional_t<MatchAtCompileTime, internal::true_type, internal::false_type> type;
59
+ };
60
+ };
61
+
62
+ template <typename Derived>
63
+ struct traits<RefBase<Derived> > : public traits<Derived> {};
64
+
65
+ } // namespace internal
66
+
67
+ template <typename Derived>
68
+ class RefBase : public MapBase<Derived> {
69
+ typedef typename internal::traits<Derived>::PlainObjectType PlainObjectType;
70
+ typedef typename internal::traits<Derived>::StrideType StrideType;
71
+
72
+ public:
73
+ typedef MapBase<Derived> Base;
74
+ EIGEN_DENSE_PUBLIC_INTERFACE(RefBase)
75
+
76
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index innerStride() const {
77
+ return StrideType::InnerStrideAtCompileTime != 0 ? m_stride.inner() : 1;
78
+ }
79
+
80
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index outerStride() const {
81
+ return StrideType::OuterStrideAtCompileTime != 0 ? m_stride.outer()
82
+ : IsVectorAtCompileTime ? this->size()
83
+ : int(Flags) & RowMajorBit ? this->cols()
84
+ : this->rows();
85
+ }
86
+
87
+ EIGEN_DEVICE_FUNC RefBase()
88
+ : Base(0, RowsAtCompileTime == Dynamic ? 0 : RowsAtCompileTime,
89
+ ColsAtCompileTime == Dynamic ? 0 : ColsAtCompileTime),
90
+ // Stride<> does not allow default ctor for Dynamic strides, so let' initialize it with dummy values:
91
+ m_stride(StrideType::OuterStrideAtCompileTime == Dynamic ? 0 : StrideType::OuterStrideAtCompileTime,
92
+ StrideType::InnerStrideAtCompileTime == Dynamic ? 0 : StrideType::InnerStrideAtCompileTime) {}
93
+
94
+ EIGEN_INHERIT_ASSIGNMENT_OPERATORS(RefBase)
95
+
96
+ protected:
97
+ typedef Stride<StrideType::OuterStrideAtCompileTime, StrideType::InnerStrideAtCompileTime> StrideBase;
98
+
99
+ // Resolves inner stride if default 0.
100
+ static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index resolveInnerStride(Index inner) { return inner == 0 ? 1 : inner; }
101
+
102
+ // Resolves outer stride if default 0.
103
+ static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index resolveOuterStride(Index inner, Index outer, Index rows, Index cols,
104
+ bool isVectorAtCompileTime, bool isRowMajor) {
105
+ return outer == 0 ? isVectorAtCompileTime ? inner * rows * cols : isRowMajor ? inner * cols : inner * rows : outer;
106
+ }
107
+
108
+ // Returns true if construction is valid, false if there is a stride mismatch,
109
+ // and fails if there is a size mismatch.
110
+ template <typename Expression>
111
+ EIGEN_DEVICE_FUNC bool construct(Expression& expr) {
112
+ // Check matrix sizes. If this is a compile-time vector, we do allow
113
+ // implicitly transposing.
114
+ EIGEN_STATIC_ASSERT(EIGEN_PREDICATE_SAME_MATRIX_SIZE(PlainObjectType, Expression)
115
+ // If it is a vector, the transpose sizes might match.
116
+ || (PlainObjectType::IsVectorAtCompileTime &&
117
+ ((int(PlainObjectType::RowsAtCompileTime) == Eigen::Dynamic ||
118
+ int(Expression::ColsAtCompileTime) == Eigen::Dynamic ||
119
+ int(PlainObjectType::RowsAtCompileTime) == int(Expression::ColsAtCompileTime)) &&
120
+ (int(PlainObjectType::ColsAtCompileTime) == Eigen::Dynamic ||
121
+ int(Expression::RowsAtCompileTime) == Eigen::Dynamic ||
122
+ int(PlainObjectType::ColsAtCompileTime) == int(Expression::RowsAtCompileTime)))),
123
+ YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES)
124
+
125
+ // Determine runtime rows and columns.
126
+ Index rows = expr.rows();
127
+ Index cols = expr.cols();
128
+ if (PlainObjectType::RowsAtCompileTime == 1) {
129
+ eigen_assert(expr.rows() == 1 || expr.cols() == 1);
130
+ rows = 1;
131
+ cols = expr.size();
132
+ } else if (PlainObjectType::ColsAtCompileTime == 1) {
133
+ eigen_assert(expr.rows() == 1 || expr.cols() == 1);
134
+ rows = expr.size();
135
+ cols = 1;
136
+ }
137
+ // Verify that the sizes are valid.
138
+ eigen_assert((PlainObjectType::RowsAtCompileTime == Dynamic) || (PlainObjectType::RowsAtCompileTime == rows));
139
+ eigen_assert((PlainObjectType::ColsAtCompileTime == Dynamic) || (PlainObjectType::ColsAtCompileTime == cols));
140
+
141
+ // If this is a vector, we might be transposing, which means that stride should swap.
142
+ const bool transpose = PlainObjectType::IsVectorAtCompileTime && (rows != expr.rows());
143
+ // If the storage format differs, we also need to swap the stride.
144
+ const bool row_major = ((PlainObjectType::Flags)&RowMajorBit) != 0;
145
+ const bool expr_row_major = (Expression::Flags & RowMajorBit) != 0;
146
+ const bool storage_differs = (row_major != expr_row_major);
147
+
148
+ const bool swap_stride = (transpose != storage_differs);
149
+
150
+ // Determine expr's actual strides, resolving any defaults if zero.
151
+ const Index expr_inner_actual = resolveInnerStride(expr.innerStride());
152
+ const Index expr_outer_actual = resolveOuterStride(expr_inner_actual, expr.outerStride(), expr.rows(), expr.cols(),
153
+ Expression::IsVectorAtCompileTime != 0, expr_row_major);
154
+
155
+ // If this is a column-major row vector or row-major column vector, the inner-stride
156
+ // is arbitrary, so set it to either the compile-time inner stride or 1.
157
+ const bool row_vector = (rows == 1);
158
+ const bool col_vector = (cols == 1);
159
+ const Index inner_stride =
160
+ ((!row_major && row_vector) || (row_major && col_vector))
161
+ ? (StrideType::InnerStrideAtCompileTime > 0 ? Index(StrideType::InnerStrideAtCompileTime) : 1)
162
+ : swap_stride ? expr_outer_actual
163
+ : expr_inner_actual;
164
+
165
+ // If this is a column-major column vector or row-major row vector, the outer-stride
166
+ // is arbitrary, so set it to either the compile-time outer stride or vector size.
167
+ const Index outer_stride =
168
+ ((!row_major && col_vector) || (row_major && row_vector))
169
+ ? (StrideType::OuterStrideAtCompileTime > 0 ? Index(StrideType::OuterStrideAtCompileTime)
170
+ : rows * cols * inner_stride)
171
+ : swap_stride ? expr_inner_actual
172
+ : expr_outer_actual;
173
+
174
+ // Check if given inner/outer strides are compatible with compile-time strides.
175
+ const bool inner_valid = (StrideType::InnerStrideAtCompileTime == Dynamic) ||
176
+ (resolveInnerStride(Index(StrideType::InnerStrideAtCompileTime)) == inner_stride);
177
+ if (!inner_valid) {
178
+ return false;
179
+ }
180
+
181
+ const bool outer_valid =
182
+ (StrideType::OuterStrideAtCompileTime == Dynamic) ||
183
+ (resolveOuterStride(inner_stride, Index(StrideType::OuterStrideAtCompileTime), rows, cols,
184
+ PlainObjectType::IsVectorAtCompileTime != 0, row_major) == outer_stride);
185
+ if (!outer_valid) {
186
+ return false;
187
+ }
188
+
189
+ internal::construct_at<Base>(this, expr.data(), rows, cols);
190
+ internal::construct_at(&m_stride, (StrideType::OuterStrideAtCompileTime == 0) ? 0 : outer_stride,
191
+ (StrideType::InnerStrideAtCompileTime == 0) ? 0 : inner_stride);
192
+ return true;
193
+ }
194
+
195
+ StrideBase m_stride;
196
+ };
197
+
198
+ /** \class Ref
199
+ * \ingroup Core_Module
200
+ *
201
+ * \brief A matrix or vector expression mapping an existing expression
202
+ *
203
+ * \tparam PlainObjectType the equivalent matrix type of the mapped data
204
+ * \tparam Options specifies the pointer alignment in bytes. It can be: \c #Aligned128, , \c #Aligned64, \c #Aligned32,
205
+ * \c #Aligned16, \c #Aligned8 or \c #Unaligned. The default is \c #Unaligned. \tparam StrideType optionally specifies
206
+ * strides. By default, Ref implies a contiguous storage along the inner dimension (inner stride==1), but accepts a
207
+ * variable outer stride (leading dimension). This can be overridden by specifying strides. The type passed here must be
208
+ * a specialization of the Stride template, see examples below.
209
+ *
210
+ * This class provides a way to write non-template functions taking Eigen objects as parameters while limiting the
211
+ * number of copies. A Ref<> object can represent either a const expression or a l-value: \code
212
+ * // in-out argument:
213
+ * void foo1(Ref<VectorXf> x);
214
+ *
215
+ * // read-only const argument:
216
+ * void foo2(const Ref<const VectorXf>& x);
217
+ * \endcode
218
+ *
219
+ * In the in-out case, the input argument must satisfy the constraints of the actual Ref<> type, otherwise a compilation
220
+ * issue will be triggered. By default, a Ref<VectorXf> can reference any dense vector expression of float having a
221
+ * contiguous memory layout. Likewise, a Ref<MatrixXf> can reference any column-major dense matrix expression of float
222
+ * whose column's elements are contiguously stored with the possibility to have a constant space in-between each column,
223
+ * i.e. the inner stride must be equal to 1, but the outer stride (or leading dimension) can be greater than the number
224
+ * of rows.
225
+ *
226
+ * In the const case, if the input expression does not match the above requirement, then it is evaluated into a
227
+ * temporary before being passed to the function. Here are some examples: \code MatrixXf A; VectorXf a; foo1(a.head());
228
+ * // OK foo1(A.col()); // OK foo1(A.row()); // Compilation error because here innerstride!=1
229
+ * foo2(A.row()); // Compilation error because A.row() is a 1xN object while foo2 is expecting a Nx1 object
230
+ * foo2(A.row().transpose()); // The row is copied into a contiguous temporary
231
+ * foo2(2*a); // The expression is evaluated into a temporary
232
+ * foo2(A.col().segment(2,4)); // No temporary
233
+ * \endcode
234
+ *
235
+ * The range of inputs that can be referenced without temporary can be enlarged using the last two template parameters.
236
+ * Here is an example accepting an innerstride!=1:
237
+ * \code
238
+ * // in-out argument:
239
+ * void foo3(Ref<VectorXf,0,InnerStride<> > x);
240
+ * foo3(A.row()); // OK
241
+ * \endcode
242
+ * The downside here is that the function foo3 might be significantly slower than foo1 because it won't be able to
243
+ * exploit vectorization, and will involve more expensive address computations even if the input is contiguously stored
244
+ * in memory. To overcome this issue, one might propose to overload internally calling a template function, e.g.: \code
245
+ * // in the .h:
246
+ * void foo(const Ref<MatrixXf>& A);
247
+ * void foo(const Ref<MatrixXf,0,Stride<> >& A);
248
+ *
249
+ * // in the .cpp:
250
+ * template<typename TypeOfA> void foo_impl(const TypeOfA& A) {
251
+ * ... // crazy code goes here
252
+ * }
253
+ * void foo(const Ref<MatrixXf>& A) { foo_impl(A); }
254
+ * void foo(const Ref<MatrixXf,0,Stride<> >& A) { foo_impl(A); }
255
+ * \endcode
256
+ *
257
+ * See also the following stackoverflow questions for further references:
258
+ * - <a href="http://stackoverflow.com/questions/21132538/correct-usage-of-the-eigenref-class">Correct usage of the
259
+ * Eigen::Ref<> class</a>
260
+ *
261
+ * \sa PlainObjectBase::Map(), \ref TopicStorageOrders
262
+ */
263
+ template <typename PlainObjectType, int Options, typename StrideType>
264
+ class Ref : public RefBase<Ref<PlainObjectType, Options, StrideType> > {
265
+ private:
266
+ typedef internal::traits<Ref> Traits;
267
+ template <typename Derived>
268
+ EIGEN_DEVICE_FUNC inline Ref(
269
+ const PlainObjectBase<Derived>& expr,
270
+ std::enable_if_t<bool(Traits::template match<Derived>::MatchAtCompileTime), Derived>* = 0);
271
+
272
+ public:
273
+ typedef RefBase<Ref> Base;
274
+ EIGEN_DENSE_PUBLIC_INTERFACE(Ref)
275
+
276
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
277
+ template <typename Derived>
278
+ EIGEN_DEVICE_FUNC inline Ref(
279
+ PlainObjectBase<Derived>& expr,
280
+ std::enable_if_t<bool(Traits::template match<Derived>::MatchAtCompileTime), Derived>* = 0) {
281
+ EIGEN_STATIC_ASSERT(bool(Traits::template match<Derived>::MatchAtCompileTime), STORAGE_LAYOUT_DOES_NOT_MATCH);
282
+ // Construction must pass since we will not create temporary storage in the non-const case.
283
+ const bool success = Base::construct(expr.derived());
284
+ EIGEN_UNUSED_VARIABLE(success)
285
+ eigen_assert(success);
286
+ }
287
+ template <typename Derived>
288
+ EIGEN_DEVICE_FUNC inline Ref(
289
+ const DenseBase<Derived>& expr,
290
+ std::enable_if_t<bool(Traits::template match<Derived>::MatchAtCompileTime), Derived>* = 0)
291
+ #else
292
+ /** Implicit constructor from any dense expression */
293
+ template <typename Derived>
294
+ inline Ref(DenseBase<Derived>& expr)
295
+ #endif
296
+ {
297
+ EIGEN_STATIC_ASSERT(bool(internal::is_lvalue<Derived>::value), THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY);
298
+ EIGEN_STATIC_ASSERT(bool(Traits::template match<Derived>::MatchAtCompileTime), STORAGE_LAYOUT_DOES_NOT_MATCH);
299
+ EIGEN_STATIC_ASSERT(!Derived::IsPlainObjectBase, THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY);
300
+ // Construction must pass since we will not create temporary storage in the non-const case.
301
+ const bool success = Base::construct(expr.const_cast_derived());
302
+ EIGEN_UNUSED_VARIABLE(success)
303
+ eigen_assert(success);
304
+ }
305
+
306
+ EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Ref)
307
+ };
308
+
309
+ // this is the const ref version
310
+ template <typename TPlainObjectType, int Options, typename StrideType>
311
+ class Ref<const TPlainObjectType, Options, StrideType>
312
+ : public RefBase<Ref<const TPlainObjectType, Options, StrideType> > {
313
+ typedef internal::traits<Ref> Traits;
314
+
315
+ static constexpr bool may_map_m_object_successfully =
316
+ (static_cast<int>(StrideType::InnerStrideAtCompileTime) == 0 ||
317
+ static_cast<int>(StrideType::InnerStrideAtCompileTime) == 1 ||
318
+ static_cast<int>(StrideType::InnerStrideAtCompileTime) == Dynamic) &&
319
+ (TPlainObjectType::IsVectorAtCompileTime || static_cast<int>(StrideType::OuterStrideAtCompileTime) == 0 ||
320
+ static_cast<int>(StrideType::OuterStrideAtCompileTime) == Dynamic ||
321
+ static_cast<int>(StrideType::OuterStrideAtCompileTime) ==
322
+ static_cast<int>(TPlainObjectType::InnerSizeAtCompileTime) ||
323
+ static_cast<int>(TPlainObjectType::InnerSizeAtCompileTime) == Dynamic);
324
+
325
+ public:
326
+ typedef RefBase<Ref> Base;
327
+ EIGEN_DENSE_PUBLIC_INTERFACE(Ref)
328
+
329
+ template <typename Derived>
330
+ EIGEN_DEVICE_FUNC inline Ref(const DenseBase<Derived>& expr,
331
+ std::enable_if_t<bool(Traits::template match<Derived>::ScalarTypeMatch), Derived>* = 0) {
332
+ // std::cout << match_helper<Derived>::HasDirectAccess << "," << match_helper<Derived>::OuterStrideMatch << ","
333
+ // << match_helper<Derived>::InnerStrideMatch << "\n"; std::cout << int(StrideType::OuterStrideAtCompileTime)
334
+ // << " - " << int(Derived::OuterStrideAtCompileTime) << "\n"; std::cout <<
335
+ // int(StrideType::InnerStrideAtCompileTime) << " - " << int(Derived::InnerStrideAtCompileTime) << "\n";
336
+ EIGEN_STATIC_ASSERT(Traits::template match<Derived>::type::value || may_map_m_object_successfully,
337
+ STORAGE_LAYOUT_DOES_NOT_MATCH);
338
+ construct(expr.derived(), typename Traits::template match<Derived>::type());
339
+ }
340
+
341
+ EIGEN_DEVICE_FUNC inline Ref(const Ref& other) : Base(other) {
342
+ // copy constructor shall not copy the m_object, to avoid unnecessary malloc and copy
343
+ }
344
+
345
+ EIGEN_DEVICE_FUNC inline Ref(Ref&& other) {
346
+ if (other.data() == other.m_object.data()) {
347
+ m_object = std::move(other.m_object);
348
+ Base::construct(m_object);
349
+ } else
350
+ Base::construct(other);
351
+ }
352
+
353
+ template <typename OtherRef>
354
+ EIGEN_DEVICE_FUNC inline Ref(const RefBase<OtherRef>& other) {
355
+ EIGEN_STATIC_ASSERT(Traits::template match<OtherRef>::type::value || may_map_m_object_successfully,
356
+ STORAGE_LAYOUT_DOES_NOT_MATCH);
357
+ construct(other.derived(), typename Traits::template match<OtherRef>::type());
358
+ }
359
+
360
+ protected:
361
+ template <typename Expression>
362
+ EIGEN_DEVICE_FUNC void construct(const Expression& expr, internal::true_type) {
363
+ // Check if we can use the underlying expr's storage directly, otherwise call the copy version.
364
+ if (!Base::construct(expr)) {
365
+ construct(expr, internal::false_type());
366
+ }
367
+ }
368
+
369
+ template <typename Expression>
370
+ EIGEN_DEVICE_FUNC void construct(const Expression& expr, internal::false_type) {
371
+ internal::call_assignment_no_alias(m_object, expr, internal::assign_op<Scalar, Scalar>());
372
+ const bool success = Base::construct(m_object);
373
+ EIGEN_ONLY_USED_FOR_DEBUG(success)
374
+ eigen_assert(success);
375
+ }
376
+
377
+ protected:
378
+ TPlainObjectType m_object;
379
+ };
380
+
381
+ } // end namespace Eigen
382
+
383
+ #endif // EIGEN_REF_H
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/Replicate.h ADDED
@@ -0,0 +1,130 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
5
+ //
6
+ // This Source Code Form is subject to the terms of the Mozilla
7
+ // Public License v. 2.0. If a copy of the MPL was not distributed
8
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9
+
10
+ #ifndef EIGEN_REPLICATE_H
11
+ #define EIGEN_REPLICATE_H
12
+
13
+ // IWYU pragma: private
14
+ #include "./InternalHeaderCheck.h"
15
+
16
+ namespace Eigen {
17
+
18
+ namespace internal {
19
+ template <typename MatrixType, int RowFactor, int ColFactor>
20
+ struct traits<Replicate<MatrixType, RowFactor, ColFactor> > : traits<MatrixType> {
21
+ typedef typename MatrixType::Scalar Scalar;
22
+ typedef typename traits<MatrixType>::StorageKind StorageKind;
23
+ typedef typename traits<MatrixType>::XprKind XprKind;
24
+ typedef typename ref_selector<MatrixType>::type MatrixTypeNested;
25
+ typedef std::remove_reference_t<MatrixTypeNested> MatrixTypeNested_;
26
+ enum {
27
+ RowsAtCompileTime = RowFactor == Dynamic || int(MatrixType::RowsAtCompileTime) == Dynamic
28
+ ? Dynamic
29
+ : RowFactor * MatrixType::RowsAtCompileTime,
30
+ ColsAtCompileTime = ColFactor == Dynamic || int(MatrixType::ColsAtCompileTime) == Dynamic
31
+ ? Dynamic
32
+ : ColFactor * MatrixType::ColsAtCompileTime,
33
+ // FIXME we don't propagate the max sizes !!!
34
+ MaxRowsAtCompileTime = RowsAtCompileTime,
35
+ MaxColsAtCompileTime = ColsAtCompileTime,
36
+ IsRowMajor = MaxRowsAtCompileTime == 1 && MaxColsAtCompileTime != 1 ? 1
37
+ : MaxColsAtCompileTime == 1 && MaxRowsAtCompileTime != 1 ? 0
38
+ : (MatrixType::Flags & RowMajorBit) ? 1
39
+ : 0,
40
+
41
+ // FIXME enable DirectAccess with negative strides?
42
+ Flags = IsRowMajor ? RowMajorBit : 0
43
+ };
44
+ };
45
+ } // namespace internal
46
+
47
+ /**
48
+ * \class Replicate
49
+ * \ingroup Core_Module
50
+ *
51
+ * \brief Expression of the multiple replication of a matrix or vector
52
+ *
53
+ * \tparam MatrixType the type of the object we are replicating
54
+ * \tparam RowFactor number of repetitions at compile time along the vertical direction, can be Dynamic.
55
+ * \tparam ColFactor number of repetitions at compile time along the horizontal direction, can be Dynamic.
56
+ *
57
+ * This class represents an expression of the multiple replication of a matrix or vector.
58
+ * It is the return type of DenseBase::replicate() and most of the time
59
+ * this is the only way it is used.
60
+ *
61
+ * \sa DenseBase::replicate()
62
+ */
63
+ template <typename MatrixType, int RowFactor, int ColFactor>
64
+ class Replicate : public internal::dense_xpr_base<Replicate<MatrixType, RowFactor, ColFactor> >::type {
65
+ typedef typename internal::traits<Replicate>::MatrixTypeNested MatrixTypeNested;
66
+ typedef typename internal::traits<Replicate>::MatrixTypeNested_ MatrixTypeNested_;
67
+
68
+ public:
69
+ typedef typename internal::dense_xpr_base<Replicate>::type Base;
70
+ EIGEN_DENSE_PUBLIC_INTERFACE(Replicate)
71
+ typedef internal::remove_all_t<MatrixType> NestedExpression;
72
+
73
+ template <typename OriginalMatrixType>
74
+ EIGEN_DEVICE_FUNC inline explicit Replicate(const OriginalMatrixType& matrix)
75
+ : m_matrix(matrix), m_rowFactor(RowFactor), m_colFactor(ColFactor) {
76
+ EIGEN_STATIC_ASSERT((internal::is_same<std::remove_const_t<MatrixType>, OriginalMatrixType>::value),
77
+ THE_MATRIX_OR_EXPRESSION_THAT_YOU_PASSED_DOES_NOT_HAVE_THE_EXPECTED_TYPE)
78
+ eigen_assert(RowFactor != Dynamic && ColFactor != Dynamic);
79
+ }
80
+
81
+ template <typename OriginalMatrixType>
82
+ EIGEN_DEVICE_FUNC inline Replicate(const OriginalMatrixType& matrix, Index rowFactor, Index colFactor)
83
+ : m_matrix(matrix), m_rowFactor(rowFactor), m_colFactor(colFactor) {
84
+ EIGEN_STATIC_ASSERT((internal::is_same<std::remove_const_t<MatrixType>, OriginalMatrixType>::value),
85
+ THE_MATRIX_OR_EXPRESSION_THAT_YOU_PASSED_DOES_NOT_HAVE_THE_EXPECTED_TYPE)
86
+ }
87
+
88
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index rows() const { return m_matrix.rows() * m_rowFactor.value(); }
89
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index cols() const { return m_matrix.cols() * m_colFactor.value(); }
90
+
91
+ EIGEN_DEVICE_FUNC const MatrixTypeNested_& nestedExpression() const { return m_matrix; }
92
+
93
+ protected:
94
+ MatrixTypeNested m_matrix;
95
+ const internal::variable_if_dynamic<Index, RowFactor> m_rowFactor;
96
+ const internal::variable_if_dynamic<Index, ColFactor> m_colFactor;
97
+ };
98
+
99
+ /**
100
+ * \return an expression of the replication of \c *this
101
+ *
102
+ * Example: \include MatrixBase_replicate.cpp
103
+ * Output: \verbinclude MatrixBase_replicate.out
104
+ *
105
+ * \sa VectorwiseOp::replicate(), DenseBase::replicate(Index,Index), class Replicate
106
+ */
107
+ template <typename Derived>
108
+ template <int RowFactor, int ColFactor>
109
+ EIGEN_DEVICE_FUNC const Replicate<Derived, RowFactor, ColFactor> DenseBase<Derived>::replicate() const {
110
+ return Replicate<Derived, RowFactor, ColFactor>(derived());
111
+ }
112
+
113
+ /**
114
+ * \return an expression of the replication of each column (or row) of \c *this
115
+ *
116
+ * Example: \include DirectionWise_replicate_int.cpp
117
+ * Output: \verbinclude DirectionWise_replicate_int.out
118
+ *
119
+ * \sa VectorwiseOp::replicate(), DenseBase::replicate(), class Replicate
120
+ */
121
+ template <typename ExpressionType, int Direction>
122
+ EIGEN_DEVICE_FUNC const typename VectorwiseOp<ExpressionType, Direction>::ReplicateReturnType
123
+ VectorwiseOp<ExpressionType, Direction>::replicate(Index factor) const {
124
+ return typename VectorwiseOp<ExpressionType, Direction>::ReplicateReturnType(
125
+ _expression(), Direction == Vertical ? factor : 1, Direction == Horizontal ? factor : 1);
126
+ }
127
+
128
+ } // end namespace Eigen
129
+
130
+ #endif // EIGEN_REPLICATE_H
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/Reshaped.h ADDED
@@ -0,0 +1,398 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2008-2017 Gael Guennebaud <gael.guennebaud@inria.fr>
5
+ // Copyright (C) 2014 yoco <peter.xiau@gmail.com>
6
+ //
7
+ // This Source Code Form is subject to the terms of the Mozilla
8
+ // Public License v. 2.0. If a copy of the MPL was not distributed
9
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
10
+
11
+ #ifndef EIGEN_RESHAPED_H
12
+ #define EIGEN_RESHAPED_H
13
+
14
+ // IWYU pragma: private
15
+ #include "./InternalHeaderCheck.h"
16
+
17
+ namespace Eigen {
18
+
19
+ /** \class Reshaped
20
+ * \ingroup Core_Module
21
+ *
22
+ * \brief Expression of a fixed-size or dynamic-size reshape
23
+ *
24
+ * \tparam XprType the type of the expression in which we are taking a reshape
25
+ * \tparam Rows the number of rows of the reshape we are taking at compile time (optional)
26
+ * \tparam Cols the number of columns of the reshape we are taking at compile time (optional)
27
+ * \tparam Order can be ColMajor or RowMajor, default is ColMajor.
28
+ *
29
+ * This class represents an expression of either a fixed-size or dynamic-size reshape.
30
+ * It is the return type of DenseBase::reshaped(NRowsType,NColsType) and
31
+ * most of the time this is the only way it is used.
32
+ *
33
+ * If you want to directly manipulate reshaped expressions,
34
+ * for instance if you want to write a function returning such an expression,
35
+ * it is advised to use the \em auto keyword for such use cases.
36
+ *
37
+ * Here is an example illustrating the dynamic case:
38
+ * \include class_Reshaped.cpp
39
+ * Output: \verbinclude class_Reshaped.out
40
+ *
41
+ * Here is an example illustrating the fixed-size case:
42
+ * \include class_FixedReshaped.cpp
43
+ * Output: \verbinclude class_FixedReshaped.out
44
+ *
45
+ * \sa DenseBase::reshaped(NRowsType,NColsType)
46
+ */
47
+
48
+ namespace internal {
49
+
50
+ template <typename XprType, int Rows, int Cols, int Order>
51
+ struct traits<Reshaped<XprType, Rows, Cols, Order> > : traits<XprType> {
52
+ typedef typename traits<XprType>::Scalar Scalar;
53
+ typedef typename traits<XprType>::StorageKind StorageKind;
54
+ typedef typename traits<XprType>::XprKind XprKind;
55
+ enum {
56
+ MatrixRows = traits<XprType>::RowsAtCompileTime,
57
+ MatrixCols = traits<XprType>::ColsAtCompileTime,
58
+ RowsAtCompileTime = Rows,
59
+ ColsAtCompileTime = Cols,
60
+ MaxRowsAtCompileTime = Rows,
61
+ MaxColsAtCompileTime = Cols,
62
+ XpxStorageOrder = ((int(traits<XprType>::Flags) & RowMajorBit) == RowMajorBit) ? RowMajor : ColMajor,
63
+ ReshapedStorageOrder = (RowsAtCompileTime == 1 && ColsAtCompileTime != 1) ? RowMajor
64
+ : (ColsAtCompileTime == 1 && RowsAtCompileTime != 1) ? ColMajor
65
+ : XpxStorageOrder,
66
+ HasSameStorageOrderAsXprType = (ReshapedStorageOrder == XpxStorageOrder),
67
+ InnerSize = (ReshapedStorageOrder == int(RowMajor)) ? int(ColsAtCompileTime) : int(RowsAtCompileTime),
68
+ InnerStrideAtCompileTime = HasSameStorageOrderAsXprType ? int(inner_stride_at_compile_time<XprType>::ret) : Dynamic,
69
+ OuterStrideAtCompileTime = Dynamic,
70
+
71
+ HasDirectAccess = internal::has_direct_access<XprType>::ret && (Order == int(XpxStorageOrder)) &&
72
+ ((evaluator<XprType>::Flags & LinearAccessBit) == LinearAccessBit),
73
+
74
+ MaskPacketAccessBit =
75
+ (InnerSize == Dynamic || (InnerSize % packet_traits<Scalar>::size) == 0) && (InnerStrideAtCompileTime == 1)
76
+ ? PacketAccessBit
77
+ : 0,
78
+ // MaskAlignedBit = ((OuterStrideAtCompileTime!=Dynamic) && (((OuterStrideAtCompileTime * int(sizeof(Scalar))) % 16)
79
+ // == 0)) ? AlignedBit : 0,
80
+ FlagsLinearAccessBit = (RowsAtCompileTime == 1 || ColsAtCompileTime == 1) ? LinearAccessBit : 0,
81
+ FlagsLvalueBit = is_lvalue<XprType>::value ? LvalueBit : 0,
82
+ FlagsRowMajorBit = (ReshapedStorageOrder == int(RowMajor)) ? RowMajorBit : 0,
83
+ FlagsDirectAccessBit = HasDirectAccess ? DirectAccessBit : 0,
84
+ Flags0 = traits<XprType>::Flags & ((HereditaryBits & ~RowMajorBit) | MaskPacketAccessBit),
85
+
86
+ Flags = (Flags0 | FlagsLinearAccessBit | FlagsLvalueBit | FlagsRowMajorBit | FlagsDirectAccessBit)
87
+ };
88
+ };
89
+
90
+ template <typename XprType, int Rows, int Cols, int Order, bool HasDirectAccess>
91
+ class ReshapedImpl_dense;
92
+
93
+ } // end namespace internal
94
+
95
+ template <typename XprType, int Rows, int Cols, int Order, typename StorageKind>
96
+ class ReshapedImpl;
97
+
98
+ template <typename XprType, int Rows, int Cols, int Order>
99
+ class Reshaped : public ReshapedImpl<XprType, Rows, Cols, Order, typename internal::traits<XprType>::StorageKind> {
100
+ typedef ReshapedImpl<XprType, Rows, Cols, Order, typename internal::traits<XprType>::StorageKind> Impl;
101
+
102
+ public:
103
+ // typedef typename Impl::Base Base;
104
+ typedef Impl Base;
105
+ EIGEN_GENERIC_PUBLIC_INTERFACE(Reshaped)
106
+ EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Reshaped)
107
+
108
+ /** Fixed-size constructor
109
+ */
110
+ EIGEN_DEVICE_FUNC inline Reshaped(XprType& xpr) : Impl(xpr) {
111
+ EIGEN_STATIC_ASSERT(RowsAtCompileTime != Dynamic && ColsAtCompileTime != Dynamic,
112
+ THIS_METHOD_IS_ONLY_FOR_FIXED_SIZE)
113
+ eigen_assert(Rows * Cols == xpr.rows() * xpr.cols());
114
+ }
115
+
116
+ /** Dynamic-size constructor
117
+ */
118
+ EIGEN_DEVICE_FUNC inline Reshaped(XprType& xpr, Index reshapeRows, Index reshapeCols)
119
+ : Impl(xpr, reshapeRows, reshapeCols) {
120
+ eigen_assert((RowsAtCompileTime == Dynamic || RowsAtCompileTime == reshapeRows) &&
121
+ (ColsAtCompileTime == Dynamic || ColsAtCompileTime == reshapeCols));
122
+ eigen_assert(reshapeRows * reshapeCols == xpr.rows() * xpr.cols());
123
+ }
124
+ };
125
+
126
+ // The generic default implementation for dense reshape simply forward to the internal::ReshapedImpl_dense
127
+ // that must be specialized for direct and non-direct access...
128
+ template <typename XprType, int Rows, int Cols, int Order>
129
+ class ReshapedImpl<XprType, Rows, Cols, Order, Dense>
130
+ : public internal::ReshapedImpl_dense<XprType, Rows, Cols, Order,
131
+ internal::traits<Reshaped<XprType, Rows, Cols, Order> >::HasDirectAccess> {
132
+ typedef internal::ReshapedImpl_dense<XprType, Rows, Cols, Order,
133
+ internal::traits<Reshaped<XprType, Rows, Cols, Order> >::HasDirectAccess>
134
+ Impl;
135
+
136
+ public:
137
+ typedef Impl Base;
138
+ EIGEN_INHERIT_ASSIGNMENT_OPERATORS(ReshapedImpl)
139
+ EIGEN_DEVICE_FUNC inline ReshapedImpl(XprType& xpr) : Impl(xpr) {}
140
+ EIGEN_DEVICE_FUNC inline ReshapedImpl(XprType& xpr, Index reshapeRows, Index reshapeCols)
141
+ : Impl(xpr, reshapeRows, reshapeCols) {}
142
+ };
143
+
144
+ namespace internal {
145
+
146
+ /** \internal Internal implementation of dense Reshaped in the general case. */
147
+ template <typename XprType, int Rows, int Cols, int Order>
148
+ class ReshapedImpl_dense<XprType, Rows, Cols, Order, false>
149
+ : public internal::dense_xpr_base<Reshaped<XprType, Rows, Cols, Order> >::type {
150
+ typedef Reshaped<XprType, Rows, Cols, Order> ReshapedType;
151
+
152
+ public:
153
+ typedef typename internal::dense_xpr_base<ReshapedType>::type Base;
154
+ EIGEN_DENSE_PUBLIC_INTERFACE(ReshapedType)
155
+ EIGEN_INHERIT_ASSIGNMENT_OPERATORS(ReshapedImpl_dense)
156
+
157
+ typedef typename internal::ref_selector<XprType>::non_const_type MatrixTypeNested;
158
+ typedef internal::remove_all_t<XprType> NestedExpression;
159
+
160
+ class InnerIterator;
161
+
162
+ /** Fixed-size constructor
163
+ */
164
+ EIGEN_DEVICE_FUNC inline ReshapedImpl_dense(XprType& xpr) : m_xpr(xpr), m_rows(Rows), m_cols(Cols) {}
165
+
166
+ /** Dynamic-size constructor
167
+ */
168
+ EIGEN_DEVICE_FUNC inline ReshapedImpl_dense(XprType& xpr, Index nRows, Index nCols)
169
+ : m_xpr(xpr), m_rows(nRows), m_cols(nCols) {}
170
+
171
+ EIGEN_DEVICE_FUNC Index rows() const { return m_rows; }
172
+ EIGEN_DEVICE_FUNC Index cols() const { return m_cols; }
173
+
174
+ #ifdef EIGEN_PARSED_BY_DOXYGEN
175
+ /** \sa MapBase::data() */
176
+ EIGEN_DEVICE_FUNC inline const Scalar* data() const;
177
+ EIGEN_DEVICE_FUNC inline Index innerStride() const;
178
+ EIGEN_DEVICE_FUNC inline Index outerStride() const;
179
+ #endif
180
+
181
+ /** \returns the nested expression */
182
+ EIGEN_DEVICE_FUNC const internal::remove_all_t<XprType>& nestedExpression() const { return m_xpr; }
183
+
184
+ /** \returns the nested expression */
185
+ EIGEN_DEVICE_FUNC std::remove_reference_t<XprType>& nestedExpression() { return m_xpr; }
186
+
187
+ protected:
188
+ MatrixTypeNested m_xpr;
189
+ const internal::variable_if_dynamic<Index, Rows> m_rows;
190
+ const internal::variable_if_dynamic<Index, Cols> m_cols;
191
+ };
192
+
193
+ /** \internal Internal implementation of dense Reshaped in the direct access case. */
194
+ template <typename XprType, int Rows, int Cols, int Order>
195
+ class ReshapedImpl_dense<XprType, Rows, Cols, Order, true> : public MapBase<Reshaped<XprType, Rows, Cols, Order> > {
196
+ typedef Reshaped<XprType, Rows, Cols, Order> ReshapedType;
197
+ typedef typename internal::ref_selector<XprType>::non_const_type XprTypeNested;
198
+
199
+ public:
200
+ typedef MapBase<ReshapedType> Base;
201
+ EIGEN_DENSE_PUBLIC_INTERFACE(ReshapedType)
202
+ EIGEN_INHERIT_ASSIGNMENT_OPERATORS(ReshapedImpl_dense)
203
+
204
+ /** Fixed-size constructor
205
+ */
206
+ EIGEN_DEVICE_FUNC inline ReshapedImpl_dense(XprType& xpr) : Base(xpr.data()), m_xpr(xpr) {}
207
+
208
+ /** Dynamic-size constructor
209
+ */
210
+ EIGEN_DEVICE_FUNC inline ReshapedImpl_dense(XprType& xpr, Index nRows, Index nCols)
211
+ : Base(xpr.data(), nRows, nCols), m_xpr(xpr) {}
212
+
213
+ EIGEN_DEVICE_FUNC const internal::remove_all_t<XprTypeNested>& nestedExpression() const { return m_xpr; }
214
+
215
+ EIGEN_DEVICE_FUNC XprType& nestedExpression() { return m_xpr; }
216
+
217
+ /** \sa MapBase::innerStride() */
218
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index innerStride() const { return m_xpr.innerStride(); }
219
+
220
+ /** \sa MapBase::outerStride() */
221
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index outerStride() const {
222
+ return (((Flags & RowMajorBit) == RowMajorBit) ? this->cols() : this->rows()) * m_xpr.innerStride();
223
+ }
224
+
225
+ protected:
226
+ XprTypeNested m_xpr;
227
+ };
228
+
229
+ // Evaluators
230
+ template <typename ArgType, int Rows, int Cols, int Order, bool HasDirectAccess>
231
+ struct reshaped_evaluator;
232
+
233
+ template <typename ArgType, int Rows, int Cols, int Order>
234
+ struct evaluator<Reshaped<ArgType, Rows, Cols, Order> >
235
+ : reshaped_evaluator<ArgType, Rows, Cols, Order, traits<Reshaped<ArgType, Rows, Cols, Order> >::HasDirectAccess> {
236
+ typedef Reshaped<ArgType, Rows, Cols, Order> XprType;
237
+ typedef typename XprType::Scalar Scalar;
238
+ // TODO: should check for smaller packet types
239
+ typedef typename packet_traits<Scalar>::type PacketScalar;
240
+
241
+ enum {
242
+ CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
243
+ HasDirectAccess = traits<XprType>::HasDirectAccess,
244
+
245
+ // RowsAtCompileTime = traits<XprType>::RowsAtCompileTime,
246
+ // ColsAtCompileTime = traits<XprType>::ColsAtCompileTime,
247
+ // MaxRowsAtCompileTime = traits<XprType>::MaxRowsAtCompileTime,
248
+ // MaxColsAtCompileTime = traits<XprType>::MaxColsAtCompileTime,
249
+ //
250
+ // InnerStrideAtCompileTime = traits<XprType>::HasSameStorageOrderAsXprType
251
+ // ? int(inner_stride_at_compile_time<ArgType>::ret)
252
+ // : Dynamic,
253
+ // OuterStrideAtCompileTime = Dynamic,
254
+
255
+ FlagsLinearAccessBit =
256
+ (traits<XprType>::RowsAtCompileTime == 1 || traits<XprType>::ColsAtCompileTime == 1 || HasDirectAccess)
257
+ ? LinearAccessBit
258
+ : 0,
259
+ FlagsRowMajorBit = (traits<XprType>::ReshapedStorageOrder == int(RowMajor)) ? RowMajorBit : 0,
260
+ FlagsDirectAccessBit = HasDirectAccess ? DirectAccessBit : 0,
261
+ Flags0 = evaluator<ArgType>::Flags & (HereditaryBits & ~RowMajorBit),
262
+ Flags = Flags0 | FlagsLinearAccessBit | FlagsRowMajorBit | FlagsDirectAccessBit,
263
+
264
+ PacketAlignment = unpacket_traits<PacketScalar>::alignment,
265
+ Alignment = evaluator<ArgType>::Alignment
266
+ };
267
+ typedef reshaped_evaluator<ArgType, Rows, Cols, Order, HasDirectAccess> reshaped_evaluator_type;
268
+ EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr) : reshaped_evaluator_type(xpr) {
269
+ EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
270
+ }
271
+ };
272
+
273
+ template <typename ArgType, int Rows, int Cols, int Order>
274
+ struct reshaped_evaluator<ArgType, Rows, Cols, Order, /* HasDirectAccess */ false>
275
+ : evaluator_base<Reshaped<ArgType, Rows, Cols, Order> > {
276
+ typedef Reshaped<ArgType, Rows, Cols, Order> XprType;
277
+
278
+ enum {
279
+ CoeffReadCost = evaluator<ArgType>::CoeffReadCost /* TODO + cost of index computations */,
280
+
281
+ Flags = (evaluator<ArgType>::Flags & (HereditaryBits /*| LinearAccessBit | DirectAccessBit*/)),
282
+
283
+ Alignment = 0
284
+ };
285
+
286
+ EIGEN_DEVICE_FUNC explicit reshaped_evaluator(const XprType& xpr) : m_argImpl(xpr.nestedExpression()), m_xpr(xpr) {
287
+ EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
288
+ }
289
+
290
+ typedef typename XprType::Scalar Scalar;
291
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
292
+
293
+ typedef std::pair<Index, Index> RowCol;
294
+
295
+ EIGEN_DEVICE_FUNC inline RowCol index_remap(Index rowId, Index colId) const {
296
+ if (Order == ColMajor) {
297
+ const Index nth_elem_idx = colId * m_xpr.rows() + rowId;
298
+ return RowCol(nth_elem_idx % m_xpr.nestedExpression().rows(), nth_elem_idx / m_xpr.nestedExpression().rows());
299
+ } else {
300
+ const Index nth_elem_idx = colId + rowId * m_xpr.cols();
301
+ return RowCol(nth_elem_idx / m_xpr.nestedExpression().cols(), nth_elem_idx % m_xpr.nestedExpression().cols());
302
+ }
303
+ }
304
+
305
+ EIGEN_DEVICE_FUNC inline Scalar& coeffRef(Index rowId, Index colId) {
306
+ EIGEN_STATIC_ASSERT_LVALUE(XprType)
307
+ const RowCol row_col = index_remap(rowId, colId);
308
+ return m_argImpl.coeffRef(row_col.first, row_col.second);
309
+ }
310
+
311
+ EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index rowId, Index colId) const {
312
+ const RowCol row_col = index_remap(rowId, colId);
313
+ return m_argImpl.coeffRef(row_col.first, row_col.second);
314
+ }
315
+
316
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const CoeffReturnType coeff(Index rowId, Index colId) const {
317
+ const RowCol row_col = index_remap(rowId, colId);
318
+ return m_argImpl.coeff(row_col.first, row_col.second);
319
+ }
320
+
321
+ EIGEN_DEVICE_FUNC inline Scalar& coeffRef(Index index) {
322
+ EIGEN_STATIC_ASSERT_LVALUE(XprType)
323
+ const RowCol row_col = index_remap(Rows == 1 ? 0 : index, Rows == 1 ? index : 0);
324
+ return m_argImpl.coeffRef(row_col.first, row_col.second);
325
+ }
326
+
327
+ EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index index) const {
328
+ const RowCol row_col = index_remap(Rows == 1 ? 0 : index, Rows == 1 ? index : 0);
329
+ return m_argImpl.coeffRef(row_col.first, row_col.second);
330
+ }
331
+
332
+ EIGEN_DEVICE_FUNC inline const CoeffReturnType coeff(Index index) const {
333
+ const RowCol row_col = index_remap(Rows == 1 ? 0 : index, Rows == 1 ? index : 0);
334
+ return m_argImpl.coeff(row_col.first, row_col.second);
335
+ }
336
+ #if 0
337
+ EIGEN_DEVICE_FUNC
338
+ template<int LoadMode>
339
+ inline PacketScalar packet(Index rowId, Index colId) const
340
+ {
341
+ const RowCol row_col = index_remap(rowId, colId);
342
+ return m_argImpl.template packet<Unaligned>(row_col.first, row_col.second);
343
+
344
+ }
345
+
346
+ template<int LoadMode>
347
+ EIGEN_DEVICE_FUNC
348
+ inline void writePacket(Index rowId, Index colId, const PacketScalar& val)
349
+ {
350
+ const RowCol row_col = index_remap(rowId, colId);
351
+ m_argImpl.const_cast_derived().template writePacket<Unaligned>
352
+ (row_col.first, row_col.second, val);
353
+ }
354
+
355
+ template<int LoadMode>
356
+ EIGEN_DEVICE_FUNC
357
+ inline PacketScalar packet(Index index) const
358
+ {
359
+ const RowCol row_col = index_remap(RowsAtCompileTime == 1 ? 0 : index,
360
+ RowsAtCompileTime == 1 ? index : 0);
361
+ return m_argImpl.template packet<Unaligned>(row_col.first, row_col.second);
362
+ }
363
+
364
+ template<int LoadMode>
365
+ EIGEN_DEVICE_FUNC
366
+ inline void writePacket(Index index, const PacketScalar& val)
367
+ {
368
+ const RowCol row_col = index_remap(RowsAtCompileTime == 1 ? 0 : index,
369
+ RowsAtCompileTime == 1 ? index : 0);
370
+ return m_argImpl.template packet<Unaligned>(row_col.first, row_col.second, val);
371
+ }
372
+ #endif
373
+ protected:
374
+ evaluator<ArgType> m_argImpl;
375
+ const XprType& m_xpr;
376
+ };
377
+
378
+ template <typename ArgType, int Rows, int Cols, int Order>
379
+ struct reshaped_evaluator<ArgType, Rows, Cols, Order, /* HasDirectAccess */ true>
380
+ : mapbase_evaluator<Reshaped<ArgType, Rows, Cols, Order>,
381
+ typename Reshaped<ArgType, Rows, Cols, Order>::PlainObject> {
382
+ typedef Reshaped<ArgType, Rows, Cols, Order> XprType;
383
+ typedef typename XprType::Scalar Scalar;
384
+
385
+ EIGEN_DEVICE_FUNC explicit reshaped_evaluator(const XprType& xpr)
386
+ : mapbase_evaluator<XprType, typename XprType::PlainObject>(xpr) {
387
+ // TODO: for the 3.4 release, this should be turned to an internal assertion, but let's keep it as is for the beta
388
+ // lifetime
389
+ eigen_assert(((std::uintptr_t(xpr.data()) % plain_enum_max(1, evaluator<XprType>::Alignment)) == 0) &&
390
+ "data is not aligned");
391
+ }
392
+ };
393
+
394
+ } // end namespace internal
395
+
396
+ } // end namespace Eigen
397
+
398
+ #endif // EIGEN_RESHAPED_H
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/ReturnByValue.h ADDED
@@ -0,0 +1,115 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
5
+ // Copyright (C) 2009-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
6
+ //
7
+ // This Source Code Form is subject to the terms of the Mozilla
8
+ // Public License v. 2.0. If a copy of the MPL was not distributed
9
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
10
+
11
+ #ifndef EIGEN_RETURNBYVALUE_H
12
+ #define EIGEN_RETURNBYVALUE_H
13
+
14
+ // IWYU pragma: private
15
+ #include "./InternalHeaderCheck.h"
16
+
17
+ namespace Eigen {
18
+
19
+ namespace internal {
20
+
21
// Traits of a ReturnByValue expression: inherit everything from the traits of
// the plain ReturnType it evaluates to, with two flag adjustments below.
template <typename Derived>
struct traits<ReturnByValue<Derived> > : public traits<typename traits<Derived>::ReturnType> {
  enum {
    // We're disabling the DirectAccess because e.g. the constructor of
    // the Block-with-DirectAccess expression requires to have a coeffRef method.
    // Also, we don't want to have to implement the stride stuff.
    // EvalBeforeNestingBit forces evaluation into a temporary whenever the
    // expression is nested inside a larger expression.
    Flags = (traits<typename traits<Derived>::ReturnType>::Flags | EvalBeforeNestingBit) & ~DirectAccessBit
  };
};
30
+
31
+ /* The ReturnByValue object doesn't even have a coeff() method.
32
+ * So the only way that nesting it in an expression can work, is by evaluating it into a plain matrix.
33
+ * So internal::nested always gives the plain return matrix type.
34
+ *
35
+ * FIXME: I don't understand why we need this specialization: isn't this taken care of by the EvalBeforeNestingBit ??
36
+ * Answer: EvalBeforeNestingBit should be deprecated since we have the evaluators
37
+ */
38
// Nesting a ReturnByValue always yields its evaluated plain ReturnType
// (see the explanatory comment above): the expression has no coeff() method,
// so it can only participate in other expressions once materialized.
template <typename Derived, int n, typename PlainObject>
struct nested_eval<ReturnByValue<Derived>, n, PlainObject> {
  typedef typename traits<Derived>::ReturnType type;
};
42
+
43
+ } // end namespace internal
44
+
45
/** \class ReturnByValue
 * \ingroup Core_Module
 *
 * \brief CRTP base for expressions that can only be evaluated into a plain
 * object. The derived class must provide evalTo(dst), rows() and cols();
 * coefficient-wise access is deliberately unavailable (see below).
 */
template <typename Derived>
class ReturnByValue : public internal::dense_xpr_base<ReturnByValue<Derived> >::type, internal::no_assignment_operator {
 public:
  typedef typename internal::traits<Derived>::ReturnType ReturnType;

  typedef typename internal::dense_xpr_base<ReturnByValue>::type Base;
  EIGEN_DENSE_PUBLIC_INTERFACE(ReturnByValue)

  /** Evaluates this expression into \a dst by forwarding to the derived class. */
  template <typename Dest>
  EIGEN_DEVICE_FUNC inline void evalTo(Dest& dst) const {
    static_cast<const Derived*>(this)->evalTo(dst);
  }
  EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index rows() const EIGEN_NOEXCEPT {
    return static_cast<const Derived*>(this)->rows();
  }
  EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index cols() const EIGEN_NOEXCEPT {
    return static_cast<const Derived*>(this)->cols();
  }

#ifndef EIGEN_PARSED_BY_DOXYGEN
  // Deliberately unusable coeff accessors: the long macro name below surfaces
  // in compiler errors so users immediately see *why* single-coefficient
  // access is forbidden on this expression type.
#define Unusable \
  YOU_ARE_TRYING_TO_ACCESS_A_SINGLE_COEFFICIENT_IN_A_SPECIAL_EXPRESSION_WHERE_THAT_IS_NOT_ALLOWED_BECAUSE_THAT_WOULD_BE_INEFFICIENT
  class Unusable {
    Unusable(const Unusable&) {}
    Unusable& operator=(const Unusable&) { return *this; }
  };
  const Unusable& coeff(Index) const { return *reinterpret_cast<const Unusable*>(this); }
  const Unusable& coeff(Index, Index) const { return *reinterpret_cast<const Unusable*>(this); }
  Unusable& coeffRef(Index) { return *reinterpret_cast<Unusable*>(this); }
  Unusable& coeffRef(Index, Index) { return *reinterpret_cast<Unusable*>(this); }
#undef Unusable
#endif
};
82
+
83
// Assigning a ReturnByValue expression evaluates it directly into the
// destination; no intermediate temporary is created on this path.
template <typename Derived>
template <typename OtherDerived>
EIGEN_DEVICE_FUNC Derived& DenseBase<Derived>::operator=(const ReturnByValue<OtherDerived>& other) {
  other.evalTo(derived());
  return derived();
}
89
+
90
+ namespace internal {
91
+
92
+ // Expression is evaluated in a temporary; default implementation of Assignment is bypassed so that
93
+ // when a ReturnByValue expression is assigned, the evaluator is not constructed.
94
+ // TODO: Finalize port to new regime; ReturnByValue should not exist in the expression world
95
+
96
// Evaluator for ReturnByValue: evaluates the expression into an owned
// temporary (m_result) and then behaves as an evaluator of that plain object.
template <typename Derived>
struct evaluator<ReturnByValue<Derived> > : public evaluator<typename internal::traits<Derived>::ReturnType> {
  typedef ReturnByValue<Derived> XprType;
  typedef typename internal::traits<Derived>::ReturnType PlainObject;
  typedef evaluator<PlainObject> Base;

  EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr) : m_result(xpr.rows(), xpr.cols()) {
    // m_result is sized first; the base evaluator is then re-constructed
    // in place so it refers to m_result before the expression is evaluated
    // into it.
    internal::construct_at<Base>(this, m_result);
    xpr.evalTo(m_result);
  }

 protected:
  PlainObject m_result;
};
110
+
111
+ } // end namespace internal
112
+
113
+ } // end namespace Eigen
114
+
115
+ #endif // EIGEN_RETURNBYVALUE_H
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/Reverse.h ADDED
@@ -0,0 +1,196 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
5
+ // Copyright (C) 2009 Ricard Marxer <email@ricardmarxer.com>
6
+ // Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
7
+ //
8
+ // This Source Code Form is subject to the terms of the Mozilla
9
+ // Public License v. 2.0. If a copy of the MPL was not distributed
10
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
11
+
12
+ #ifndef EIGEN_REVERSE_H
13
+ #define EIGEN_REVERSE_H
14
+
15
+ // IWYU pragma: private
16
+ #include "./InternalHeaderCheck.h"
17
+
18
+ namespace Eigen {
19
+
20
+ namespace internal {
21
+
22
// Traits of a Reverse expression: same shape and scalar as the argument.
template <typename MatrixType, int Direction>
struct traits<Reverse<MatrixType, Direction> > : traits<MatrixType> {
  typedef typename MatrixType::Scalar Scalar;
  typedef typename traits<MatrixType>::StorageKind StorageKind;
  typedef typename traits<MatrixType>::XprKind XprKind;
  typedef typename ref_selector<MatrixType>::type MatrixTypeNested;
  typedef std::remove_reference_t<MatrixTypeNested> MatrixTypeNested_;
  enum {
    RowsAtCompileTime = MatrixType::RowsAtCompileTime,
    ColsAtCompileTime = MatrixType::ColsAtCompileTime,
    MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
    MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
    // Only storage order and lvalue-ness are propagated from the nested
    // expression; all other flags are masked out.
    Flags = MatrixTypeNested_::Flags & (RowMajorBit | LvalueBit)
  };
};
37
+
38
// Compile-time helper: reverses the element order inside a packet only when
// ReversePacket is true (primary template); the false specialization is a
// pass-through for the case where the packet layout already matches.
template <typename PacketType, bool ReversePacket>
struct reverse_packet_cond {
  static inline PacketType run(const PacketType& x) { return preverse(x); }
};

template <typename PacketType>
struct reverse_packet_cond<PacketType, false> {
  static inline PacketType run(const PacketType& x) { return x; }
};
47
+
48
+ } // end namespace internal
49
+
50
+ /** \class Reverse
51
+ * \ingroup Core_Module
52
+ *
53
+ * \brief Expression of the reverse of a vector or matrix
54
+ *
55
+ * \tparam MatrixType the type of the object of which we are taking the reverse
56
+ * \tparam Direction defines the direction of the reverse operation, can be Vertical, Horizontal, or BothDirections
57
+ *
58
+ * This class represents an expression of the reverse of a vector.
59
+ * It is the return type of MatrixBase::reverse() and VectorwiseOp::reverse()
60
+ * and most of the time this is the only way it is used.
61
+ *
62
+ * \sa MatrixBase::reverse(), VectorwiseOp::reverse()
63
+ */
64
template <typename MatrixType, int Direction>
class Reverse : public internal::dense_xpr_base<Reverse<MatrixType, Direction> >::type {
 public:
  typedef typename internal::dense_xpr_base<Reverse>::type Base;
  EIGEN_DENSE_PUBLIC_INTERFACE(Reverse)
  typedef internal::remove_all_t<MatrixType> NestedExpression;
  using Base::IsRowMajor;

 protected:
  enum {
    PacketSize = internal::packet_traits<Scalar>::size,
    IsColMajor = !IsRowMajor,
    // Which dimensions this expression actually reverses.
    ReverseRow = (Direction == Vertical) || (Direction == BothDirections),
    ReverseCol = (Direction == Horizontal) || (Direction == BothDirections),
    // Index offsets used when addressing whole packets in a reversed dimension.
    OffsetRow = ReverseRow && IsColMajor ? PacketSize : 1,
    OffsetCol = ReverseCol && IsRowMajor ? PacketSize : 1,
    // True when the elements *within* a packet must also be reversed, i.e.
    // when the reversal runs along the storage-major direction.
    ReversePacket = (Direction == BothDirections) || ((Direction == Vertical) && IsColMajor) ||
                    ((Direction == Horizontal) && IsRowMajor)
  };
  typedef internal::reverse_packet_cond<PacketScalar, ReversePacket> reverse_packet;

 public:
  EIGEN_DEVICE_FUNC explicit inline Reverse(const MatrixType& matrix) : m_matrix(matrix) {}

  EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Reverse)

  EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index rows() const EIGEN_NOEXCEPT { return m_matrix.rows(); }
  EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index cols() const EIGEN_NOEXCEPT { return m_matrix.cols(); }

  // Negated inner stride: the reversed expression walks the storage backwards.
  EIGEN_DEVICE_FUNC inline Index innerStride() const { return -m_matrix.innerStride(); }

  EIGEN_DEVICE_FUNC const internal::remove_all_t<typename MatrixType::Nested>& nestedExpression() const {
    return m_matrix;
  }

 protected:
  typename MatrixType::Nested m_matrix;
};
+ };
102
+
103
+ /** \returns an expression of the reverse of *this.
104
+ *
105
+ * Example: \include MatrixBase_reverse.cpp
106
+ * Output: \verbinclude MatrixBase_reverse.out
107
+ *
108
+ */
109
template <typename Derived>
EIGEN_DEVICE_FUNC inline typename DenseBase<Derived>::ReverseReturnType DenseBase<Derived>::reverse() {
  // Lightweight expression: no data is copied, the Reverse wrapper just
  // remaps coefficient indices of *this.
  return ReverseReturnType(derived());
}
113
+
114
+ // reverse const overload moved DenseBase.h due to a CUDA compiler bug
115
+
116
+ /** This is the "in place" version of reverse: it reverses \c *this.
117
+ *
118
+ * In most cases it is probably better to simply use the reversed expression
119
+ * of a matrix. However, when reversing the matrix data itself is really needed,
120
+ * then this "in-place" version is probably the right choice because it provides
121
+ * the following additional benefits:
122
+ * - less error prone: doing the same operation with .reverse() requires special care:
123
+ * \code m = m.reverse().eval(); \endcode
124
+ * - this API enables reverse operations without the need for a temporary
125
+ * - it allows future optimizations (cache friendliness, etc.)
126
+ *
127
+ * \sa VectorwiseOp::reverseInPlace(), reverse() */
128
+ template <typename Derived>
129
+ EIGEN_DEVICE_FUNC inline void DenseBase<Derived>::reverseInPlace() {
130
+ if (cols() > rows()) {
131
+ Index half = cols() / 2;
132
+ leftCols(half).swap(rightCols(half).reverse());
133
+ if ((cols() % 2) == 1) {
134
+ Index half2 = rows() / 2;
135
+ col(half).head(half2).swap(col(half).tail(half2).reverse());
136
+ }
137
+ } else {
138
+ Index half = rows() / 2;
139
+ topRows(half).swap(bottomRows(half).reverse());
140
+ if ((rows() % 2) == 1) {
141
+ Index half2 = cols() / 2;
142
+ row(half).head(half2).swap(row(half).tail(half2).reverse());
143
+ }
144
+ }
145
+ }
146
+
147
+ namespace internal {
148
+
149
// Direction-dispatched implementation behind VectorwiseOp::reverseInPlace().
template <int Direction>
struct vectorwise_reverse_inplace_impl;

// Vertical: reverse each column by swapping the top and bottom halves
// (the middle row of an odd-row matrix stays in place).
template <>
struct vectorwise_reverse_inplace_impl<Vertical> {
  template <typename ExpressionType>
  static void run(ExpressionType& xpr) {
    // Propagate a compile-time half size when the row count is static.
    constexpr Index HalfAtCompileTime =
        ExpressionType::RowsAtCompileTime == Dynamic ? Dynamic : ExpressionType::RowsAtCompileTime / 2;
    Index half = xpr.rows() / 2;
    xpr.template topRows<HalfAtCompileTime>(half).swap(
        xpr.template bottomRows<HalfAtCompileTime>(half).colwise().reverse());
  }
};

// Horizontal: reverse each row by swapping the left and right halves
// (the middle column of an odd-column matrix stays in place).
template <>
struct vectorwise_reverse_inplace_impl<Horizontal> {
  template <typename ExpressionType>
  static void run(ExpressionType& xpr) {
    // Propagate a compile-time half size when the column count is static.
    constexpr Index HalfAtCompileTime =
        ExpressionType::ColsAtCompileTime == Dynamic ? Dynamic : ExpressionType::ColsAtCompileTime / 2;
    Index half = xpr.cols() / 2;
    xpr.template leftCols<HalfAtCompileTime>(half).swap(
        xpr.template rightCols<HalfAtCompileTime>(half).rowwise().reverse());
  }
};
175
+
176
+ } // end namespace internal
177
+
178
+ /** This is the "in place" version of VectorwiseOp::reverse: it reverses each column or row of \c *this.
179
+ *
180
+ * In most cases it is probably better to simply use the reversed expression
181
+ * of a matrix. However, when reversing the matrix data itself is really needed,
182
+ * then this "in-place" version is probably the right choice because it provides
183
+ * the following additional benefits:
184
+ * - less error prone: doing the same operation with .reverse() requires special care:
185
+ * \code m = m.reverse().eval(); \endcode
186
+ * - this API enables reverse operations without the need for a temporary
187
+ *
188
+ * \sa DenseBase::reverseInPlace(), reverse() */
189
template <typename ExpressionType, int Direction>
EIGEN_DEVICE_FUNC void VectorwiseOp<ExpressionType, Direction>::reverseInPlace() {
  // Delegate to the direction-specific half-swapping implementation above.
  internal::vectorwise_reverse_inplace_impl<Direction>::run(m_matrix);
}
193
+
194
+ } // end namespace Eigen
195
+
196
+ #endif // EIGEN_REVERSE_H
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/Select.h ADDED
@@ -0,0 +1,156 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
5
+ //
6
+ // This Source Code Form is subject to the terms of the Mozilla
7
+ // Public License v. 2.0. If a copy of the MPL was not distributed
8
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9
+
10
+ #ifndef EIGEN_SELECT_H
11
+ #define EIGEN_SELECT_H
12
+
13
+ // IWYU pragma: private
14
+ #include "./InternalHeaderCheck.h"
15
+
16
+ namespace Eigen {
17
+
18
+ /** \class Select
19
+ * \ingroup Core_Module
20
+ *
21
+ * \brief Expression of a coefficient wise version of the C++ ternary operator ?:
22
+ *
23
+ * \tparam ConditionMatrixType the type of the \em condition expression which must be a boolean matrix
24
+ * \tparam ThenMatrixType the type of the \em then expression
25
+ * \tparam ElseMatrixType the type of the \em else expression
26
+ *
27
+ * This class represents an expression of a coefficient wise version of the C++ ternary operator ?:.
28
+ * It is the return type of DenseBase::select() and most of the time this is the only way it is used.
29
+ *
30
+ * \sa DenseBase::select(const DenseBase<ThenDerived>&, const DenseBase<ElseDerived>&) const
31
+ */
32
+
33
+ namespace internal {
34
+ template <typename ConditionMatrixType, typename ThenMatrixType, typename ElseMatrixType>
35
+ struct traits<Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> > : traits<ThenMatrixType> {
36
+ typedef typename traits<ThenMatrixType>::Scalar Scalar;
37
+ typedef Dense StorageKind;
38
+ typedef typename traits<ThenMatrixType>::XprKind XprKind;
39
+ typedef typename ConditionMatrixType::Nested ConditionMatrixNested;
40
+ typedef typename ThenMatrixType::Nested ThenMatrixNested;
41
+ typedef typename ElseMatrixType::Nested ElseMatrixNested;
42
+ enum {
43
+ RowsAtCompileTime = ConditionMatrixType::RowsAtCompileTime,
44
+ ColsAtCompileTime = ConditionMatrixType::ColsAtCompileTime,
45
+ MaxRowsAtCompileTime = ConditionMatrixType::MaxRowsAtCompileTime,
46
+ MaxColsAtCompileTime = ConditionMatrixType::MaxColsAtCompileTime,
47
+ Flags = (unsigned int)ThenMatrixType::Flags & ElseMatrixType::Flags & RowMajorBit
48
+ };
49
+ };
50
+ } // namespace internal
51
+
52
+ template <typename ConditionMatrixType, typename ThenMatrixType, typename ElseMatrixType>
53
+ class Select : public internal::dense_xpr_base<Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> >::type,
54
+ internal::no_assignment_operator {
55
+ public:
56
+ typedef typename internal::dense_xpr_base<Select>::type Base;
57
+ EIGEN_DENSE_PUBLIC_INTERFACE(Select)
58
+
59
+ inline EIGEN_DEVICE_FUNC Select(const ConditionMatrixType& a_conditionMatrix, const ThenMatrixType& a_thenMatrix,
60
+ const ElseMatrixType& a_elseMatrix)
61
+ : m_condition(a_conditionMatrix), m_then(a_thenMatrix), m_else(a_elseMatrix) {
62
+ eigen_assert(m_condition.rows() == m_then.rows() && m_condition.rows() == m_else.rows());
63
+ eigen_assert(m_condition.cols() == m_then.cols() && m_condition.cols() == m_else.cols());
64
+ }
65
+
66
+ inline EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index rows() const EIGEN_NOEXCEPT { return m_condition.rows(); }
67
+ inline EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index cols() const EIGEN_NOEXCEPT { return m_condition.cols(); }
68
+
69
+ inline EIGEN_DEVICE_FUNC const Scalar coeff(Index i, Index j) const {
70
+ if (m_condition.coeff(i, j))
71
+ return m_then.coeff(i, j);
72
+ else
73
+ return m_else.coeff(i, j);
74
+ }
75
+
76
+ inline EIGEN_DEVICE_FUNC const Scalar coeff(Index i) const {
77
+ if (m_condition.coeff(i))
78
+ return m_then.coeff(i);
79
+ else
80
+ return m_else.coeff(i);
81
+ }
82
+
83
+ inline EIGEN_DEVICE_FUNC const ConditionMatrixType& conditionMatrix() const { return m_condition; }
84
+
85
+ inline EIGEN_DEVICE_FUNC const ThenMatrixType& thenMatrix() const { return m_then; }
86
+
87
+ inline EIGEN_DEVICE_FUNC const ElseMatrixType& elseMatrix() const { return m_else; }
88
+
89
+ protected:
90
+ typename ConditionMatrixType::Nested m_condition;
91
+ typename ThenMatrixType::Nested m_then;
92
+ typename ElseMatrixType::Nested m_else;
93
+ };
94
+
95
+ /** \returns a matrix where each coefficient (i,j) is equal to \a thenMatrix(i,j)
96
+ * if \c *this(i,j) != Scalar(0), and \a elseMatrix(i,j) otherwise.
97
+ *
98
+ * Example: \include MatrixBase_select.cpp
99
+ * Output: \verbinclude MatrixBase_select.out
100
+ *
101
+ * \sa DenseBase::bitwiseSelect(const DenseBase<ThenDerived>&, const DenseBase<ElseDerived>&)
102
+ */
103
template <typename Derived>
template <typename ThenDerived, typename ElseDerived>
inline EIGEN_DEVICE_FUNC CwiseTernaryOp<
    internal::scalar_boolean_select_op<typename DenseBase<ThenDerived>::Scalar, typename DenseBase<ElseDerived>::Scalar,
                                       typename DenseBase<Derived>::Scalar>,
    ThenDerived, ElseDerived, Derived>
DenseBase<Derived>::select(const DenseBase<ThenDerived>& thenMatrix, const DenseBase<ElseDerived>& elseMatrix) const {
  // *this is the condition operand; the boolean-select functor picks between
  // the then/else coefficients for each position.
  using Op = internal::scalar_boolean_select_op<typename DenseBase<ThenDerived>::Scalar,
                                                typename DenseBase<ElseDerived>::Scalar, Scalar>;
  return CwiseTernaryOp<Op, ThenDerived, ElseDerived, Derived>(thenMatrix.derived(), elseMatrix.derived(), derived(),
                                                               Op());
}
115
+ /** Version of DenseBase::select(const DenseBase&, const DenseBase&) with
116
+ * the \em else expression being a scalar value.
117
+ *
118
+ * \sa DenseBase::booleanSelect(const DenseBase<ThenDerived>&, const DenseBase<ElseDerived>&) const, class Select
119
+ */
120
template <typename Derived>
template <typename ThenDerived>
inline EIGEN_DEVICE_FUNC CwiseTernaryOp<
    internal::scalar_boolean_select_op<typename DenseBase<ThenDerived>::Scalar, typename DenseBase<ThenDerived>::Scalar,
                                       typename DenseBase<Derived>::Scalar>,
    ThenDerived, typename DenseBase<ThenDerived>::ConstantReturnType, Derived>
DenseBase<Derived>::select(const DenseBase<ThenDerived>& thenMatrix,
                           const typename DenseBase<ThenDerived>::Scalar& elseScalar) const {
  // The scalar "else" value is broadcast as a constant expression with the
  // same dimensions as *this before applying the ternary select.
  using ElseConstantType = typename DenseBase<ThenDerived>::ConstantReturnType;
  using Op = internal::scalar_boolean_select_op<typename DenseBase<ThenDerived>::Scalar,
                                                typename DenseBase<ThenDerived>::Scalar, Scalar>;
  return CwiseTernaryOp<Op, ThenDerived, ElseConstantType, Derived>(
      thenMatrix.derived(), ElseConstantType(rows(), cols(), elseScalar), derived(), Op());
}
134
+ /** Version of DenseBase::select(const DenseBase&, const DenseBase&) with
135
+ * the \em then expression being a scalar value.
136
+ *
137
+ * \sa DenseBase::booleanSelect(const DenseBase<ThenDerived>&, const DenseBase<ElseDerived>&) const, class Select
138
+ */
139
template <typename Derived>
template <typename ElseDerived>
inline EIGEN_DEVICE_FUNC CwiseTernaryOp<
    internal::scalar_boolean_select_op<typename DenseBase<ElseDerived>::Scalar, typename DenseBase<ElseDerived>::Scalar,
                                       typename DenseBase<Derived>::Scalar>,
    typename DenseBase<ElseDerived>::ConstantReturnType, ElseDerived, Derived>
DenseBase<Derived>::select(const typename DenseBase<ElseDerived>::Scalar& thenScalar,
                           const DenseBase<ElseDerived>& elseMatrix) const {
  // Mirror of the overload above: the scalar "then" value is broadcast as a
  // constant expression matching the dimensions of *this.
  using ThenConstantType = typename DenseBase<ElseDerived>::ConstantReturnType;
  using Op = internal::scalar_boolean_select_op<typename DenseBase<ElseDerived>::Scalar,
                                                typename DenseBase<ElseDerived>::Scalar, Scalar>;
  return CwiseTernaryOp<Op, ThenConstantType, ElseDerived, Derived>(ThenConstantType(rows(), cols(), thenScalar),
                                                                    elseMatrix.derived(), derived(), Op());
}
153
+
154
+ } // end namespace Eigen
155
+
156
+ #endif // EIGEN_SELECT_H
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/SelfAdjointView.h ADDED
@@ -0,0 +1,329 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
5
+ //
6
+ // This Source Code Form is subject to the terms of the Mozilla
7
+ // Public License v. 2.0. If a copy of the MPL was not distributed
8
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9
+
10
+ #ifndef EIGEN_SELFADJOINTMATRIX_H
11
+ #define EIGEN_SELFADJOINTMATRIX_H
12
+
13
+ // IWYU pragma: private
14
+ #include "./InternalHeaderCheck.h"
15
+
16
+ namespace Eigen {
17
+
18
+ /** \class SelfAdjointView
19
+ * \ingroup Core_Module
20
+ *
21
+ *
22
+ * \brief Expression of a selfadjoint matrix from a triangular part of a dense matrix
23
+ *
24
+ * \tparam MatrixType the type of the dense matrix storing the coefficients
25
+ * \tparam TriangularPart can be either \c #Lower or \c #Upper
26
+ *
27
+ * This class is an expression of a sefladjoint matrix from a triangular part of a matrix
28
+ * with given dense storage of the coefficients. It is the return type of MatrixBase::selfadjointView()
29
+ * and most of the time this is the only way that it is used.
30
+ *
31
+ * \sa class TriangularBase, MatrixBase::selfadjointView()
32
+ */
33
+
34
+ namespace internal {
35
// Traits of a SelfAdjointView: mostly inherited from the nested matrix.
template <typename MatrixType, unsigned int UpLo>
struct traits<SelfAdjointView<MatrixType, UpLo> > : traits<MatrixType> {
  typedef typename ref_selector<MatrixType>::non_const_type MatrixTypeNested;
  typedef remove_all_t<MatrixTypeNested> MatrixTypeNestedCleaned;
  typedef MatrixType ExpressionType;
  typedef typename MatrixType::PlainObject FullMatrixType;
  enum {
    // Combine the referenced triangular part (Upper/Lower) with SelfAdjoint.
    Mode = UpLo | SelfAdjoint,
    // LvalueBit is propagated only when the nested matrix is writable.
    FlagsLvalueBit = is_lvalue<MatrixType>::value ? LvalueBit : 0,
    Flags = MatrixTypeNestedCleaned::Flags & (HereditaryBits | FlagsLvalueBit) &
            (~(PacketAccessBit | DirectAccessBit | LinearAccessBit))  // FIXME these flags should be preserved
  };
};
48
+ } // namespace internal
49
+
50
+ template <typename MatrixType_, unsigned int UpLo>
51
+ class SelfAdjointView : public TriangularBase<SelfAdjointView<MatrixType_, UpLo> > {
52
+ public:
53
+ EIGEN_STATIC_ASSERT(UpLo == Lower || UpLo == Upper, SELFADJOINTVIEW_ACCEPTS_UPPER_AND_LOWER_MODE_ONLY)
54
+
55
+ typedef MatrixType_ MatrixType;
56
+ typedef TriangularBase<SelfAdjointView> Base;
57
+ typedef typename internal::traits<SelfAdjointView>::MatrixTypeNested MatrixTypeNested;
58
+ typedef typename internal::traits<SelfAdjointView>::MatrixTypeNestedCleaned MatrixTypeNestedCleaned;
59
+ typedef MatrixTypeNestedCleaned NestedExpression;
60
+
61
+ /** \brief The type of coefficients in this matrix */
62
+ typedef typename internal::traits<SelfAdjointView>::Scalar Scalar;
63
+ typedef typename MatrixType::StorageIndex StorageIndex;
64
+ typedef internal::remove_all_t<typename MatrixType::ConjugateReturnType> MatrixConjugateReturnType;
65
+ typedef SelfAdjointView<std::add_const_t<MatrixType>, UpLo> ConstSelfAdjointView;
66
+
67
+ enum {
68
+ Mode = internal::traits<SelfAdjointView>::Mode,
69
+ Flags = internal::traits<SelfAdjointView>::Flags,
70
+ TransposeMode = ((int(Mode) & int(Upper)) ? Lower : 0) | ((int(Mode) & int(Lower)) ? Upper : 0)
71
+ };
72
+ typedef typename MatrixType::PlainObject PlainObject;
73
+
74
+ EIGEN_DEVICE_FUNC explicit inline SelfAdjointView(MatrixType& matrix) : m_matrix(matrix) {}
75
+
76
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index rows() const EIGEN_NOEXCEPT { return m_matrix.rows(); }
77
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index cols() const EIGEN_NOEXCEPT { return m_matrix.cols(); }
78
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index outerStride() const EIGEN_NOEXCEPT { return m_matrix.outerStride(); }
79
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index innerStride() const EIGEN_NOEXCEPT { return m_matrix.innerStride(); }
80
+
81
+ /** \sa MatrixBase::coeff()
82
+ * \warning the coordinates must fit into the referenced triangular part
83
+ */
84
+ EIGEN_DEVICE_FUNC inline Scalar coeff(Index row, Index col) const {
85
+ Base::check_coordinates_internal(row, col);
86
+ return m_matrix.coeff(row, col);
87
+ }
88
+
89
+ /** \sa MatrixBase::coeffRef()
90
+ * \warning the coordinates must fit into the referenced triangular part
91
+ */
92
+ EIGEN_DEVICE_FUNC inline Scalar& coeffRef(Index row, Index col) {
93
+ EIGEN_STATIC_ASSERT_LVALUE(SelfAdjointView);
94
+ Base::check_coordinates_internal(row, col);
95
+ return m_matrix.coeffRef(row, col);
96
+ }
97
+
98
+ /** \internal */
99
+ EIGEN_DEVICE_FUNC const MatrixTypeNestedCleaned& _expression() const { return m_matrix; }
100
+
101
+ EIGEN_DEVICE_FUNC const MatrixTypeNestedCleaned& nestedExpression() const { return m_matrix; }
102
+ EIGEN_DEVICE_FUNC MatrixTypeNestedCleaned& nestedExpression() { return m_matrix; }
103
+
104
+ /** Efficient triangular matrix times vector/matrix product */
105
+ template <typename OtherDerived>
106
+ EIGEN_DEVICE_FUNC const Product<SelfAdjointView, OtherDerived> operator*(const MatrixBase<OtherDerived>& rhs) const {
107
+ return Product<SelfAdjointView, OtherDerived>(*this, rhs.derived());
108
+ }
109
+
110
+ /** Efficient vector/matrix times triangular matrix product */
111
+ template <typename OtherDerived>
112
+ friend EIGEN_DEVICE_FUNC const Product<OtherDerived, SelfAdjointView> operator*(const MatrixBase<OtherDerived>& lhs,
113
+ const SelfAdjointView& rhs) {
114
+ return Product<OtherDerived, SelfAdjointView>(lhs.derived(), rhs);
115
+ }
116
+
117
+ friend EIGEN_DEVICE_FUNC const
118
+ SelfAdjointView<const EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(Scalar, MatrixType, product), UpLo>
119
+ operator*(const Scalar& s, const SelfAdjointView& mat) {
120
+ return (s * mat.nestedExpression()).template selfadjointView<UpLo>();
121
+ }
122
+
123
+ /** Perform a symmetric rank 2 update of the selfadjoint matrix \c *this:
124
+ * \f$ this = this + \alpha u v^* + conj(\alpha) v u^* \f$
125
+ * \returns a reference to \c *this
126
+ *
127
+ * The vectors \a u and \c v \b must be column vectors, however they can be
128
+ * a adjoint expression without any overhead. Only the meaningful triangular
129
+ * part of the matrix is updated, the rest is left unchanged.
130
+ *
131
+ * \sa rankUpdate(const MatrixBase<DerivedU>&, Scalar)
132
+ */
133
+ template <typename DerivedU, typename DerivedV>
134
+ EIGEN_DEVICE_FUNC SelfAdjointView& rankUpdate(const MatrixBase<DerivedU>& u, const MatrixBase<DerivedV>& v,
135
+ const Scalar& alpha = Scalar(1));
136
+
137
+ /** Perform a symmetric rank K update of the selfadjoint matrix \c *this:
138
+ * \f$ this = this + \alpha ( u u^* ) \f$ where \a u is a vector or matrix.
139
+ *
140
+ * \returns a reference to \c *this
141
+ *
142
+ * Note that to perform \f$ this = this + \alpha ( u^* u ) \f$ you can simply
143
+ * call this function with u.adjoint().
144
+ *
145
+ * \sa rankUpdate(const MatrixBase<DerivedU>&, const MatrixBase<DerivedV>&, Scalar)
146
+ */
147
+ template <typename DerivedU>
148
+ EIGEN_DEVICE_FUNC SelfAdjointView& rankUpdate(const MatrixBase<DerivedU>& u, const Scalar& alpha = Scalar(1));
149
+
150
+ /** \returns an expression of a triangular view extracted from the current selfadjoint view of a given triangular part
151
+ *
152
+ * The parameter \a TriMode can have the following values: \c #Upper, \c #StrictlyUpper, \c #UnitUpper,
153
+ * \c #Lower, \c #StrictlyLower, \c #UnitLower.
154
+ *
155
+ * If \c TriMode references the same triangular part than \c *this, then this method simply return a \c TriangularView
156
+ * of the nested expression, otherwise, the nested expression is first transposed, thus returning a \c
157
+ * TriangularView<Transpose<MatrixType>> object.
158
+ *
159
+ * \sa MatrixBase::triangularView(), class TriangularView
160
+ */
161
+ template <unsigned int TriMode>
162
+ EIGEN_DEVICE_FUNC
163
+ std::conditional_t<(TriMode & (Upper | Lower)) == (UpLo & (Upper | Lower)), TriangularView<MatrixType, TriMode>,
164
+ TriangularView<typename MatrixType::AdjointReturnType, TriMode> >
165
+ triangularView() const {
166
+ std::conditional_t<(TriMode & (Upper | Lower)) == (UpLo & (Upper | Lower)), MatrixType&,
167
+ typename MatrixType::ConstTransposeReturnType>
168
+ tmp1(m_matrix);
169
+ std::conditional_t<(TriMode & (Upper | Lower)) == (UpLo & (Upper | Lower)), MatrixType&,
170
+ typename MatrixType::AdjointReturnType>
171
+ tmp2(tmp1);
172
+ return std::conditional_t<(TriMode & (Upper | Lower)) == (UpLo & (Upper | Lower)),
173
+ TriangularView<MatrixType, TriMode>,
174
+ TriangularView<typename MatrixType::AdjointReturnType, TriMode> >(tmp2);
175
+ }
176
+
177
+ typedef SelfAdjointView<const MatrixConjugateReturnType, UpLo> ConjugateReturnType;
178
+ /** \sa MatrixBase::conjugate() const */
179
+ EIGEN_DEVICE_FUNC inline const ConjugateReturnType conjugate() const {
180
+ return ConjugateReturnType(m_matrix.conjugate());
181
+ }
182
+
183
+ /** \returns an expression of the complex conjugate of \c *this if Cond==true,
184
+ * returns \c *this otherwise.
185
+ */
186
+ template <bool Cond>
187
+ EIGEN_DEVICE_FUNC inline std::conditional_t<Cond, ConjugateReturnType, ConstSelfAdjointView> conjugateIf() const {
188
+ typedef std::conditional_t<Cond, ConjugateReturnType, ConstSelfAdjointView> ReturnType;
189
+ return ReturnType(m_matrix.template conjugateIf<Cond>());
190
+ }
191
+
192
+ typedef SelfAdjointView<const typename MatrixType::AdjointReturnType, TransposeMode> AdjointReturnType;
193
+ /** \sa MatrixBase::adjoint() const */
194
+ EIGEN_DEVICE_FUNC inline const AdjointReturnType adjoint() const { return AdjointReturnType(m_matrix.adjoint()); }
195
+
196
+ typedef SelfAdjointView<typename MatrixType::TransposeReturnType, TransposeMode> TransposeReturnType;
197
+ /** \sa MatrixBase::transpose() */
198
+ template <class Dummy = int>
199
+ EIGEN_DEVICE_FUNC inline TransposeReturnType transpose(
200
+ std::enable_if_t<Eigen::internal::is_lvalue<MatrixType>::value, Dummy*> = nullptr) {
201
+ typename MatrixType::TransposeReturnType tmp(m_matrix);
202
+ return TransposeReturnType(tmp);
203
+ }
204
+
205
+ typedef SelfAdjointView<const typename MatrixType::ConstTransposeReturnType, TransposeMode> ConstTransposeReturnType;
206
+ /** \sa MatrixBase::transpose() const */
207
+ EIGEN_DEVICE_FUNC inline const ConstTransposeReturnType transpose() const {
208
+ return ConstTransposeReturnType(m_matrix.transpose());
209
+ }
210
+
211
+ /** \returns a const expression of the main diagonal of the matrix \c *this
212
+ *
213
+ * This method simply returns the diagonal of the nested expression, thus by-passing the SelfAdjointView decorator.
214
+ *
215
+ * \sa MatrixBase::diagonal(), class Diagonal */
216
+ EIGEN_DEVICE_FUNC typename MatrixType::ConstDiagonalReturnType diagonal() const {
217
+ return typename MatrixType::ConstDiagonalReturnType(m_matrix);
218
+ }
219
+
220
+ /////////// Cholesky module ///////////
221
+
222
+ const LLT<PlainObject, UpLo> llt() const;
223
+ const LDLT<PlainObject, UpLo> ldlt() const;
224
+
225
+ /////////// Eigenvalue module ///////////
226
+
227
+ /** Real part of #Scalar */
228
+ typedef typename NumTraits<Scalar>::Real RealScalar;
229
+ /** Return type of eigenvalues() */
230
+ typedef Matrix<RealScalar, internal::traits<MatrixType>::ColsAtCompileTime, 1> EigenvaluesReturnType;
231
+
232
+ EIGEN_DEVICE_FUNC EigenvaluesReturnType eigenvalues() const;
233
+ EIGEN_DEVICE_FUNC RealScalar operatorNorm() const;
234
+
235
+ protected:
236
+ MatrixTypeNested m_matrix;
237
+ };
238
+
239
+ // template<typename OtherDerived, typename MatrixType, unsigned int UpLo>
240
+ // internal::selfadjoint_matrix_product_returntype<OtherDerived,SelfAdjointView<MatrixType,UpLo> >
241
+ // operator*(const MatrixBase<OtherDerived>& lhs, const SelfAdjointView<MatrixType,UpLo>& rhs)
242
+ // {
243
+ // return internal::matrix_selfadjoint_product_returntype<OtherDerived,SelfAdjointView<MatrixType,UpLo>
244
+ // >(lhs.derived(),rhs);
245
+ // }
246
+
247
+ // selfadjoint to dense matrix
248
+
249
+ namespace internal {
250
+
251
+ // TODO currently a selfadjoint expression has the form SelfAdjointView<.,.>
252
+ // in the future selfadjoint-ness should be defined by the expression traits
253
+ // such that Transpose<SelfAdjointView<.,.> > is valid. (currently TriangularBase::transpose() is overloaded to
254
+ // make it work)
255
+ template <typename MatrixType, unsigned int Mode>
256
+ struct evaluator_traits<SelfAdjointView<MatrixType, Mode> > {
257
+ typedef typename storage_kind_to_evaluator_kind<typename MatrixType::StorageKind>::Kind Kind;
258
+ typedef SelfAdjointShape Shape;
259
+ };
260
+
261
+ template <int UpLo, int SetOpposite, typename DstEvaluatorTypeT, typename SrcEvaluatorTypeT, typename Functor,
262
+ int Version>
263
+ class triangular_dense_assignment_kernel<UpLo, SelfAdjoint, SetOpposite, DstEvaluatorTypeT, SrcEvaluatorTypeT, Functor,
264
+ Version>
265
+ : public generic_dense_assignment_kernel<DstEvaluatorTypeT, SrcEvaluatorTypeT, Functor, Version> {
266
+ protected:
267
+ typedef generic_dense_assignment_kernel<DstEvaluatorTypeT, SrcEvaluatorTypeT, Functor, Version> Base;
268
+ typedef typename Base::DstXprType DstXprType;
269
+ typedef typename Base::SrcXprType SrcXprType;
270
+ using Base::m_dst;
271
+ using Base::m_functor;
272
+ using Base::m_src;
273
+
274
+ public:
275
+ typedef typename Base::DstEvaluatorType DstEvaluatorType;
276
+ typedef typename Base::SrcEvaluatorType SrcEvaluatorType;
277
+ typedef typename Base::Scalar Scalar;
278
+ typedef typename Base::AssignmentTraits AssignmentTraits;
279
+
280
+ EIGEN_DEVICE_FUNC triangular_dense_assignment_kernel(DstEvaluatorType& dst, const SrcEvaluatorType& src,
281
+ const Functor& func, DstXprType& dstExpr)
282
+ : Base(dst, src, func, dstExpr) {}
283
+
284
+ EIGEN_DEVICE_FUNC void assignCoeff(Index row, Index col) {
285
+ eigen_internal_assert(row != col);
286
+ Scalar tmp = m_src.coeff(row, col);
287
+ m_functor.assignCoeff(m_dst.coeffRef(row, col), tmp);
288
+ m_functor.assignCoeff(m_dst.coeffRef(col, row), numext::conj(tmp));
289
+ }
290
+
291
+ EIGEN_DEVICE_FUNC void assignDiagonalCoeff(Index id) { Base::assignCoeff(id, id); }
292
+
293
+ EIGEN_DEVICE_FUNC void assignOppositeCoeff(Index, Index) { eigen_internal_assert(false && "should never be called"); }
294
+ };
295
+
296
+ } // end namespace internal
297
+
298
+ /***************************************************************************
299
+ * Implementation of MatrixBase methods
300
+ ***************************************************************************/
301
+
302
+ /** This is the const version of MatrixBase::selfadjointView() */
303
+ template <typename Derived>
304
+ template <unsigned int UpLo>
305
+ EIGEN_DEVICE_FUNC typename MatrixBase<Derived>::template ConstSelfAdjointViewReturnType<UpLo>::Type
306
+ MatrixBase<Derived>::selfadjointView() const {
307
+ return typename ConstSelfAdjointViewReturnType<UpLo>::Type(derived());
308
+ }
309
+
310
+ /** \returns an expression of a symmetric/self-adjoint view extracted from the upper or lower triangular part of the
311
+ * current matrix
312
+ *
313
+ * The parameter \a UpLo can be either \c #Upper or \c #Lower
314
+ *
315
+ * Example: \include MatrixBase_selfadjointView.cpp
316
+ * Output: \verbinclude MatrixBase_selfadjointView.out
317
+ *
318
+ * \sa class SelfAdjointView
319
+ */
320
+ template <typename Derived>
321
+ template <unsigned int UpLo>
322
+ EIGEN_DEVICE_FUNC typename MatrixBase<Derived>::template SelfAdjointViewReturnType<UpLo>::Type
323
+ MatrixBase<Derived>::selfadjointView() {
324
+ return typename SelfAdjointViewReturnType<UpLo>::Type(derived());
325
+ }
326
+
327
+ } // end namespace Eigen
328
+
329
+ #endif // EIGEN_SELFADJOINTMATRIX_H
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/SkewSymmetricMatrix3.h ADDED
@@ -0,0 +1,382 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
5
+ // Copyright (C) 2007-2009 Benoit Jacob <jacob.benoit.1@gmail.com>
6
+ //
7
+ // This Source Code Form is subject to the terms of the Mozilla
8
+ // Public License v. 2.0. If a copy of the MPL was not distributed
9
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
10
+
11
+ #ifndef EIGEN_SKEWSYMMETRICMATRIX3_H
12
+ #define EIGEN_SKEWSYMMETRICMATRIX3_H
13
+
14
+ // IWYU pragma: private
15
+ #include "./InternalHeaderCheck.h"
16
+
17
+ namespace Eigen {
18
+
19
+ /** \class SkewSymmetricBase
20
+ * \ingroup Core_Module
21
+ *
22
+ * \brief Base class for skew symmetric matrices and expressions
23
+ *
24
+ * This is the base class that is inherited by SkewSymmetricMatrix3 and related expression
25
+ * types, which internally use a three vector for storing the entries. SkewSymmetric
26
+ * types always represent square three times three matrices.
27
+ *
28
+ * This implementations follows class DiagonalMatrix
29
+ *
30
+ * \tparam Derived is the derived type, a SkewSymmetricMatrix3 or SkewSymmetricWrapper.
31
+ *
32
+ * \sa class SkewSymmetricMatrix3, class SkewSymmetricWrapper
33
+ */
34
+ template <typename Derived>
35
+ class SkewSymmetricBase : public EigenBase<Derived> {
36
+ public:
37
+ typedef typename internal::traits<Derived>::SkewSymmetricVectorType SkewSymmetricVectorType;
38
+ typedef typename SkewSymmetricVectorType::Scalar Scalar;
39
+ typedef typename SkewSymmetricVectorType::RealScalar RealScalar;
40
+ typedef typename internal::traits<Derived>::StorageKind StorageKind;
41
+ typedef typename internal::traits<Derived>::StorageIndex StorageIndex;
42
+
43
+ enum {
44
+ RowsAtCompileTime = SkewSymmetricVectorType::SizeAtCompileTime,
45
+ ColsAtCompileTime = SkewSymmetricVectorType::SizeAtCompileTime,
46
+ MaxRowsAtCompileTime = SkewSymmetricVectorType::MaxSizeAtCompileTime,
47
+ MaxColsAtCompileTime = SkewSymmetricVectorType::MaxSizeAtCompileTime,
48
+ IsVectorAtCompileTime = 0,
49
+ Flags = NoPreferredStorageOrderBit
50
+ };
51
+
52
+ typedef Matrix<Scalar, RowsAtCompileTime, ColsAtCompileTime, 0, MaxRowsAtCompileTime, MaxColsAtCompileTime>
53
+ DenseMatrixType;
54
+ typedef DenseMatrixType DenseType;
55
+ typedef SkewSymmetricMatrix3<Scalar> PlainObject;
56
+
57
+ /** \returns a reference to the derived object. */
58
+ EIGEN_DEVICE_FUNC inline const Derived& derived() const { return *static_cast<const Derived*>(this); }
59
+ /** \returns a const reference to the derived object. */
60
+ EIGEN_DEVICE_FUNC inline Derived& derived() { return *static_cast<Derived*>(this); }
61
+
62
+ /**
63
+ * Constructs a dense matrix from \c *this. Note, this directly returns a dense matrix type,
64
+ * not an expression.
65
+ * \returns A dense matrix, with its entries set from the the derived object. */
66
+ EIGEN_DEVICE_FUNC DenseMatrixType toDenseMatrix() const { return derived(); }
67
+
68
+ /** Determinant vanishes */
69
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Scalar determinant() const { return 0; }
70
+
71
+ /** A.transpose() = -A */
72
+ EIGEN_DEVICE_FUNC PlainObject transpose() const { return (-vector()).asSkewSymmetric(); }
73
+
74
+ /** \returns the exponential of this matrix using Rodrigues’ formula */
75
+ EIGEN_DEVICE_FUNC DenseMatrixType exponential() const {
76
+ DenseMatrixType retVal = DenseMatrixType::Identity();
77
+ const SkewSymmetricVectorType& v = vector();
78
+ if (v.isZero()) {
79
+ return retVal;
80
+ }
81
+ const Scalar norm2 = v.squaredNorm();
82
+ const Scalar norm = numext::sqrt(norm2);
83
+ retVal += ((((1 - numext::cos(norm)) / norm2) * derived()) * derived()) +
84
+ (numext::sin(norm) / norm) * derived().toDenseMatrix();
85
+ return retVal;
86
+ }
87
+
88
+ /** \returns a reference to the derived object's vector of coefficients. */
89
+ EIGEN_DEVICE_FUNC inline const SkewSymmetricVectorType& vector() const { return derived().vector(); }
90
+ /** \returns a const reference to the derived object's vector of coefficients. */
91
+ EIGEN_DEVICE_FUNC inline SkewSymmetricVectorType& vector() { return derived().vector(); }
92
+
93
+ /** \returns the number of rows. */
94
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index rows() const { return 3; }
95
+ /** \returns the number of columns. */
96
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR inline Index cols() const { return 3; }
97
+
98
+ /** \returns the matrix product of \c *this by the dense matrix, \a matrix */
99
+ template <typename MatrixDerived>
100
+ EIGEN_DEVICE_FUNC Product<Derived, MatrixDerived, LazyProduct> operator*(
101
+ const MatrixBase<MatrixDerived>& matrix) const {
102
+ return Product<Derived, MatrixDerived, LazyProduct>(derived(), matrix.derived());
103
+ }
104
+
105
+ /** \returns the matrix product of \c *this by the skew symmetric matrix, \a matrix */
106
+ template <typename MatrixDerived>
107
+ EIGEN_DEVICE_FUNC Product<Derived, MatrixDerived, LazyProduct> operator*(
108
+ const SkewSymmetricBase<MatrixDerived>& matrix) const {
109
+ return Product<Derived, MatrixDerived, LazyProduct>(derived(), matrix.derived());
110
+ }
111
+
112
+ template <typename OtherDerived>
113
+ using SkewSymmetricProductReturnType = SkewSymmetricWrapper<const EIGEN_CWISE_BINARY_RETURN_TYPE(
114
+ SkewSymmetricVectorType, typename OtherDerived::SkewSymmetricVectorType, product)>;
115
+
116
+ /** \returns the wedge product of \c *this by the skew symmetric matrix \a other
117
+ * A wedge B = AB - BA */
118
+ template <typename OtherDerived>
119
+ EIGEN_DEVICE_FUNC SkewSymmetricProductReturnType<OtherDerived> wedge(
120
+ const SkewSymmetricBase<OtherDerived>& other) const {
121
+ return vector().cross(other.vector()).asSkewSymmetric();
122
+ }
123
+
124
+ using SkewSymmetricScaleReturnType =
125
+ SkewSymmetricWrapper<const EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(SkewSymmetricVectorType, Scalar, product)>;
126
+
127
+ /** \returns the product of \c *this by the scalar \a scalar */
128
+ EIGEN_DEVICE_FUNC inline SkewSymmetricScaleReturnType operator*(const Scalar& scalar) const {
129
+ return (vector() * scalar).asSkewSymmetric();
130
+ }
131
+
132
+ using ScaleSkewSymmetricReturnType =
133
+ SkewSymmetricWrapper<const EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(Scalar, SkewSymmetricVectorType, product)>;
134
+
135
+ /** \returns the product of a scalar and the skew symmetric matrix \a other */
136
+ EIGEN_DEVICE_FUNC friend inline ScaleSkewSymmetricReturnType operator*(const Scalar& scalar,
137
+ const SkewSymmetricBase& other) {
138
+ return (scalar * other.vector()).asSkewSymmetric();
139
+ }
140
+
141
+ template <typename OtherDerived>
142
+ using SkewSymmetricSumReturnType = SkewSymmetricWrapper<const EIGEN_CWISE_BINARY_RETURN_TYPE(
143
+ SkewSymmetricVectorType, typename OtherDerived::SkewSymmetricVectorType, sum)>;
144
+
145
+ /** \returns the sum of \c *this and the skew symmetric matrix \a other */
146
+ template <typename OtherDerived>
147
+ EIGEN_DEVICE_FUNC inline SkewSymmetricSumReturnType<OtherDerived> operator+(
148
+ const SkewSymmetricBase<OtherDerived>& other) const {
149
+ return (vector() + other.vector()).asSkewSymmetric();
150
+ }
151
+
152
+ template <typename OtherDerived>
153
+ using SkewSymmetricDifferenceReturnType = SkewSymmetricWrapper<const EIGEN_CWISE_BINARY_RETURN_TYPE(
154
+ SkewSymmetricVectorType, typename OtherDerived::SkewSymmetricVectorType, difference)>;
155
+
156
+ /** \returns the difference of \c *this and the skew symmetric matrix \a other */
157
+ template <typename OtherDerived>
158
+ EIGEN_DEVICE_FUNC inline SkewSymmetricDifferenceReturnType<OtherDerived> operator-(
159
+ const SkewSymmetricBase<OtherDerived>& other) const {
160
+ return (vector() - other.vector()).asSkewSymmetric();
161
+ }
162
+ };
163
+
164
+ /** \class SkewSymmetricMatrix3
165
+ * \ingroup Core_Module
166
+ *
167
+ * \brief Represents a 3x3 skew symmetric matrix with its storage
168
+ *
169
+ * \tparam Scalar_ the type of coefficients
170
+ *
171
+ * \sa class SkewSymmetricBase, class SkewSymmetricWrapper
172
+ */
173
+
174
+ namespace internal {
175
+ template <typename Scalar_>
176
+ struct traits<SkewSymmetricMatrix3<Scalar_>> : traits<Matrix<Scalar_, 3, 3, 0, 3, 3>> {
177
+ typedef Matrix<Scalar_, 3, 1, 0, 3, 1> SkewSymmetricVectorType;
178
+ typedef SkewSymmetricShape StorageKind;
179
+ enum { Flags = LvalueBit | NoPreferredStorageOrderBit | NestByRefBit };
180
+ };
181
+ } // namespace internal
182
+ template <typename Scalar_>
183
+ class SkewSymmetricMatrix3 : public SkewSymmetricBase<SkewSymmetricMatrix3<Scalar_>> {
184
+ public:
185
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
186
+ typedef typename internal::traits<SkewSymmetricMatrix3>::SkewSymmetricVectorType SkewSymmetricVectorType;
187
+ typedef const SkewSymmetricMatrix3& Nested;
188
+ typedef Scalar_ Scalar;
189
+ typedef typename internal::traits<SkewSymmetricMatrix3>::StorageKind StorageKind;
190
+ typedef typename internal::traits<SkewSymmetricMatrix3>::StorageIndex StorageIndex;
191
+ #endif
192
+
193
+ protected:
194
+ SkewSymmetricVectorType m_vector;
195
+
196
+ public:
197
+ /** const version of vector(). */
198
+ EIGEN_DEVICE_FUNC inline const SkewSymmetricVectorType& vector() const { return m_vector; }
199
+ /** \returns a reference to the stored vector of coefficients. */
200
+ EIGEN_DEVICE_FUNC inline SkewSymmetricVectorType& vector() { return m_vector; }
201
+
202
+ /** Default constructor without initialization */
203
+ EIGEN_DEVICE_FUNC inline SkewSymmetricMatrix3() {}
204
+
205
+ /** Constructor from three scalars */
206
+ EIGEN_DEVICE_FUNC inline SkewSymmetricMatrix3(const Scalar& x, const Scalar& y, const Scalar& z)
207
+ : m_vector(x, y, z) {}
208
+
209
+ /** \brief Constructs a SkewSymmetricMatrix3 from an r-value vector type */
210
+ EIGEN_DEVICE_FUNC explicit inline SkewSymmetricMatrix3(SkewSymmetricVectorType&& vec) : m_vector(std::move(vec)) {}
211
+
212
+ /** generic constructor from expression of the coefficients */
213
+ template <typename OtherDerived>
214
+ EIGEN_DEVICE_FUNC explicit inline SkewSymmetricMatrix3(const MatrixBase<OtherDerived>& other) : m_vector(other) {}
215
+
216
+ /** Copy constructor. */
217
+ template <typename OtherDerived>
218
+ EIGEN_DEVICE_FUNC inline SkewSymmetricMatrix3(const SkewSymmetricBase<OtherDerived>& other)
219
+ : m_vector(other.vector()) {}
220
+
221
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
222
+ /** copy constructor. prevent a default copy constructor from hiding the other templated constructor */
223
+ inline SkewSymmetricMatrix3(const SkewSymmetricMatrix3& other) : m_vector(other.vector()) {}
224
+ #endif
225
+
226
+ /** Copy operator. */
227
+ template <typename OtherDerived>
228
+ EIGEN_DEVICE_FUNC SkewSymmetricMatrix3& operator=(const SkewSymmetricBase<OtherDerived>& other) {
229
+ m_vector = other.vector();
230
+ return *this;
231
+ }
232
+
233
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
234
+ /** This is a special case of the templated operator=. Its purpose is to
235
+ * prevent a default operator= from hiding the templated operator=.
236
+ */
237
+ EIGEN_DEVICE_FUNC SkewSymmetricMatrix3& operator=(const SkewSymmetricMatrix3& other) {
238
+ m_vector = other.vector();
239
+ return *this;
240
+ }
241
+ #endif
242
+
243
+ typedef SkewSymmetricWrapper<const CwiseNullaryOp<internal::scalar_constant_op<Scalar>, SkewSymmetricVectorType>>
244
+ InitializeReturnType;
245
+
246
+ /** Initializes a skew symmetric matrix with coefficients set to zero */
247
+ EIGEN_DEVICE_FUNC static InitializeReturnType Zero() { return SkewSymmetricVectorType::Zero().asSkewSymmetric(); }
248
+
249
+ /** Sets all coefficients to zero. */
250
+ EIGEN_DEVICE_FUNC inline void setZero() { m_vector.setZero(); }
251
+ };
252
+
253
+ /** \class SkewSymmetricWrapper
254
+ * \ingroup Core_Module
255
+ *
256
+ * \brief Expression of a skew symmetric matrix
257
+ *
258
+ * \tparam SkewSymmetricVectorType_ the type of the vector of coefficients
259
+ *
260
+ * This class is an expression of a skew symmetric matrix, but not storing its own vector of coefficients,
261
+ * instead wrapping an existing vector expression. It is the return type of MatrixBase::asSkewSymmetric()
262
+ * and most of the time this is the only way that it is used.
263
+ *
264
+ * \sa class SkewSymmetricMatrix3, class SkewSymmetricBase, MatrixBase::asSkewSymmetric()
265
+ */
266
+
267
+ namespace internal {
268
+ template <typename SkewSymmetricVectorType_>
269
+ struct traits<SkewSymmetricWrapper<SkewSymmetricVectorType_>> {
270
+ typedef SkewSymmetricVectorType_ SkewSymmetricVectorType;
271
+ typedef typename SkewSymmetricVectorType::Scalar Scalar;
272
+ typedef typename SkewSymmetricVectorType::StorageIndex StorageIndex;
273
+ typedef SkewSymmetricShape StorageKind;
274
+ typedef typename traits<SkewSymmetricVectorType>::XprKind XprKind;
275
+ enum {
276
+ RowsAtCompileTime = SkewSymmetricVectorType::SizeAtCompileTime,
277
+ ColsAtCompileTime = SkewSymmetricVectorType::SizeAtCompileTime,
278
+ MaxRowsAtCompileTime = SkewSymmetricVectorType::MaxSizeAtCompileTime,
279
+ MaxColsAtCompileTime = SkewSymmetricVectorType::MaxSizeAtCompileTime,
280
+ Flags = (traits<SkewSymmetricVectorType>::Flags & LvalueBit) | NoPreferredStorageOrderBit
281
+ };
282
+ };
283
+ } // namespace internal
284
+
285
+ template <typename SkewSymmetricVectorType_>
286
+ class SkewSymmetricWrapper : public SkewSymmetricBase<SkewSymmetricWrapper<SkewSymmetricVectorType_>>,
287
+ internal::no_assignment_operator {
288
+ public:
289
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
290
+ typedef SkewSymmetricVectorType_ SkewSymmetricVectorType;
291
+ typedef SkewSymmetricWrapper Nested;
292
+ #endif
293
+
294
+ /** Constructor from expression of coefficients to wrap. */
295
+ EIGEN_DEVICE_FUNC explicit inline SkewSymmetricWrapper(SkewSymmetricVectorType& a_vector) : m_vector(a_vector) {}
296
+
297
+ /** \returns a const reference to the wrapped expression of coefficients. */
298
+ EIGEN_DEVICE_FUNC const SkewSymmetricVectorType& vector() const { return m_vector; }
299
+
300
+ protected:
301
+ typename SkewSymmetricVectorType::Nested m_vector;
302
+ };
303
+
304
+ /** \returns a pseudo-expression of a skew symmetric matrix with *this as vector of coefficients
305
+ *
306
+ * \only_for_vectors
307
+ *
308
+ * \sa class SkewSymmetricWrapper, class SkewSymmetricMatrix3, vector(), isSkewSymmetric()
309
+ **/
310
+ template <typename Derived>
311
+ EIGEN_DEVICE_FUNC inline const SkewSymmetricWrapper<const Derived> MatrixBase<Derived>::asSkewSymmetric() const {
312
+ return SkewSymmetricWrapper<const Derived>(derived());
313
+ }
314
+
315
+ /** \returns true if *this is approximately equal to a skew symmetric matrix,
316
+ * within the precision given by \a prec.
317
+ */
318
+ template <typename Derived>
319
+ bool MatrixBase<Derived>::isSkewSymmetric(const RealScalar& prec) const {
320
+ if (cols() != rows()) return false;
321
+ return (this->transpose() + *this).isZero(prec);
322
+ }
323
+
324
+ /** \returns the matrix product of \c *this by the skew symmetric matrix \skew.
325
+ */
326
+ template <typename Derived>
327
+ template <typename SkewDerived>
328
+ EIGEN_DEVICE_FUNC inline const Product<Derived, SkewDerived, LazyProduct> MatrixBase<Derived>::operator*(
329
+ const SkewSymmetricBase<SkewDerived>& skew) const {
330
+ return Product<Derived, SkewDerived, LazyProduct>(derived(), skew.derived());
331
+ }
332
+
333
+ namespace internal {
334
+
335
+ template <>
336
+ struct storage_kind_to_shape<SkewSymmetricShape> {
337
+ typedef SkewSymmetricShape Shape;
338
+ };
339
+
340
+ struct SkewSymmetric2Dense {};
341
+
342
+ template <>
343
+ struct AssignmentKind<DenseShape, SkewSymmetricShape> {
344
+ typedef SkewSymmetric2Dense Kind;
345
+ };
346
+
347
+ // SkewSymmetric matrix to Dense assignment
348
+ template <typename DstXprType, typename SrcXprType, typename Functor>
349
+ struct Assignment<DstXprType, SrcXprType, Functor, SkewSymmetric2Dense> {
350
+ EIGEN_DEVICE_FUNC static void run(
351
+ DstXprType& dst, const SrcXprType& src,
352
+ const internal::assign_op<typename DstXprType::Scalar, typename SrcXprType::Scalar>& /*func*/) {
353
+ if ((dst.rows() != 3) || (dst.cols() != 3)) {
354
+ dst.resize(3, 3);
355
+ }
356
+ dst.diagonal().setZero();
357
+ const typename SrcXprType::SkewSymmetricVectorType v = src.vector();
358
+ dst(0, 1) = -v(2);
359
+ dst(1, 0) = v(2);
360
+ dst(0, 2) = v(1);
361
+ dst(2, 0) = -v(1);
362
+ dst(1, 2) = -v(0);
363
+ dst(2, 1) = v(0);
364
+ }
365
+ EIGEN_DEVICE_FUNC static void run(
366
+ DstXprType& dst, const SrcXprType& src,
367
+ const internal::add_assign_op<typename DstXprType::Scalar, typename SrcXprType::Scalar>& /*func*/) {
368
+ dst.vector() += src.vector();
369
+ }
370
+
371
+ EIGEN_DEVICE_FUNC static void run(
372
+ DstXprType& dst, const SrcXprType& src,
373
+ const internal::sub_assign_op<typename DstXprType::Scalar, typename SrcXprType::Scalar>& /*func*/) {
374
+ dst.vector() -= src.vector();
375
+ }
376
+ };
377
+
378
+ } // namespace internal
379
+
380
+ } // end namespace Eigen
381
+
382
+ #endif // EIGEN_SKEWSYMMETRICMATRIX3_H
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/Solve.h ADDED
@@ -0,0 +1,174 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2014 Gael Guennebaud <gael.guennebaud@inria.fr>
5
+ //
6
+ // This Source Code Form is subject to the terms of the Mozilla
7
+ // Public License v. 2.0. If a copy of the MPL was not distributed
8
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9
+
10
+ #ifndef EIGEN_SOLVE_H
11
+ #define EIGEN_SOLVE_H
12
+
13
+ // IWYU pragma: private
14
+ #include "./InternalHeaderCheck.h"
15
+
16
+ namespace Eigen {
17
+
18
+ template <typename Decomposition, typename RhsType, typename StorageKind>
19
+ class SolveImpl;
20
+
21
+ /** \class Solve
22
+ * \ingroup Core_Module
23
+ *
24
+ * \brief Pseudo expression representing a solving operation
25
+ *
26
+ * \tparam Decomposition the type of the matrix or decomposition object
27
+ * \tparam Rhstype the type of the right-hand side
28
+ *
29
+ * This class represents an expression of A.solve(B)
30
+ * and most of the time this is the only way it is used.
31
+ *
32
+ */
33
+ namespace internal {
34
+
35
+ // this solve_traits class permits to determine the evaluation type with respect to storage kind (Dense vs Sparse)
36
+ template <typename Decomposition, typename RhsType, typename StorageKind>
37
+ struct solve_traits;
38
+
39
+ template <typename Decomposition, typename RhsType>
40
+ struct solve_traits<Decomposition, RhsType, Dense> {
41
+ typedef typename make_proper_matrix_type<typename RhsType::Scalar, Decomposition::ColsAtCompileTime,
42
+ RhsType::ColsAtCompileTime, RhsType::PlainObject::Options,
43
+ Decomposition::MaxColsAtCompileTime, RhsType::MaxColsAtCompileTime>::type
44
+ PlainObject;
45
+ };
46
+
47
+ template <typename Decomposition, typename RhsType>
48
+ struct traits<Solve<Decomposition, RhsType> >
49
+ : traits<
50
+ typename solve_traits<Decomposition, RhsType, typename internal::traits<RhsType>::StorageKind>::PlainObject> {
51
+ typedef typename solve_traits<Decomposition, RhsType, typename internal::traits<RhsType>::StorageKind>::PlainObject
52
+ PlainObject;
53
+ typedef typename promote_index_type<typename Decomposition::StorageIndex, typename RhsType::StorageIndex>::type
54
+ StorageIndex;
55
+ typedef traits<PlainObject> BaseTraits;
56
+ enum { Flags = BaseTraits::Flags & RowMajorBit, CoeffReadCost = HugeCost };
57
+ };
58
+
59
+ } // namespace internal
60
+
61
+ template <typename Decomposition, typename RhsType>
62
+ class Solve : public SolveImpl<Decomposition, RhsType, typename internal::traits<RhsType>::StorageKind> {
63
+ public:
64
+ typedef typename internal::traits<Solve>::PlainObject PlainObject;
65
+ typedef typename internal::traits<Solve>::StorageIndex StorageIndex;
66
+
67
+ Solve(const Decomposition &dec, const RhsType &rhs) : m_dec(dec), m_rhs(rhs) {}
68
+
69
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index rows() const EIGEN_NOEXCEPT { return m_dec.cols(); }
70
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR Index cols() const EIGEN_NOEXCEPT { return m_rhs.cols(); }
71
+
72
+ EIGEN_DEVICE_FUNC const Decomposition &dec() const { return m_dec; }
73
+ EIGEN_DEVICE_FUNC const RhsType &rhs() const { return m_rhs; }
74
+
75
+ protected:
76
+ const Decomposition &m_dec;
77
+ const typename internal::ref_selector<RhsType>::type m_rhs;
78
+ };
79
+
80
+ // Specialization of the Solve expression for dense results
81
+ template <typename Decomposition, typename RhsType>
82
+ class SolveImpl<Decomposition, RhsType, Dense> : public MatrixBase<Solve<Decomposition, RhsType> > {
83
+ typedef Solve<Decomposition, RhsType> Derived;
84
+
85
+ public:
86
+ typedef MatrixBase<Solve<Decomposition, RhsType> > Base;
87
+ EIGEN_DENSE_PUBLIC_INTERFACE(Derived)
88
+
89
+ private:
90
+ Scalar coeff(Index row, Index col) const;
91
+ Scalar coeff(Index i) const;
92
+ };
93
+
94
+ // Generic API dispatcher
95
+ template <typename Decomposition, typename RhsType, typename StorageKind>
96
+ class SolveImpl : public internal::generic_xpr_base<Solve<Decomposition, RhsType>, MatrixXpr, StorageKind>::type {
97
+ public:
98
+ typedef typename internal::generic_xpr_base<Solve<Decomposition, RhsType>, MatrixXpr, StorageKind>::type Base;
99
+ };
100
+
101
+ namespace internal {
102
+
103
+ // Evaluator of Solve -> eval into a temporary
104
+ template <typename Decomposition, typename RhsType>
105
+ struct evaluator<Solve<Decomposition, RhsType> >
106
+ : public evaluator<typename Solve<Decomposition, RhsType>::PlainObject> {
107
+ typedef Solve<Decomposition, RhsType> SolveType;
108
+ typedef typename SolveType::PlainObject PlainObject;
109
+ typedef evaluator<PlainObject> Base;
110
+
111
+ enum { Flags = Base::Flags | EvalBeforeNestingBit };
112
+
113
+ EIGEN_DEVICE_FUNC explicit evaluator(const SolveType &solve) : m_result(solve.rows(), solve.cols()) {
114
+ internal::construct_at<Base>(this, m_result);
115
+ solve.dec()._solve_impl(solve.rhs(), m_result);
116
+ }
117
+
118
+ protected:
119
+ PlainObject m_result;
120
+ };
121
+
122
+ // Specialization for "dst = dec.solve(rhs)"
123
+ // NOTE we need to specialize it for Dense2Dense to avoid ambiguous specialization error and a Sparse2Sparse
124
+ // specialization must exist somewhere
125
+ template <typename DstXprType, typename DecType, typename RhsType, typename Scalar>
126
+ struct Assignment<DstXprType, Solve<DecType, RhsType>, internal::assign_op<Scalar, Scalar>, Dense2Dense> {
127
+ typedef Solve<DecType, RhsType> SrcXprType;
128
+ static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar, Scalar> &) {
129
+ Index dstRows = src.rows();
130
+ Index dstCols = src.cols();
131
+ if ((dst.rows() != dstRows) || (dst.cols() != dstCols)) dst.resize(dstRows, dstCols);
132
+
133
+ src.dec()._solve_impl(src.rhs(), dst);
134
+ }
135
+ };
136
+
137
+ // Specialization for "dst = dec.transpose().solve(rhs)"
138
+ template <typename DstXprType, typename DecType, typename RhsType, typename Scalar>
139
+ struct Assignment<DstXprType, Solve<Transpose<const DecType>, RhsType>, internal::assign_op<Scalar, Scalar>,
140
+ Dense2Dense> {
141
+ typedef Solve<Transpose<const DecType>, RhsType> SrcXprType;
142
+ static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar, Scalar> &) {
143
+ Index dstRows = src.rows();
144
+ Index dstCols = src.cols();
145
+ if ((dst.rows() != dstRows) || (dst.cols() != dstCols)) dst.resize(dstRows, dstCols);
146
+
147
+ src.dec().nestedExpression().template _solve_impl_transposed<false>(src.rhs(), dst);
148
+ }
149
+ };
150
+
151
+ // Specialization for "dst = dec.adjoint().solve(rhs)"
152
+ template <typename DstXprType, typename DecType, typename RhsType, typename Scalar>
153
+ struct Assignment<
154
+ DstXprType,
155
+ Solve<CwiseUnaryOp<internal::scalar_conjugate_op<typename DecType::Scalar>, const Transpose<const DecType> >,
156
+ RhsType>,
157
+ internal::assign_op<Scalar, Scalar>, Dense2Dense> {
158
+ typedef Solve<CwiseUnaryOp<internal::scalar_conjugate_op<typename DecType::Scalar>, const Transpose<const DecType> >,
159
+ RhsType>
160
+ SrcXprType;
161
+ static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar, Scalar> &) {
162
+ Index dstRows = src.rows();
163
+ Index dstCols = src.cols();
164
+ if ((dst.rows() != dstRows) || (dst.cols() != dstCols)) dst.resize(dstRows, dstCols);
165
+
166
+ src.dec().nestedExpression().nestedExpression().template _solve_impl_transposed<true>(src.rhs(), dst);
167
+ }
168
+ };
169
+
170
+ } // end namespace internal
171
+
172
+ } // end namespace Eigen
173
+
174
+ #endif // EIGEN_SOLVE_H
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/SolveTriangular.h ADDED
@@ -0,0 +1,237 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
5
+ //
6
+ // This Source Code Form is subject to the terms of the Mozilla
7
+ // Public License v. 2.0. If a copy of the MPL was not distributed
8
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9
+
10
+ #ifndef EIGEN_SOLVETRIANGULAR_H
11
+ #define EIGEN_SOLVETRIANGULAR_H
12
+
13
+ // IWYU pragma: private
14
+ #include "./InternalHeaderCheck.h"
15
+
16
+ namespace Eigen {
17
+
18
+ namespace internal {
19
+
20
+ // Forward declarations:
21
+ // The following two routines are implemented in the products/TriangularSolver*.h files
22
+ template <typename LhsScalar, typename RhsScalar, typename Index, int Side, int Mode, bool Conjugate, int StorageOrder>
23
+ struct triangular_solve_vector;
24
+
25
+ template <typename Scalar, typename Index, int Side, int Mode, bool Conjugate, int TriStorageOrder,
26
+ int OtherStorageOrder, int OtherInnerStride>
27
+ struct triangular_solve_matrix;
28
+
29
+ // small helper struct extracting some traits on the underlying solver operation
30
+ template <typename Lhs, typename Rhs, int Side>
31
+ class trsolve_traits {
32
+ private:
33
+ enum { RhsIsVectorAtCompileTime = (Side == OnTheLeft ? Rhs::ColsAtCompileTime : Rhs::RowsAtCompileTime) == 1 };
34
+
35
+ public:
36
+ enum {
37
+ Unrolling = (RhsIsVectorAtCompileTime && Rhs::SizeAtCompileTime != Dynamic && Rhs::SizeAtCompileTime <= 8)
38
+ ? CompleteUnrolling
39
+ : NoUnrolling,
40
+ RhsVectors = RhsIsVectorAtCompileTime ? 1 : Dynamic
41
+ };
42
+ };
43
+
44
+ template <typename Lhs, typename Rhs,
45
+ int Side, // can be OnTheLeft/OnTheRight
46
+ int Mode, // can be Upper/Lower | UnitDiag
47
+ int Unrolling = trsolve_traits<Lhs, Rhs, Side>::Unrolling,
48
+ int RhsVectors = trsolve_traits<Lhs, Rhs, Side>::RhsVectors>
49
+ struct triangular_solver_selector;
50
+
51
+ template <typename Lhs, typename Rhs, int Side, int Mode>
52
+ struct triangular_solver_selector<Lhs, Rhs, Side, Mode, NoUnrolling, 1> {
53
+ typedef typename Lhs::Scalar LhsScalar;
54
+ typedef typename Rhs::Scalar RhsScalar;
55
+ typedef blas_traits<Lhs> LhsProductTraits;
56
+ typedef typename LhsProductTraits::ExtractType ActualLhsType;
57
+ typedef Map<Matrix<RhsScalar, Dynamic, 1>, Aligned> MappedRhs;
58
+ static EIGEN_DEVICE_FUNC void run(const Lhs& lhs, Rhs& rhs) {
59
+ ActualLhsType actualLhs = LhsProductTraits::extract(lhs);
60
+
61
+ // FIXME find a way to allow an inner stride if packet_traits<Scalar>::size==1
62
+
63
+ bool useRhsDirectly = Rhs::InnerStrideAtCompileTime == 1 || rhs.innerStride() == 1;
64
+
65
+ ei_declare_aligned_stack_constructed_variable(RhsScalar, actualRhs, rhs.size(), (useRhsDirectly ? rhs.data() : 0));
66
+
67
+ if (!useRhsDirectly) MappedRhs(actualRhs, rhs.size()) = rhs;
68
+
69
+ triangular_solve_vector<LhsScalar, RhsScalar, Index, Side, Mode, LhsProductTraits::NeedToConjugate,
70
+ (int(Lhs::Flags) & RowMajorBit) ? RowMajor : ColMajor>::run(actualLhs.cols(),
71
+ actualLhs.data(),
72
+ actualLhs.outerStride(),
73
+ actualRhs);
74
+
75
+ if (!useRhsDirectly) rhs = MappedRhs(actualRhs, rhs.size());
76
+ }
77
+ };
78
+
79
+ // the rhs is a matrix
80
+ template <typename Lhs, typename Rhs, int Side, int Mode>
81
+ struct triangular_solver_selector<Lhs, Rhs, Side, Mode, NoUnrolling, Dynamic> {
82
+ typedef typename Rhs::Scalar Scalar;
83
+ typedef blas_traits<Lhs> LhsProductTraits;
84
+ typedef typename LhsProductTraits::DirectLinearAccessType ActualLhsType;
85
+
86
+ static EIGEN_DEVICE_FUNC void run(const Lhs& lhs, Rhs& rhs) {
87
+ add_const_on_value_type_t<ActualLhsType> actualLhs = LhsProductTraits::extract(lhs);
88
+
89
+ const Index size = lhs.rows();
90
+ const Index othersize = Side == OnTheLeft ? rhs.cols() : rhs.rows();
91
+
92
+ typedef internal::gemm_blocking_space<(Rhs::Flags & RowMajorBit) ? RowMajor : ColMajor, Scalar, Scalar,
93
+ Rhs::MaxRowsAtCompileTime, Rhs::MaxColsAtCompileTime,
94
+ Lhs::MaxRowsAtCompileTime, 4>
95
+ BlockingType;
96
+
97
+ // Nothing to solve.
98
+ if (actualLhs.size() == 0 || rhs.size() == 0) {
99
+ return;
100
+ }
101
+
102
+ BlockingType blocking(rhs.rows(), rhs.cols(), size, 1, false);
103
+
104
+ triangular_solve_matrix<Scalar, Index, Side, Mode, LhsProductTraits::NeedToConjugate,
105
+ (int(Lhs::Flags) & RowMajorBit) ? RowMajor : ColMajor,
106
+ (Rhs::Flags & RowMajorBit) ? RowMajor : ColMajor,
107
+ Rhs::InnerStrideAtCompileTime>::run(size, othersize, &actualLhs.coeffRef(0, 0),
108
+ actualLhs.outerStride(), &rhs.coeffRef(0, 0),
109
+ rhs.innerStride(), rhs.outerStride(), blocking);
110
+ }
111
+ };
112
+
113
+ /***************************************************************************
114
+ * meta-unrolling implementation
115
+ ***************************************************************************/
116
+
117
+ template <typename Lhs, typename Rhs, int Mode, int LoopIndex, int Size, bool Stop = LoopIndex == Size>
118
+ struct triangular_solver_unroller;
119
+
120
+ template <typename Lhs, typename Rhs, int Mode, int LoopIndex, int Size>
121
+ struct triangular_solver_unroller<Lhs, Rhs, Mode, LoopIndex, Size, false> {
122
+ enum {
123
+ IsLower = ((Mode & Lower) == Lower),
124
+ DiagIndex = IsLower ? LoopIndex : Size - LoopIndex - 1,
125
+ StartIndex = IsLower ? 0 : DiagIndex + 1
126
+ };
127
+ static EIGEN_DEVICE_FUNC void run(const Lhs& lhs, Rhs& rhs) {
128
+ if (LoopIndex > 0)
129
+ rhs.coeffRef(DiagIndex) -= lhs.row(DiagIndex)
130
+ .template segment<LoopIndex>(StartIndex)
131
+ .transpose()
132
+ .cwiseProduct(rhs.template segment<LoopIndex>(StartIndex))
133
+ .sum();
134
+
135
+ if (!(Mode & UnitDiag)) rhs.coeffRef(DiagIndex) /= lhs.coeff(DiagIndex, DiagIndex);
136
+
137
+ triangular_solver_unroller<Lhs, Rhs, Mode, LoopIndex + 1, Size>::run(lhs, rhs);
138
+ }
139
+ };
140
+
141
+ template <typename Lhs, typename Rhs, int Mode, int LoopIndex, int Size>
142
+ struct triangular_solver_unroller<Lhs, Rhs, Mode, LoopIndex, Size, true> {
143
+ static EIGEN_DEVICE_FUNC void run(const Lhs&, Rhs&) {}
144
+ };
145
+
146
+ template <typename Lhs, typename Rhs, int Mode>
147
+ struct triangular_solver_selector<Lhs, Rhs, OnTheLeft, Mode, CompleteUnrolling, 1> {
148
+ static EIGEN_DEVICE_FUNC void run(const Lhs& lhs, Rhs& rhs) {
149
+ triangular_solver_unroller<Lhs, Rhs, Mode, 0, Rhs::SizeAtCompileTime>::run(lhs, rhs);
150
+ }
151
+ };
152
+
153
+ template <typename Lhs, typename Rhs, int Mode>
154
+ struct triangular_solver_selector<Lhs, Rhs, OnTheRight, Mode, CompleteUnrolling, 1> {
155
+ static EIGEN_DEVICE_FUNC void run(const Lhs& lhs, Rhs& rhs) {
156
+ Transpose<const Lhs> trLhs(lhs);
157
+ Transpose<Rhs> trRhs(rhs);
158
+
159
+ triangular_solver_unroller<Transpose<const Lhs>, Transpose<Rhs>,
160
+ ((Mode & Upper) == Upper ? Lower : Upper) | (Mode & UnitDiag), 0,
161
+ Rhs::SizeAtCompileTime>::run(trLhs, trRhs);
162
+ }
163
+ };
164
+
165
+ } // end namespace internal
166
+
167
+ /***************************************************************************
168
+ * TriangularView methods
169
+ ***************************************************************************/
170
+
171
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
172
+ template <typename MatrixType, unsigned int Mode>
173
+ template <int Side, typename OtherDerived>
174
+ EIGEN_DEVICE_FUNC void TriangularViewImpl<MatrixType, Mode, Dense>::solveInPlace(
175
+ const MatrixBase<OtherDerived>& _other) const {
176
+ OtherDerived& other = _other.const_cast_derived();
177
+ eigen_assert(derived().cols() == derived().rows() && ((Side == OnTheLeft && derived().cols() == other.rows()) ||
178
+ (Side == OnTheRight && derived().cols() == other.cols())));
179
+ eigen_assert((!(int(Mode) & int(ZeroDiag))) && bool(int(Mode) & (int(Upper) | int(Lower))));
180
+ // If solving for a 0x0 matrix, nothing to do, simply return.
181
+ if (derived().cols() == 0) return;
182
+
183
+ enum {
184
+ copy = (internal::traits<OtherDerived>::Flags & RowMajorBit) && OtherDerived::IsVectorAtCompileTime &&
185
+ OtherDerived::SizeAtCompileTime != 1
186
+ };
187
+ typedef std::conditional_t<copy, typename internal::plain_matrix_type_column_major<OtherDerived>::type, OtherDerived&>
188
+ OtherCopy;
189
+ OtherCopy otherCopy(other);
190
+
191
+ internal::triangular_solver_selector<MatrixType, std::remove_reference_t<OtherCopy>, Side, Mode>::run(
192
+ derived().nestedExpression(), otherCopy);
193
+
194
+ if (copy) other = otherCopy;
195
+ }
196
+
197
+ template <typename Derived, unsigned int Mode>
198
+ template <int Side, typename Other>
199
+ const internal::triangular_solve_retval<Side, TriangularView<Derived, Mode>, Other>
200
+ TriangularViewImpl<Derived, Mode, Dense>::solve(const MatrixBase<Other>& other) const {
201
+ return internal::triangular_solve_retval<Side, TriangularViewType, Other>(derived(), other.derived());
202
+ }
203
+ #endif
204
+
205
+ namespace internal {
206
+
207
+ template <int Side, typename TriangularType, typename Rhs>
208
+ struct traits<triangular_solve_retval<Side, TriangularType, Rhs> > {
209
+ typedef typename internal::plain_matrix_type_column_major<Rhs>::type ReturnType;
210
+ };
211
+
212
+ template <int Side, typename TriangularType, typename Rhs>
213
+ struct triangular_solve_retval : public ReturnByValue<triangular_solve_retval<Side, TriangularType, Rhs> > {
214
+ typedef remove_all_t<typename Rhs::Nested> RhsNestedCleaned;
215
+ typedef ReturnByValue<triangular_solve_retval> Base;
216
+
217
+ triangular_solve_retval(const TriangularType& tri, const Rhs& rhs) : m_triangularMatrix(tri), m_rhs(rhs) {}
218
+
219
+ inline EIGEN_CONSTEXPR Index rows() const EIGEN_NOEXCEPT { return m_rhs.rows(); }
220
+ inline EIGEN_CONSTEXPR Index cols() const EIGEN_NOEXCEPT { return m_rhs.cols(); }
221
+
222
+ template <typename Dest>
223
+ inline void evalTo(Dest& dst) const {
224
+ if (!is_same_dense(dst, m_rhs)) dst = m_rhs;
225
+ m_triangularMatrix.template solveInPlace<Side>(dst);
226
+ }
227
+
228
+ protected:
229
+ const TriangularType& m_triangularMatrix;
230
+ typename Rhs::Nested m_rhs;
231
+ };
232
+
233
+ } // namespace internal
234
+
235
+ } // end namespace Eigen
236
+
237
+ #endif // EIGEN_SOLVETRIANGULAR_H
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/SolverBase.h ADDED
@@ -0,0 +1,159 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2015 Gael Guennebaud <gael.guennebaud@inria.fr>
5
+ //
6
+ // This Source Code Form is subject to the terms of the Mozilla
7
+ // Public License v. 2.0. If a copy of the MPL was not distributed
8
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9
+
10
+ #ifndef EIGEN_SOLVERBASE_H
11
+ #define EIGEN_SOLVERBASE_H
12
+
13
+ // IWYU pragma: private
14
+ #include "./InternalHeaderCheck.h"
15
+
16
+ namespace Eigen {
17
+
18
+ namespace internal {
19
+
20
+ template <typename Derived>
21
+ struct solve_assertion {
22
+ template <bool Transpose_, typename Rhs>
23
+ static void run(const Derived& solver, const Rhs& b) {
24
+ solver.template _check_solve_assertion<Transpose_>(b);
25
+ }
26
+ };
27
+
28
+ template <typename Derived>
29
+ struct solve_assertion<Transpose<Derived>> {
30
+ typedef Transpose<Derived> type;
31
+
32
+ template <bool Transpose_, typename Rhs>
33
+ static void run(const type& transpose, const Rhs& b) {
34
+ internal::solve_assertion<internal::remove_all_t<Derived>>::template run<true>(transpose.nestedExpression(), b);
35
+ }
36
+ };
37
+
38
+ template <typename Scalar, typename Derived>
39
+ struct solve_assertion<CwiseUnaryOp<Eigen::internal::scalar_conjugate_op<Scalar>, const Transpose<Derived>>> {
40
+ typedef CwiseUnaryOp<Eigen::internal::scalar_conjugate_op<Scalar>, const Transpose<Derived>> type;
41
+
42
+ template <bool Transpose_, typename Rhs>
43
+ static void run(const type& adjoint, const Rhs& b) {
44
+ internal::solve_assertion<internal::remove_all_t<Transpose<Derived>>>::template run<true>(
45
+ adjoint.nestedExpression(), b);
46
+ }
47
+ };
48
+ } // end namespace internal
49
+
50
+ /** \class SolverBase
51
+ * \brief A base class for matrix decomposition and solvers
52
+ *
53
+ * \tparam Derived the actual type of the decomposition/solver.
54
+ *
55
+ * Any matrix decomposition inheriting this base class provide the following API:
56
+ *
57
+ * \code
58
+ * MatrixType A, b, x;
59
+ * DecompositionType dec(A);
60
+ * x = dec.solve(b); // solve A * x = b
61
+ * x = dec.transpose().solve(b); // solve A^T * x = b
62
+ * x = dec.adjoint().solve(b); // solve A' * x = b
63
+ * \endcode
64
+ *
65
+ * \warning Currently, any other usage of transpose() and adjoint() are not supported and will produce compilation
66
+ * errors.
67
+ *
68
+ * \sa class PartialPivLU, class FullPivLU, class HouseholderQR, class ColPivHouseholderQR, class FullPivHouseholderQR,
69
+ * class CompleteOrthogonalDecomposition, class LLT, class LDLT, class SVDBase
70
+ */
71
+ template <typename Derived>
72
+ class SolverBase : public EigenBase<Derived> {
73
+ public:
74
+ typedef EigenBase<Derived> Base;
75
+ typedef typename internal::traits<Derived>::Scalar Scalar;
76
+ typedef Scalar CoeffReturnType;
77
+
78
+ template <typename Derived_>
79
+ friend struct internal::solve_assertion;
80
+
81
+ enum {
82
+ RowsAtCompileTime = internal::traits<Derived>::RowsAtCompileTime,
83
+ ColsAtCompileTime = internal::traits<Derived>::ColsAtCompileTime,
84
+ SizeAtCompileTime = (internal::size_of_xpr_at_compile_time<Derived>::ret),
85
+ MaxRowsAtCompileTime = internal::traits<Derived>::MaxRowsAtCompileTime,
86
+ MaxColsAtCompileTime = internal::traits<Derived>::MaxColsAtCompileTime,
87
+ MaxSizeAtCompileTime = internal::size_at_compile_time(internal::traits<Derived>::MaxRowsAtCompileTime,
88
+ internal::traits<Derived>::MaxColsAtCompileTime),
89
+ IsVectorAtCompileTime =
90
+ internal::traits<Derived>::MaxRowsAtCompileTime == 1 || internal::traits<Derived>::MaxColsAtCompileTime == 1,
91
+ NumDimensions = int(MaxSizeAtCompileTime) == 1 ? 0
92
+ : bool(IsVectorAtCompileTime) ? 1
93
+ : 2
94
+ };
95
+
96
+ /** Default constructor */
97
+ SolverBase() {}
98
+
99
+ ~SolverBase() {}
100
+
101
+ using Base::derived;
102
+
103
+ /** \returns an expression of the solution x of \f$ A x = b \f$ using the current decomposition of A.
104
+ */
105
+ template <typename Rhs>
106
+ inline const Solve<Derived, Rhs> solve(const MatrixBase<Rhs>& b) const {
107
+ internal::solve_assertion<internal::remove_all_t<Derived>>::template run<false>(derived(), b);
108
+ return Solve<Derived, Rhs>(derived(), b.derived());
109
+ }
110
+
111
+ /** \internal the return type of transpose() */
112
+ typedef Transpose<const Derived> ConstTransposeReturnType;
113
+ /** \returns an expression of the transposed of the factored matrix.
114
+ *
115
+ * A typical usage is to solve for the transposed problem A^T x = b:
116
+ * \code x = dec.transpose().solve(b); \endcode
117
+ *
118
+ * \sa adjoint(), solve()
119
+ */
120
+ inline const ConstTransposeReturnType transpose() const { return ConstTransposeReturnType(derived()); }
121
+
122
+ /** \internal the return type of adjoint() */
123
+ typedef std::conditional_t<NumTraits<Scalar>::IsComplex,
124
+ CwiseUnaryOp<internal::scalar_conjugate_op<Scalar>, const ConstTransposeReturnType>,
125
+ const ConstTransposeReturnType>
126
+ AdjointReturnType;
127
+ /** \returns an expression of the adjoint of the factored matrix
128
+ *
129
+ * A typical usage is to solve for the adjoint problem A' x = b:
130
+ * \code x = dec.adjoint().solve(b); \endcode
131
+ *
132
+ * For real scalar types, this function is equivalent to transpose().
133
+ *
134
+ * \sa transpose(), solve()
135
+ */
136
+ inline const AdjointReturnType adjoint() const { return AdjointReturnType(derived().transpose()); }
137
+
138
+ protected:
139
+ template <bool Transpose_, typename Rhs>
140
+ void _check_solve_assertion(const Rhs& b) const {
141
+ EIGEN_ONLY_USED_FOR_DEBUG(b);
142
+ eigen_assert(derived().m_isInitialized && "Solver is not initialized.");
143
+ eigen_assert((Transpose_ ? derived().cols() : derived().rows()) == b.rows() &&
144
+ "SolverBase::solve(): invalid number of rows of the right hand side matrix b");
145
+ }
146
+ };
147
+
148
+ namespace internal {
149
+
150
+ template <typename Derived>
151
+ struct generic_xpr_base<Derived, MatrixXpr, SolverStorage> {
152
+ typedef SolverBase<Derived> type;
153
+ };
154
+
155
+ } // end namespace internal
156
+
157
+ } // end namespace Eigen
158
+
159
+ #endif // EIGEN_SOLVERBASE_H
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/StlIterators.h ADDED
@@ -0,0 +1,620 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2018 Gael Guennebaud <gael.guennebaud@inria.fr>
5
+ //
6
+ // This Source Code Form is subject to the terms of the Mozilla
7
+ // Public License v. 2.0. If a copy of the MPL was not distributed
8
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9
+
10
+ #ifndef EIGEN_STLITERATORS_H
11
+ #define EIGEN_STLITERATORS_H
12
+
13
+ // IWYU pragma: private
14
+ #include "./InternalHeaderCheck.h"
15
+
16
+ namespace Eigen {
17
+
18
+ namespace internal {
19
+
20
+ template <typename IteratorType>
21
+ struct indexed_based_stl_iterator_traits;
22
+
23
+ template <typename Derived>
24
+ class indexed_based_stl_iterator_base {
25
+ protected:
26
+ typedef indexed_based_stl_iterator_traits<Derived> traits;
27
+ typedef typename traits::XprType XprType;
28
+ typedef indexed_based_stl_iterator_base<typename traits::non_const_iterator> non_const_iterator;
29
+ typedef indexed_based_stl_iterator_base<typename traits::const_iterator> const_iterator;
30
+ typedef std::conditional_t<internal::is_const<XprType>::value, non_const_iterator, const_iterator> other_iterator;
31
+ // NOTE: in C++03 we cannot declare friend classes through typedefs because we need to write friend class:
32
+ friend class indexed_based_stl_iterator_base<typename traits::const_iterator>;
33
+ friend class indexed_based_stl_iterator_base<typename traits::non_const_iterator>;
34
+
35
+ public:
36
+ typedef Index difference_type;
37
+ typedef std::random_access_iterator_tag iterator_category;
38
+
39
+ indexed_based_stl_iterator_base() EIGEN_NO_THROW : mp_xpr(0), m_index(0) {}
40
+ indexed_based_stl_iterator_base(XprType& xpr, Index index) EIGEN_NO_THROW : mp_xpr(&xpr), m_index(index) {}
41
+
42
+ indexed_based_stl_iterator_base(const non_const_iterator& other) EIGEN_NO_THROW : mp_xpr(other.mp_xpr),
43
+ m_index(other.m_index) {}
44
+
45
+ indexed_based_stl_iterator_base& operator=(const non_const_iterator& other) {
46
+ mp_xpr = other.mp_xpr;
47
+ m_index = other.m_index;
48
+ return *this;
49
+ }
50
+
51
+ Derived& operator++() {
52
+ ++m_index;
53
+ return derived();
54
+ }
55
+ Derived& operator--() {
56
+ --m_index;
57
+ return derived();
58
+ }
59
+
60
+ Derived operator++(int) {
61
+ Derived prev(derived());
62
+ operator++();
63
+ return prev;
64
+ }
65
+ Derived operator--(int) {
66
+ Derived prev(derived());
67
+ operator--();
68
+ return prev;
69
+ }
70
+
71
+ friend Derived operator+(const indexed_based_stl_iterator_base& a, Index b) {
72
+ Derived ret(a.derived());
73
+ ret += b;
74
+ return ret;
75
+ }
76
+ friend Derived operator-(const indexed_based_stl_iterator_base& a, Index b) {
77
+ Derived ret(a.derived());
78
+ ret -= b;
79
+ return ret;
80
+ }
81
+ friend Derived operator+(Index a, const indexed_based_stl_iterator_base& b) {
82
+ Derived ret(b.derived());
83
+ ret += a;
84
+ return ret;
85
+ }
86
+ friend Derived operator-(Index a, const indexed_based_stl_iterator_base& b) {
87
+ Derived ret(b.derived());
88
+ ret -= a;
89
+ return ret;
90
+ }
91
+
92
+ Derived& operator+=(Index b) {
93
+ m_index += b;
94
+ return derived();
95
+ }
96
+ Derived& operator-=(Index b) {
97
+ m_index -= b;
98
+ return derived();
99
+ }
100
+
101
+ difference_type operator-(const indexed_based_stl_iterator_base& other) const {
102
+ eigen_assert(mp_xpr == other.mp_xpr);
103
+ return m_index - other.m_index;
104
+ }
105
+
106
+ difference_type operator-(const other_iterator& other) const {
107
+ eigen_assert(mp_xpr == other.mp_xpr);
108
+ return m_index - other.m_index;
109
+ }
110
+
111
+ bool operator==(const indexed_based_stl_iterator_base& other) const {
112
+ eigen_assert(mp_xpr == other.mp_xpr);
113
+ return m_index == other.m_index;
114
+ }
115
+ bool operator!=(const indexed_based_stl_iterator_base& other) const {
116
+ eigen_assert(mp_xpr == other.mp_xpr);
117
+ return m_index != other.m_index;
118
+ }
119
+ bool operator<(const indexed_based_stl_iterator_base& other) const {
120
+ eigen_assert(mp_xpr == other.mp_xpr);
121
+ return m_index < other.m_index;
122
+ }
123
+ bool operator<=(const indexed_based_stl_iterator_base& other) const {
124
+ eigen_assert(mp_xpr == other.mp_xpr);
125
+ return m_index <= other.m_index;
126
+ }
127
+ bool operator>(const indexed_based_stl_iterator_base& other) const {
128
+ eigen_assert(mp_xpr == other.mp_xpr);
129
+ return m_index > other.m_index;
130
+ }
131
+ bool operator>=(const indexed_based_stl_iterator_base& other) const {
132
+ eigen_assert(mp_xpr == other.mp_xpr);
133
+ return m_index >= other.m_index;
134
+ }
135
+
136
+ bool operator==(const other_iterator& other) const {
137
+ eigen_assert(mp_xpr == other.mp_xpr);
138
+ return m_index == other.m_index;
139
+ }
140
+ bool operator!=(const other_iterator& other) const {
141
+ eigen_assert(mp_xpr == other.mp_xpr);
142
+ return m_index != other.m_index;
143
+ }
144
+ bool operator<(const other_iterator& other) const {
145
+ eigen_assert(mp_xpr == other.mp_xpr);
146
+ return m_index < other.m_index;
147
+ }
148
+ bool operator<=(const other_iterator& other) const {
149
+ eigen_assert(mp_xpr == other.mp_xpr);
150
+ return m_index <= other.m_index;
151
+ }
152
+ bool operator>(const other_iterator& other) const {
153
+ eigen_assert(mp_xpr == other.mp_xpr);
154
+ return m_index > other.m_index;
155
+ }
156
+ bool operator>=(const other_iterator& other) const {
157
+ eigen_assert(mp_xpr == other.mp_xpr);
158
+ return m_index >= other.m_index;
159
+ }
160
+
161
+ protected:
162
+ Derived& derived() { return static_cast<Derived&>(*this); }
163
+ const Derived& derived() const { return static_cast<const Derived&>(*this); }
164
+
165
+ XprType* mp_xpr;
166
+ Index m_index;
167
+ };
168
+
169
+ template <typename Derived>
170
+ class indexed_based_stl_reverse_iterator_base {
171
+ protected:
172
+ typedef indexed_based_stl_iterator_traits<Derived> traits;
173
+ typedef typename traits::XprType XprType;
174
+ typedef indexed_based_stl_reverse_iterator_base<typename traits::non_const_iterator> non_const_iterator;
175
+ typedef indexed_based_stl_reverse_iterator_base<typename traits::const_iterator> const_iterator;
176
+ typedef std::conditional_t<internal::is_const<XprType>::value, non_const_iterator, const_iterator> other_iterator;
177
+ // NOTE: in C++03 we cannot declare friend classes through typedefs because we need to write friend class:
178
+ friend class indexed_based_stl_reverse_iterator_base<typename traits::const_iterator>;
179
+ friend class indexed_based_stl_reverse_iterator_base<typename traits::non_const_iterator>;
180
+
181
+ public:
182
+ typedef Index difference_type;
183
+ typedef std::random_access_iterator_tag iterator_category;
184
+
185
+ indexed_based_stl_reverse_iterator_base() : mp_xpr(0), m_index(0) {}
186
+ indexed_based_stl_reverse_iterator_base(XprType& xpr, Index index) : mp_xpr(&xpr), m_index(index) {}
187
+
188
+ indexed_based_stl_reverse_iterator_base(const non_const_iterator& other)
189
+ : mp_xpr(other.mp_xpr), m_index(other.m_index) {}
190
+
191
+ indexed_based_stl_reverse_iterator_base& operator=(const non_const_iterator& other) {
192
+ mp_xpr = other.mp_xpr;
193
+ m_index = other.m_index;
194
+ return *this;
195
+ }
196
+
197
+ Derived& operator++() {
198
+ --m_index;
199
+ return derived();
200
+ }
201
+ Derived& operator--() {
202
+ ++m_index;
203
+ return derived();
204
+ }
205
+
206
+ Derived operator++(int) {
207
+ Derived prev(derived());
208
+ operator++();
209
+ return prev;
210
+ }
211
+ Derived operator--(int) {
212
+ Derived prev(derived());
213
+ operator--();
214
+ return prev;
215
+ }
216
+
217
+ friend Derived operator+(const indexed_based_stl_reverse_iterator_base& a, Index b) {
218
+ Derived ret(a.derived());
219
+ ret += b;
220
+ return ret;
221
+ }
222
+ friend Derived operator-(const indexed_based_stl_reverse_iterator_base& a, Index b) {
223
+ Derived ret(a.derived());
224
+ ret -= b;
225
+ return ret;
226
+ }
227
+ friend Derived operator+(Index a, const indexed_based_stl_reverse_iterator_base& b) {
228
+ Derived ret(b.derived());
229
+ ret += a;
230
+ return ret;
231
+ }
232
+ friend Derived operator-(Index a, const indexed_based_stl_reverse_iterator_base& b) {
233
+ Derived ret(b.derived());
234
+ ret -= a;
235
+ return ret;
236
+ }
237
+
238
+ Derived& operator+=(Index b) {
239
+ m_index -= b;
240
+ return derived();
241
+ }
242
+ Derived& operator-=(Index b) {
243
+ m_index += b;
244
+ return derived();
245
+ }
246
+
247
+ difference_type operator-(const indexed_based_stl_reverse_iterator_base& other) const {
248
+ eigen_assert(mp_xpr == other.mp_xpr);
249
+ return other.m_index - m_index;
250
+ }
251
+
252
+ difference_type operator-(const other_iterator& other) const {
253
+ eigen_assert(mp_xpr == other.mp_xpr);
254
+ return other.m_index - m_index;
255
+ }
256
+
257
+ bool operator==(const indexed_based_stl_reverse_iterator_base& other) const {
258
+ eigen_assert(mp_xpr == other.mp_xpr);
259
+ return m_index == other.m_index;
260
+ }
261
+ bool operator!=(const indexed_based_stl_reverse_iterator_base& other) const {
262
+ eigen_assert(mp_xpr == other.mp_xpr);
263
+ return m_index != other.m_index;
264
+ }
265
+ bool operator<(const indexed_based_stl_reverse_iterator_base& other) const {
266
+ eigen_assert(mp_xpr == other.mp_xpr);
267
+ return m_index > other.m_index;
268
+ }
269
+ bool operator<=(const indexed_based_stl_reverse_iterator_base& other) const {
270
+ eigen_assert(mp_xpr == other.mp_xpr);
271
+ return m_index >= other.m_index;
272
+ }
273
+ bool operator>(const indexed_based_stl_reverse_iterator_base& other) const {
274
+ eigen_assert(mp_xpr == other.mp_xpr);
275
+ return m_index < other.m_index;
276
+ }
277
+ bool operator>=(const indexed_based_stl_reverse_iterator_base& other) const {
278
+ eigen_assert(mp_xpr == other.mp_xpr);
279
+ return m_index <= other.m_index;
280
+ }
281
+
282
+ bool operator==(const other_iterator& other) const {
283
+ eigen_assert(mp_xpr == other.mp_xpr);
284
+ return m_index == other.m_index;
285
+ }
286
+ bool operator!=(const other_iterator& other) const {
287
+ eigen_assert(mp_xpr == other.mp_xpr);
288
+ return m_index != other.m_index;
289
+ }
290
+ bool operator<(const other_iterator& other) const {
291
+ eigen_assert(mp_xpr == other.mp_xpr);
292
+ return m_index > other.m_index;
293
+ }
294
+ bool operator<=(const other_iterator& other) const {
295
+ eigen_assert(mp_xpr == other.mp_xpr);
296
+ return m_index >= other.m_index;
297
+ }
298
+ bool operator>(const other_iterator& other) const {
299
+ eigen_assert(mp_xpr == other.mp_xpr);
300
+ return m_index < other.m_index;
301
+ }
302
+ bool operator>=(const other_iterator& other) const {
303
+ eigen_assert(mp_xpr == other.mp_xpr);
304
+ return m_index <= other.m_index;
305
+ }
306
+
307
+ protected:
308
+ Derived& derived() { return static_cast<Derived&>(*this); }
309
+ const Derived& derived() const { return static_cast<const Derived&>(*this); }
310
+
311
+ XprType* mp_xpr;
312
+ Index m_index;
313
+ };
314
+
315
+ template <typename XprType>
316
+ class pointer_based_stl_iterator {
317
+ enum { is_lvalue = internal::is_lvalue<XprType>::value };
318
+ typedef pointer_based_stl_iterator<std::remove_const_t<XprType>> non_const_iterator;
319
+ typedef pointer_based_stl_iterator<std::add_const_t<XprType>> const_iterator;
320
+ typedef std::conditional_t<internal::is_const<XprType>::value, non_const_iterator, const_iterator> other_iterator;
321
+ // NOTE: in C++03 we cannot declare friend classes through typedefs because we need to write friend class:
322
+ friend class pointer_based_stl_iterator<std::add_const_t<XprType>>;
323
+ friend class pointer_based_stl_iterator<std::remove_const_t<XprType>>;
324
+
325
+ public:
326
+ typedef Index difference_type;
327
+ typedef typename XprType::Scalar value_type;
328
+ #if __cplusplus >= 202002L
329
+ typedef std::conditional_t<XprType::InnerStrideAtCompileTime == 1, std::contiguous_iterator_tag,
330
+ std::random_access_iterator_tag>
331
+ iterator_category;
332
+ #else
333
+ typedef std::random_access_iterator_tag iterator_category;
334
+ #endif
335
+ typedef std::conditional_t<bool(is_lvalue), value_type*, const value_type*> pointer;
336
+ typedef std::conditional_t<bool(is_lvalue), value_type&, const value_type&> reference;
337
+
338
+ pointer_based_stl_iterator() EIGEN_NO_THROW : m_ptr(0) {}
339
+ pointer_based_stl_iterator(XprType& xpr, Index index) EIGEN_NO_THROW : m_incr(xpr.innerStride()) {
340
+ m_ptr = xpr.data() + index * m_incr.value();
341
+ }
342
+
343
+ pointer_based_stl_iterator(const non_const_iterator& other) EIGEN_NO_THROW : m_ptr(other.m_ptr),
344
+ m_incr(other.m_incr) {}
345
+
346
+ pointer_based_stl_iterator& operator=(const non_const_iterator& other) EIGEN_NO_THROW {
347
+ m_ptr = other.m_ptr;
348
+ m_incr.setValue(other.m_incr);
349
+ return *this;
350
+ }
351
+
352
+ reference operator*() const { return *m_ptr; }
353
+ reference operator[](Index i) const { return *(m_ptr + i * m_incr.value()); }
354
+ pointer operator->() const { return m_ptr; }
355
+
356
+ pointer_based_stl_iterator& operator++() {
357
+ m_ptr += m_incr.value();
358
+ return *this;
359
+ }
360
+ pointer_based_stl_iterator& operator--() {
361
+ m_ptr -= m_incr.value();
362
+ return *this;
363
+ }
364
+
365
+ pointer_based_stl_iterator operator++(int) {
366
+ pointer_based_stl_iterator prev(*this);
367
+ operator++();
368
+ return prev;
369
+ }
370
+ pointer_based_stl_iterator operator--(int) {
371
+ pointer_based_stl_iterator prev(*this);
372
+ operator--();
373
+ return prev;
374
+ }
375
+
376
+ friend pointer_based_stl_iterator operator+(const pointer_based_stl_iterator& a, Index b) {
377
+ pointer_based_stl_iterator ret(a);
378
+ ret += b;
379
+ return ret;
380
+ }
381
+ friend pointer_based_stl_iterator operator-(const pointer_based_stl_iterator& a, Index b) {
382
+ pointer_based_stl_iterator ret(a);
383
+ ret -= b;
384
+ return ret;
385
+ }
386
+ friend pointer_based_stl_iterator operator+(Index a, const pointer_based_stl_iterator& b) {
387
+ pointer_based_stl_iterator ret(b);
388
+ ret += a;
389
+ return ret;
390
+ }
391
+ friend pointer_based_stl_iterator operator-(Index a, const pointer_based_stl_iterator& b) {
392
+ pointer_based_stl_iterator ret(b);
393
+ ret -= a;
394
+ return ret;
395
+ }
396
+
397
+ pointer_based_stl_iterator& operator+=(Index b) {
398
+ m_ptr += b * m_incr.value();
399
+ return *this;
400
+ }
401
+ pointer_based_stl_iterator& operator-=(Index b) {
402
+ m_ptr -= b * m_incr.value();
403
+ return *this;
404
+ }
405
+
406
+ difference_type operator-(const pointer_based_stl_iterator& other) const {
407
+ return (m_ptr - other.m_ptr) / m_incr.value();
408
+ }
409
+
410
+ difference_type operator-(const other_iterator& other) const { return (m_ptr - other.m_ptr) / m_incr.value(); }
411
+
412
+ bool operator==(const pointer_based_stl_iterator& other) const { return m_ptr == other.m_ptr; }
413
+ bool operator!=(const pointer_based_stl_iterator& other) const { return m_ptr != other.m_ptr; }
414
+ bool operator<(const pointer_based_stl_iterator& other) const { return m_ptr < other.m_ptr; }
415
+ bool operator<=(const pointer_based_stl_iterator& other) const { return m_ptr <= other.m_ptr; }
416
+ bool operator>(const pointer_based_stl_iterator& other) const { return m_ptr > other.m_ptr; }
417
+ bool operator>=(const pointer_based_stl_iterator& other) const { return m_ptr >= other.m_ptr; }
418
+
419
+ bool operator==(const other_iterator& other) const { return m_ptr == other.m_ptr; }
420
+ bool operator!=(const other_iterator& other) const { return m_ptr != other.m_ptr; }
421
+ bool operator<(const other_iterator& other) const { return m_ptr < other.m_ptr; }
422
+ bool operator<=(const other_iterator& other) const { return m_ptr <= other.m_ptr; }
423
+ bool operator>(const other_iterator& other) const { return m_ptr > other.m_ptr; }
424
+ bool operator>=(const other_iterator& other) const { return m_ptr >= other.m_ptr; }
425
+
426
+ protected:
427
+ pointer m_ptr;
428
+ internal::variable_if_dynamic<Index, XprType::InnerStrideAtCompileTime> m_incr;
429
+ };
430
+
431
+ template <typename XprType_>
432
+ struct indexed_based_stl_iterator_traits<generic_randaccess_stl_iterator<XprType_>> {
433
+ typedef XprType_ XprType;
434
+ typedef generic_randaccess_stl_iterator<std::remove_const_t<XprType>> non_const_iterator;
435
+ typedef generic_randaccess_stl_iterator<std::add_const_t<XprType>> const_iterator;
436
+ };
437
+
438
+ template <typename XprType>
439
+ class generic_randaccess_stl_iterator
440
+ : public indexed_based_stl_iterator_base<generic_randaccess_stl_iterator<XprType>> {
441
+ public:
442
+ typedef typename XprType::Scalar value_type;
443
+
444
+ protected:
445
+ enum {
446
+ has_direct_access = (internal::traits<XprType>::Flags & DirectAccessBit) ? 1 : 0,
447
+ is_lvalue = internal::is_lvalue<XprType>::value
448
+ };
449
+
450
+ typedef indexed_based_stl_iterator_base<generic_randaccess_stl_iterator> Base;
451
+ using Base::m_index;
452
+ using Base::mp_xpr;
453
+
454
+ // TODO currently const Transpose/Reshape expressions never returns const references,
455
+ // so lets return by value too.
456
+ // typedef std::conditional_t<bool(has_direct_access), const value_type&, const value_type> read_only_ref_t;
457
+ typedef const value_type read_only_ref_t;
458
+
459
+ public:
460
+ typedef std::conditional_t<bool(is_lvalue), value_type*, const value_type*> pointer;
461
+ typedef std::conditional_t<bool(is_lvalue), value_type&, read_only_ref_t> reference;
462
+
463
+ generic_randaccess_stl_iterator() : Base() {}
464
+ generic_randaccess_stl_iterator(XprType& xpr, Index index) : Base(xpr, index) {}
465
+ generic_randaccess_stl_iterator(const typename Base::non_const_iterator& other) : Base(other) {}
466
+ using Base::operator=;
467
+
468
+ reference operator*() const { return (*mp_xpr)(m_index); }
469
+ reference operator[](Index i) const { return (*mp_xpr)(m_index + i); }
470
+ pointer operator->() const { return &((*mp_xpr)(m_index)); }
471
+ };
472
+
473
+ template <typename XprType_, DirectionType Direction>
474
+ struct indexed_based_stl_iterator_traits<subvector_stl_iterator<XprType_, Direction>> {
475
+ typedef XprType_ XprType;
476
+ typedef subvector_stl_iterator<std::remove_const_t<XprType>, Direction> non_const_iterator;
477
+ typedef subvector_stl_iterator<std::add_const_t<XprType>, Direction> const_iterator;
478
+ };
479
+
480
+ template <typename XprType, DirectionType Direction>
481
+ class subvector_stl_iterator : public indexed_based_stl_iterator_base<subvector_stl_iterator<XprType, Direction>> {
482
+ protected:
483
+ enum { is_lvalue = internal::is_lvalue<XprType>::value };
484
+
485
+ typedef indexed_based_stl_iterator_base<subvector_stl_iterator> Base;
486
+ using Base::m_index;
487
+ using Base::mp_xpr;
488
+
489
+ typedef std::conditional_t<Direction == Vertical, typename XprType::ColXpr, typename XprType::RowXpr> SubVectorType;
490
+ typedef std::conditional_t<Direction == Vertical, typename XprType::ConstColXpr, typename XprType::ConstRowXpr>
491
+ ConstSubVectorType;
492
+
493
+ public:
494
+ typedef std::conditional_t<bool(is_lvalue), SubVectorType, ConstSubVectorType> reference;
495
+ typedef typename reference::PlainObject value_type;
496
+
497
+ private:
498
+ class subvector_stl_iterator_ptr {
499
+ public:
500
+ subvector_stl_iterator_ptr(const reference& subvector) : m_subvector(subvector) {}
501
+ reference* operator->() { return &m_subvector; }
502
+
503
+ private:
504
+ reference m_subvector;
505
+ };
506
+
507
+ public:
508
+ typedef subvector_stl_iterator_ptr pointer;
509
+
510
+ subvector_stl_iterator() : Base() {}
511
+ subvector_stl_iterator(XprType& xpr, Index index) : Base(xpr, index) {}
512
+
513
+ reference operator*() const { return (*mp_xpr).template subVector<Direction>(m_index); }
514
+ reference operator[](Index i) const { return (*mp_xpr).template subVector<Direction>(m_index + i); }
515
+ pointer operator->() const { return (*mp_xpr).template subVector<Direction>(m_index); }
516
+ };
517
+
518
+ template <typename XprType_, DirectionType Direction>
519
+ struct indexed_based_stl_iterator_traits<subvector_stl_reverse_iterator<XprType_, Direction>> {
520
+ typedef XprType_ XprType;
521
+ typedef subvector_stl_reverse_iterator<std::remove_const_t<XprType>, Direction> non_const_iterator;
522
+ typedef subvector_stl_reverse_iterator<std::add_const_t<XprType>, Direction> const_iterator;
523
+ };
524
+
525
+ template <typename XprType, DirectionType Direction>
526
+ class subvector_stl_reverse_iterator
527
+ : public indexed_based_stl_reverse_iterator_base<subvector_stl_reverse_iterator<XprType, Direction>> {
528
+ protected:
529
+ enum { is_lvalue = internal::is_lvalue<XprType>::value };
530
+
531
+ typedef indexed_based_stl_reverse_iterator_base<subvector_stl_reverse_iterator> Base;
532
+ using Base::m_index;
533
+ using Base::mp_xpr;
534
+
535
+ typedef std::conditional_t<Direction == Vertical, typename XprType::ColXpr, typename XprType::RowXpr> SubVectorType;
536
+ typedef std::conditional_t<Direction == Vertical, typename XprType::ConstColXpr, typename XprType::ConstRowXpr>
537
+ ConstSubVectorType;
538
+
539
+ public:
540
+ typedef std::conditional_t<bool(is_lvalue), SubVectorType, ConstSubVectorType> reference;
541
+ typedef typename reference::PlainObject value_type;
542
+
543
+ private:
544
+ class subvector_stl_reverse_iterator_ptr {
545
+ public:
546
+ subvector_stl_reverse_iterator_ptr(const reference& subvector) : m_subvector(subvector) {}
547
+ reference* operator->() { return &m_subvector; }
548
+
549
+ private:
550
+ reference m_subvector;
551
+ };
552
+
553
+ public:
554
+ typedef subvector_stl_reverse_iterator_ptr pointer;
555
+
556
+ subvector_stl_reverse_iterator() : Base() {}
557
+ subvector_stl_reverse_iterator(XprType& xpr, Index index) : Base(xpr, index) {}
558
+
559
+ reference operator*() const { return (*mp_xpr).template subVector<Direction>(m_index); }
560
+ reference operator[](Index i) const { return (*mp_xpr).template subVector<Direction>(m_index + i); }
561
+ pointer operator->() const { return (*mp_xpr).template subVector<Direction>(m_index); }
562
+ };
563
+
564
+ } // namespace internal
565
+
566
+ /** returns an iterator to the first element of the 1D vector or array
567
+ * \only_for_vectors
568
+ * \sa end(), cbegin()
569
+ */
570
+ template <typename Derived>
571
+ inline typename DenseBase<Derived>::iterator DenseBase<Derived>::begin() {
572
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);
573
+ return iterator(derived(), 0);
574
+ }
575
+
576
+ /** const version of begin() */
577
+ template <typename Derived>
578
+ inline typename DenseBase<Derived>::const_iterator DenseBase<Derived>::begin() const {
579
+ return cbegin();
580
+ }
581
+
582
+ /** returns a read-only const_iterator to the first element of the 1D vector or array
583
+ * \only_for_vectors
584
+ * \sa cend(), begin()
585
+ */
586
+ template <typename Derived>
587
+ inline typename DenseBase<Derived>::const_iterator DenseBase<Derived>::cbegin() const {
588
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);
589
+ return const_iterator(derived(), 0);
590
+ }
591
+
592
+ /** returns an iterator to the element following the last element of the 1D vector or array
593
+ * \only_for_vectors
594
+ * \sa begin(), cend()
595
+ */
596
+ template <typename Derived>
597
+ inline typename DenseBase<Derived>::iterator DenseBase<Derived>::end() {
598
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);
599
+ return iterator(derived(), size());
600
+ }
601
+
602
+ /** const version of end() */
603
+ template <typename Derived>
604
+ inline typename DenseBase<Derived>::const_iterator DenseBase<Derived>::end() const {
605
+ return cend();
606
+ }
607
+
608
+ /** returns a read-only const_iterator to the element following the last element of the 1D vector or array
609
+ * \only_for_vectors
610
+ * \sa begin(), cend()
611
+ */
612
+ template <typename Derived>
613
+ inline typename DenseBase<Derived>::const_iterator DenseBase<Derived>::cend() const {
614
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);
615
+ return const_iterator(derived(), size());
616
+ }
617
+
618
+ } // namespace Eigen
619
+
620
+ #endif // EIGEN_STLITERATORS_H
infer_4_30_0/lib/python3.10/site-packages/tensorflow/include/Eigen/src/Core/Swap.h ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
5
+ //
6
+ // This Source Code Form is subject to the terms of the Mozilla
7
+ // Public License v. 2.0. If a copy of the MPL was not distributed
8
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9
+
10
+ #ifndef EIGEN_SWAP_H
11
+ #define EIGEN_SWAP_H
12
+
13
+ // IWYU pragma: private
14
+ #include "./InternalHeaderCheck.h"
15
+
16
+ namespace Eigen {
17
+
18
+ namespace internal {
19
+
20
+ // Overload default assignPacket behavior for swapping them
21
+ template <typename DstEvaluatorTypeT, typename SrcEvaluatorTypeT>
22
+ class generic_dense_assignment_kernel<DstEvaluatorTypeT, SrcEvaluatorTypeT,
23
+ swap_assign_op<typename DstEvaluatorTypeT::Scalar>, Specialized>
24
+ : public generic_dense_assignment_kernel<DstEvaluatorTypeT, SrcEvaluatorTypeT,
25
+ swap_assign_op<typename DstEvaluatorTypeT::Scalar>, BuiltIn> {
26
+ protected:
27
+ typedef generic_dense_assignment_kernel<DstEvaluatorTypeT, SrcEvaluatorTypeT,
28
+ swap_assign_op<typename DstEvaluatorTypeT::Scalar>, BuiltIn>
29
+ Base;
30
+ using Base::m_dst;
31
+ using Base::m_functor;
32
+ using Base::m_src;
33
+
34
+ public:
35
+ typedef typename Base::Scalar Scalar;
36
+ typedef typename Base::DstXprType DstXprType;
37
+ typedef swap_assign_op<Scalar> Functor;
38
+
39
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE generic_dense_assignment_kernel(DstEvaluatorTypeT &dst,
40
+ const SrcEvaluatorTypeT &src,
41
+ const Functor &func, DstXprType &dstExpr)
42
+ : Base(dst, src, func, dstExpr) {}
43
+
44
+ template <int StoreMode, int LoadMode, typename PacketType>
45
+ EIGEN_STRONG_INLINE void assignPacket(Index row, Index col) {
46
+ PacketType tmp = m_src.template packet<LoadMode, PacketType>(row, col);
47
+ const_cast<SrcEvaluatorTypeT &>(m_src).template writePacket<LoadMode>(
48
+ row, col, m_dst.template packet<StoreMode, PacketType>(row, col));
49
+ m_dst.template writePacket<StoreMode>(row, col, tmp);
50
+ }
51
+
52
+ template <int StoreMode, int LoadMode, typename PacketType>
53
+ EIGEN_STRONG_INLINE void assignPacket(Index index) {
54
+ PacketType tmp = m_src.template packet<LoadMode, PacketType>(index);
55
+ const_cast<SrcEvaluatorTypeT &>(m_src).template writePacket<LoadMode>(
56
+ index, m_dst.template packet<StoreMode, PacketType>(index));
57
+ m_dst.template writePacket<StoreMode>(index, tmp);
58
+ }
59
+
60
+ // TODO find a simple way not to have to copy/paste this function from generic_dense_assignment_kernel, by simple I
61
+ // mean no CRTP (Gael)
62
+ template <int StoreMode, int LoadMode, typename PacketType>
63
+ EIGEN_STRONG_INLINE void assignPacketByOuterInner(Index outer, Index inner) {
64
+ Index row = Base::rowIndexByOuterInner(outer, inner);
65
+ Index col = Base::colIndexByOuterInner(outer, inner);
66
+ assignPacket<StoreMode, LoadMode, PacketType>(row, col);
67
+ }
68
+ };
69
+
70
+ } // namespace internal
71
+
72
+ } // end namespace Eigen
73
+
74
+ #endif // EIGEN_SWAP_H