Add files using upload-large-folder tool
Browse files. This view is limited to 50 files because it contains too many changes.
See raw diff
- include/eigen/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h +226 -0
- include/eigen/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h +212 -0
- include/eigen/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h +227 -0
- include/eigen/Eigen/src/IterativeLinearSolvers/IncompleteCholesky.h +394 -0
- include/eigen/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h +453 -0
- include/eigen/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h +444 -0
- include/eigen/Eigen/src/IterativeLinearSolvers/LeastSquareConjugateGradient.h +198 -0
- include/eigen/Eigen/src/IterativeLinearSolvers/SolveWithGuess.h +117 -0
- include/eigen/Eigen/src/PaStiXSupport/PaStiXSupport.h +678 -0
- include/eigen/Eigen/src/SVD/BDCSVD.h +1377 -0
- include/eigen/Eigen/src/SVD/JacobiSVD.h +813 -0
- include/eigen/Eigen/src/SVD/JacobiSVD_LAPACKE.h +91 -0
- include/eigen/Eigen/src/SVD/SVDBase.h +376 -0
- include/eigen/Eigen/src/SVD/UpperBidiagonalization.h +415 -0
- include/eigen/Eigen/src/plugins/CommonCwiseBinaryOps.h +115 -0
- include/eigen/Eigen/src/plugins/IndexedViewMethods.h +262 -0
- include/eigen/doc/CoeffwiseMathFunctionsTable.dox +600 -0
- include/eigen/doc/CustomizingEigen_CustomScalar.dox +120 -0
- include/eigen/doc/CustomizingEigen_InheritingMatrix.dox +34 -0
- include/eigen/doc/CustomizingEigen_Plugins.dox +69 -0
- include/eigen/doc/Doxyfile.in +180 -0
- include/eigen/doc/FixedSizeVectorizable.dox +38 -0
- include/eigen/doc/HiPerformance.dox +128 -0
- include/eigen/doc/InplaceDecomposition.dox +115 -0
- include/eigen/doc/LeastSquares.dox +75 -0
- include/eigen/doc/Manual.dox +191 -0
- include/eigen/doc/Overview.dox +32 -0
- include/eigen/doc/PassingByValue.dox +40 -0
- include/eigen/doc/Pitfalls.dox +149 -0
- include/eigen/doc/QuickStartGuide.dox +100 -0
- include/eigen/doc/SparseQuickReference.dox +272 -0
- include/eigen/doc/StlContainers.dox +73 -0
- include/eigen/doc/StorageOrders.dox +86 -0
- include/eigen/doc/TemplateKeyword.dox +133 -0
- include/eigen/doc/TopicAliasing.dox +237 -0
- include/eigen/doc/TopicCMakeGuide.dox +65 -0
- include/eigen/doc/TopicVectorization.dox +9 -0
- include/eigen/doc/TutorialAdvancedInitialization.dox +162 -0
- include/eigen/doc/TutorialBlockOperations.dox +242 -0
- include/eigen/doc/TutorialGeometry.dox +242 -0
- include/eigen/doc/TutorialLinearAlgebra.dox +299 -0
- include/eigen/doc/TutorialMatrixArithmetic.dox +214 -0
- include/eigen/doc/TutorialReductionsVisitorsBroadcasting.dox +266 -0
- include/eigen/doc/TutorialReshape.dox +82 -0
- include/eigen/doc/TutorialSTL.dox +66 -0
- include/eigen/doc/TutorialSlicingIndexing.dox +245 -0
- include/eigen/doc/UnalignedArrayAssert.dox +133 -0
- include/eigen/doc/UsingBlasLapackBackends.dox +133 -0
- include/eigen/doc/UsingNVCC.dox +30 -0
- include/eigen/doc/eigen_navtree_hacks.js +247 -0
include/eigen/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h
ADDED
|
@@ -0,0 +1,226 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// This file is part of Eigen, a lightweight C++ template library
|
| 2 |
+
// for linear algebra.
|
| 3 |
+
//
|
| 4 |
+
// Copyright (C) 2011-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
|
| 5 |
+
//
|
| 6 |
+
// This Source Code Form is subject to the terms of the Mozilla
|
| 7 |
+
// Public License v. 2.0. If a copy of the MPL was not distributed
|
| 8 |
+
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
| 9 |
+
|
| 10 |
+
#ifndef EIGEN_BASIC_PRECONDITIONERS_H
|
| 11 |
+
#define EIGEN_BASIC_PRECONDITIONERS_H
|
| 12 |
+
|
| 13 |
+
namespace Eigen {
|
| 14 |
+
|
| 15 |
+
/** \ingroup IterativeLinearSolvers_Module
|
| 16 |
+
* \brief A preconditioner based on the digonal entries
|
| 17 |
+
*
|
| 18 |
+
* This class allows to approximately solve for A.x = b problems assuming A is a diagonal matrix.
|
| 19 |
+
* In other words, this preconditioner neglects all off diagonal entries and, in Eigen's language, solves for:
|
| 20 |
+
\code
|
| 21 |
+
A.diagonal().asDiagonal() . x = b
|
| 22 |
+
\endcode
|
| 23 |
+
*
|
| 24 |
+
* \tparam _Scalar the type of the scalar.
|
| 25 |
+
*
|
| 26 |
+
* \implsparsesolverconcept
|
| 27 |
+
*
|
| 28 |
+
* This preconditioner is suitable for both selfadjoint and general problems.
|
| 29 |
+
* The diagonal entries are pre-inverted and stored into a dense vector.
|
| 30 |
+
*
|
| 31 |
+
* \note A variant that has yet to be implemented would attempt to preserve the norm of each column.
|
| 32 |
+
*
|
| 33 |
+
* \sa class LeastSquareDiagonalPreconditioner, class ConjugateGradient
|
| 34 |
+
*/
|
| 35 |
+
template <typename _Scalar>
|
| 36 |
+
class DiagonalPreconditioner
|
| 37 |
+
{
|
| 38 |
+
typedef _Scalar Scalar;
|
| 39 |
+
typedef Matrix<Scalar,Dynamic,1> Vector;
|
| 40 |
+
public:
|
| 41 |
+
typedef typename Vector::StorageIndex StorageIndex;
|
| 42 |
+
enum {
|
| 43 |
+
ColsAtCompileTime = Dynamic,
|
| 44 |
+
MaxColsAtCompileTime = Dynamic
|
| 45 |
+
};
|
| 46 |
+
|
| 47 |
+
DiagonalPreconditioner() : m_isInitialized(false) {}
|
| 48 |
+
|
| 49 |
+
template<typename MatType>
|
| 50 |
+
explicit DiagonalPreconditioner(const MatType& mat) : m_invdiag(mat.cols())
|
| 51 |
+
{
|
| 52 |
+
compute(mat);
|
| 53 |
+
}
|
| 54 |
+
|
| 55 |
+
EIGEN_CONSTEXPR Index rows() const EIGEN_NOEXCEPT { return m_invdiag.size(); }
|
| 56 |
+
EIGEN_CONSTEXPR Index cols() const EIGEN_NOEXCEPT { return m_invdiag.size(); }
|
| 57 |
+
|
| 58 |
+
template<typename MatType>
|
| 59 |
+
DiagonalPreconditioner& analyzePattern(const MatType& )
|
| 60 |
+
{
|
| 61 |
+
return *this;
|
| 62 |
+
}
|
| 63 |
+
|
| 64 |
+
template<typename MatType>
|
| 65 |
+
DiagonalPreconditioner& factorize(const MatType& mat)
|
| 66 |
+
{
|
| 67 |
+
m_invdiag.resize(mat.cols());
|
| 68 |
+
for(int j=0; j<mat.outerSize(); ++j)
|
| 69 |
+
{
|
| 70 |
+
typename MatType::InnerIterator it(mat,j);
|
| 71 |
+
while(it && it.index()!=j) ++it;
|
| 72 |
+
if(it && it.index()==j && it.value()!=Scalar(0))
|
| 73 |
+
m_invdiag(j) = Scalar(1)/it.value();
|
| 74 |
+
else
|
| 75 |
+
m_invdiag(j) = Scalar(1);
|
| 76 |
+
}
|
| 77 |
+
m_isInitialized = true;
|
| 78 |
+
return *this;
|
| 79 |
+
}
|
| 80 |
+
|
| 81 |
+
template<typename MatType>
|
| 82 |
+
DiagonalPreconditioner& compute(const MatType& mat)
|
| 83 |
+
{
|
| 84 |
+
return factorize(mat);
|
| 85 |
+
}
|
| 86 |
+
|
| 87 |
+
/** \internal */
|
| 88 |
+
template<typename Rhs, typename Dest>
|
| 89 |
+
void _solve_impl(const Rhs& b, Dest& x) const
|
| 90 |
+
{
|
| 91 |
+
x = m_invdiag.array() * b.array() ;
|
| 92 |
+
}
|
| 93 |
+
|
| 94 |
+
template<typename Rhs> inline const Solve<DiagonalPreconditioner, Rhs>
|
| 95 |
+
solve(const MatrixBase<Rhs>& b) const
|
| 96 |
+
{
|
| 97 |
+
eigen_assert(m_isInitialized && "DiagonalPreconditioner is not initialized.");
|
| 98 |
+
eigen_assert(m_invdiag.size()==b.rows()
|
| 99 |
+
&& "DiagonalPreconditioner::solve(): invalid number of rows of the right hand side matrix b");
|
| 100 |
+
return Solve<DiagonalPreconditioner, Rhs>(*this, b.derived());
|
| 101 |
+
}
|
| 102 |
+
|
| 103 |
+
ComputationInfo info() { return Success; }
|
| 104 |
+
|
| 105 |
+
protected:
|
| 106 |
+
Vector m_invdiag;
|
| 107 |
+
bool m_isInitialized;
|
| 108 |
+
};
|
| 109 |
+
|
| 110 |
+
/** \ingroup IterativeLinearSolvers_Module
|
| 111 |
+
* \brief Jacobi preconditioner for LeastSquaresConjugateGradient
|
| 112 |
+
*
|
| 113 |
+
* This class allows to approximately solve for A' A x = A' b problems assuming A' A is a diagonal matrix.
|
| 114 |
+
* In other words, this preconditioner neglects all off diagonal entries and, in Eigen's language, solves for:
|
| 115 |
+
\code
|
| 116 |
+
(A.adjoint() * A).diagonal().asDiagonal() * x = b
|
| 117 |
+
\endcode
|
| 118 |
+
*
|
| 119 |
+
* \tparam _Scalar the type of the scalar.
|
| 120 |
+
*
|
| 121 |
+
* \implsparsesolverconcept
|
| 122 |
+
*
|
| 123 |
+
* The diagonal entries are pre-inverted and stored into a dense vector.
|
| 124 |
+
*
|
| 125 |
+
* \sa class LeastSquaresConjugateGradient, class DiagonalPreconditioner
|
| 126 |
+
*/
|
| 127 |
+
template <typename _Scalar>
|
| 128 |
+
class LeastSquareDiagonalPreconditioner : public DiagonalPreconditioner<_Scalar>
|
| 129 |
+
{
|
| 130 |
+
typedef _Scalar Scalar;
|
| 131 |
+
typedef typename NumTraits<Scalar>::Real RealScalar;
|
| 132 |
+
typedef DiagonalPreconditioner<_Scalar> Base;
|
| 133 |
+
using Base::m_invdiag;
|
| 134 |
+
public:
|
| 135 |
+
|
| 136 |
+
LeastSquareDiagonalPreconditioner() : Base() {}
|
| 137 |
+
|
| 138 |
+
template<typename MatType>
|
| 139 |
+
explicit LeastSquareDiagonalPreconditioner(const MatType& mat) : Base()
|
| 140 |
+
{
|
| 141 |
+
compute(mat);
|
| 142 |
+
}
|
| 143 |
+
|
| 144 |
+
template<typename MatType>
|
| 145 |
+
LeastSquareDiagonalPreconditioner& analyzePattern(const MatType& )
|
| 146 |
+
{
|
| 147 |
+
return *this;
|
| 148 |
+
}
|
| 149 |
+
|
| 150 |
+
template<typename MatType>
|
| 151 |
+
LeastSquareDiagonalPreconditioner& factorize(const MatType& mat)
|
| 152 |
+
{
|
| 153 |
+
// Compute the inverse squared-norm of each column of mat
|
| 154 |
+
m_invdiag.resize(mat.cols());
|
| 155 |
+
if(MatType::IsRowMajor)
|
| 156 |
+
{
|
| 157 |
+
m_invdiag.setZero();
|
| 158 |
+
for(Index j=0; j<mat.outerSize(); ++j)
|
| 159 |
+
{
|
| 160 |
+
for(typename MatType::InnerIterator it(mat,j); it; ++it)
|
| 161 |
+
m_invdiag(it.index()) += numext::abs2(it.value());
|
| 162 |
+
}
|
| 163 |
+
for(Index j=0; j<mat.cols(); ++j)
|
| 164 |
+
if(numext::real(m_invdiag(j))>RealScalar(0))
|
| 165 |
+
m_invdiag(j) = RealScalar(1)/numext::real(m_invdiag(j));
|
| 166 |
+
}
|
| 167 |
+
else
|
| 168 |
+
{
|
| 169 |
+
for(Index j=0; j<mat.outerSize(); ++j)
|
| 170 |
+
{
|
| 171 |
+
RealScalar sum = mat.col(j).squaredNorm();
|
| 172 |
+
if(sum>RealScalar(0))
|
| 173 |
+
m_invdiag(j) = RealScalar(1)/sum;
|
| 174 |
+
else
|
| 175 |
+
m_invdiag(j) = RealScalar(1);
|
| 176 |
+
}
|
| 177 |
+
}
|
| 178 |
+
Base::m_isInitialized = true;
|
| 179 |
+
return *this;
|
| 180 |
+
}
|
| 181 |
+
|
| 182 |
+
template<typename MatType>
|
| 183 |
+
LeastSquareDiagonalPreconditioner& compute(const MatType& mat)
|
| 184 |
+
{
|
| 185 |
+
return factorize(mat);
|
| 186 |
+
}
|
| 187 |
+
|
| 188 |
+
ComputationInfo info() { return Success; }
|
| 189 |
+
|
| 190 |
+
protected:
|
| 191 |
+
};
|
| 192 |
+
|
| 193 |
+
/** \ingroup IterativeLinearSolvers_Module
|
| 194 |
+
* \brief A naive preconditioner which approximates any matrix as the identity matrix
|
| 195 |
+
*
|
| 196 |
+
* \implsparsesolverconcept
|
| 197 |
+
*
|
| 198 |
+
* \sa class DiagonalPreconditioner
|
| 199 |
+
*/
|
| 200 |
+
class IdentityPreconditioner
|
| 201 |
+
{
|
| 202 |
+
public:
|
| 203 |
+
|
| 204 |
+
IdentityPreconditioner() {}
|
| 205 |
+
|
| 206 |
+
template<typename MatrixType>
|
| 207 |
+
explicit IdentityPreconditioner(const MatrixType& ) {}
|
| 208 |
+
|
| 209 |
+
template<typename MatrixType>
|
| 210 |
+
IdentityPreconditioner& analyzePattern(const MatrixType& ) { return *this; }
|
| 211 |
+
|
| 212 |
+
template<typename MatrixType>
|
| 213 |
+
IdentityPreconditioner& factorize(const MatrixType& ) { return *this; }
|
| 214 |
+
|
| 215 |
+
template<typename MatrixType>
|
| 216 |
+
IdentityPreconditioner& compute(const MatrixType& ) { return *this; }
|
| 217 |
+
|
| 218 |
+
template<typename Rhs>
|
| 219 |
+
inline const Rhs& solve(const Rhs& b) const { return b; }
|
| 220 |
+
|
| 221 |
+
ComputationInfo info() { return Success; }
|
| 222 |
+
};
|
| 223 |
+
|
| 224 |
+
} // end namespace Eigen
|
| 225 |
+
|
| 226 |
+
#endif // EIGEN_BASIC_PRECONDITIONERS_H
|
include/eigen/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h
ADDED
|
@@ -0,0 +1,212 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// This file is part of Eigen, a lightweight C++ template library
|
| 2 |
+
// for linear algebra.
|
| 3 |
+
//
|
| 4 |
+
// Copyright (C) 2011-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
|
| 5 |
+
// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
|
| 6 |
+
//
|
| 7 |
+
// This Source Code Form is subject to the terms of the Mozilla
|
| 8 |
+
// Public License v. 2.0. If a copy of the MPL was not distributed
|
| 9 |
+
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
| 10 |
+
|
| 11 |
+
#ifndef EIGEN_BICGSTAB_H
|
| 12 |
+
#define EIGEN_BICGSTAB_H
|
| 13 |
+
|
| 14 |
+
namespace Eigen {
|
| 15 |
+
|
| 16 |
+
namespace internal {
|
| 17 |
+
|
| 18 |
+
/** \internal Low-level bi conjugate gradient stabilized algorithm
|
| 19 |
+
* \param mat The matrix A
|
| 20 |
+
* \param rhs The right hand side vector b
|
| 21 |
+
* \param x On input and initial solution, on output the computed solution.
|
| 22 |
+
* \param precond A preconditioner being able to efficiently solve for an
|
| 23 |
+
* approximation of Ax=b (regardless of b)
|
| 24 |
+
* \param iters On input the max number of iteration, on output the number of performed iterations.
|
| 25 |
+
* \param tol_error On input the tolerance error, on output an estimation of the relative error.
|
| 26 |
+
* \return false in the case of numerical issue, for example a break down of BiCGSTAB.
|
| 27 |
+
*/
|
| 28 |
+
template<typename MatrixType, typename Rhs, typename Dest, typename Preconditioner>
|
| 29 |
+
bool bicgstab(const MatrixType& mat, const Rhs& rhs, Dest& x,
|
| 30 |
+
const Preconditioner& precond, Index& iters,
|
| 31 |
+
typename Dest::RealScalar& tol_error)
|
| 32 |
+
{
|
| 33 |
+
using std::sqrt;
|
| 34 |
+
using std::abs;
|
| 35 |
+
typedef typename Dest::RealScalar RealScalar;
|
| 36 |
+
typedef typename Dest::Scalar Scalar;
|
| 37 |
+
typedef Matrix<Scalar,Dynamic,1> VectorType;
|
| 38 |
+
RealScalar tol = tol_error;
|
| 39 |
+
Index maxIters = iters;
|
| 40 |
+
|
| 41 |
+
Index n = mat.cols();
|
| 42 |
+
VectorType r = rhs - mat * x;
|
| 43 |
+
VectorType r0 = r;
|
| 44 |
+
|
| 45 |
+
RealScalar r0_sqnorm = r0.squaredNorm();
|
| 46 |
+
RealScalar rhs_sqnorm = rhs.squaredNorm();
|
| 47 |
+
if(rhs_sqnorm == 0)
|
| 48 |
+
{
|
| 49 |
+
x.setZero();
|
| 50 |
+
return true;
|
| 51 |
+
}
|
| 52 |
+
Scalar rho (1);
|
| 53 |
+
Scalar alpha (1);
|
| 54 |
+
Scalar w (1);
|
| 55 |
+
|
| 56 |
+
VectorType v = VectorType::Zero(n), p = VectorType::Zero(n);
|
| 57 |
+
VectorType y(n), z(n);
|
| 58 |
+
VectorType kt(n), ks(n);
|
| 59 |
+
|
| 60 |
+
VectorType s(n), t(n);
|
| 61 |
+
|
| 62 |
+
RealScalar tol2 = tol*tol*rhs_sqnorm;
|
| 63 |
+
RealScalar eps2 = NumTraits<Scalar>::epsilon()*NumTraits<Scalar>::epsilon();
|
| 64 |
+
Index i = 0;
|
| 65 |
+
Index restarts = 0;
|
| 66 |
+
|
| 67 |
+
while ( r.squaredNorm() > tol2 && i<maxIters )
|
| 68 |
+
{
|
| 69 |
+
Scalar rho_old = rho;
|
| 70 |
+
|
| 71 |
+
rho = r0.dot(r);
|
| 72 |
+
if (abs(rho) < eps2*r0_sqnorm)
|
| 73 |
+
{
|
| 74 |
+
// The new residual vector became too orthogonal to the arbitrarily chosen direction r0
|
| 75 |
+
// Let's restart with a new r0:
|
| 76 |
+
r = rhs - mat * x;
|
| 77 |
+
r0 = r;
|
| 78 |
+
rho = r0_sqnorm = r.squaredNorm();
|
| 79 |
+
if(restarts++ == 0)
|
| 80 |
+
i = 0;
|
| 81 |
+
}
|
| 82 |
+
Scalar beta = (rho/rho_old) * (alpha / w);
|
| 83 |
+
p = r + beta * (p - w * v);
|
| 84 |
+
|
| 85 |
+
y = precond.solve(p);
|
| 86 |
+
|
| 87 |
+
v.noalias() = mat * y;
|
| 88 |
+
|
| 89 |
+
alpha = rho / r0.dot(v);
|
| 90 |
+
s = r - alpha * v;
|
| 91 |
+
|
| 92 |
+
z = precond.solve(s);
|
| 93 |
+
t.noalias() = mat * z;
|
| 94 |
+
|
| 95 |
+
RealScalar tmp = t.squaredNorm();
|
| 96 |
+
if(tmp>RealScalar(0))
|
| 97 |
+
w = t.dot(s) / tmp;
|
| 98 |
+
else
|
| 99 |
+
w = Scalar(0);
|
| 100 |
+
x += alpha * y + w * z;
|
| 101 |
+
r = s - w * t;
|
| 102 |
+
++i;
|
| 103 |
+
}
|
| 104 |
+
tol_error = sqrt(r.squaredNorm()/rhs_sqnorm);
|
| 105 |
+
iters = i;
|
| 106 |
+
return true;
|
| 107 |
+
}
|
| 108 |
+
|
| 109 |
+
}
|
| 110 |
+
|
| 111 |
+
/** \internal Forward declaration of BiCGSTAB so that internal::traits can be
  * specialized before the class definition below.
  * The default preconditioner is Jacobi (DiagonalPreconditioner). */
template< typename _MatrixType,
          typename _Preconditioner = DiagonalPreconditioner<typename _MatrixType::Scalar> >
class BiCGSTAB;

namespace internal {

// Solver traits consumed by IterativeSolverBase: exposes the matrix and
// preconditioner types of a BiCGSTAB instantiation.
template< typename _MatrixType, typename _Preconditioner>
struct traits<BiCGSTAB<_MatrixType,_Preconditioner> >
{
  typedef _MatrixType MatrixType;
  typedef _Preconditioner Preconditioner;
};

}
|
| 125 |
+
|
| 126 |
+
/** \ingroup IterativeLinearSolvers_Module
|
| 127 |
+
* \brief A bi conjugate gradient stabilized solver for sparse square problems
|
| 128 |
+
*
|
| 129 |
+
* This class allows to solve for A.x = b sparse linear problems using a bi conjugate gradient
|
| 130 |
+
* stabilized algorithm. The vectors x and b can be either dense or sparse.
|
| 131 |
+
*
|
| 132 |
+
* \tparam _MatrixType the type of the sparse matrix A, can be a dense or a sparse matrix.
|
| 133 |
+
* \tparam _Preconditioner the type of the preconditioner. Default is DiagonalPreconditioner
|
| 134 |
+
*
|
| 135 |
+
* \implsparsesolverconcept
|
| 136 |
+
*
|
| 137 |
+
* The maximal number of iterations and tolerance value can be controlled via the setMaxIterations()
|
| 138 |
+
* and setTolerance() methods. The defaults are the size of the problem for the maximal number of iterations
|
| 139 |
+
* and NumTraits<Scalar>::epsilon() for the tolerance.
|
| 140 |
+
*
|
| 141 |
+
* The tolerance corresponds to the relative residual error: |Ax-b|/|b|
|
| 142 |
+
*
|
| 143 |
+
* \b Performance: when using sparse matrices, best performance is achied for a row-major sparse matrix format.
|
| 144 |
+
* Moreover, in this case multi-threading can be exploited if the user code is compiled with OpenMP enabled.
|
| 145 |
+
* See \ref TopicMultiThreading for details.
|
| 146 |
+
*
|
| 147 |
+
* This class can be used as the direct solver classes. Here is a typical usage example:
|
| 148 |
+
* \include BiCGSTAB_simple.cpp
|
| 149 |
+
*
|
| 150 |
+
* By default the iterations start with x=0 as an initial guess of the solution.
|
| 151 |
+
* One can control the start using the solveWithGuess() method.
|
| 152 |
+
*
|
| 153 |
+
* BiCGSTAB can also be used in a matrix-free context, see the following \link MatrixfreeSolverExample example \endlink.
|
| 154 |
+
*
|
| 155 |
+
* \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
|
| 156 |
+
*/
|
| 157 |
+
template< typename _MatrixType, typename _Preconditioner>
|
| 158 |
+
class BiCGSTAB : public IterativeSolverBase<BiCGSTAB<_MatrixType,_Preconditioner> >
|
| 159 |
+
{
|
| 160 |
+
typedef IterativeSolverBase<BiCGSTAB> Base;
|
| 161 |
+
using Base::matrix;
|
| 162 |
+
using Base::m_error;
|
| 163 |
+
using Base::m_iterations;
|
| 164 |
+
using Base::m_info;
|
| 165 |
+
using Base::m_isInitialized;
|
| 166 |
+
public:
|
| 167 |
+
typedef _MatrixType MatrixType;
|
| 168 |
+
typedef typename MatrixType::Scalar Scalar;
|
| 169 |
+
typedef typename MatrixType::RealScalar RealScalar;
|
| 170 |
+
typedef _Preconditioner Preconditioner;
|
| 171 |
+
|
| 172 |
+
public:
|
| 173 |
+
|
| 174 |
+
/** Default constructor. */
|
| 175 |
+
BiCGSTAB() : Base() {}
|
| 176 |
+
|
| 177 |
+
/** Initialize the solver with matrix \a A for further \c Ax=b solving.
|
| 178 |
+
*
|
| 179 |
+
* This constructor is a shortcut for the default constructor followed
|
| 180 |
+
* by a call to compute().
|
| 181 |
+
*
|
| 182 |
+
* \warning this class stores a reference to the matrix A as well as some
|
| 183 |
+
* precomputed values that depend on it. Therefore, if \a A is changed
|
| 184 |
+
* this class becomes invalid. Call compute() to update it with the new
|
| 185 |
+
* matrix A, or modify a copy of A.
|
| 186 |
+
*/
|
| 187 |
+
template<typename MatrixDerived>
|
| 188 |
+
explicit BiCGSTAB(const EigenBase<MatrixDerived>& A) : Base(A.derived()) {}
|
| 189 |
+
|
| 190 |
+
~BiCGSTAB() {}
|
| 191 |
+
|
| 192 |
+
/** \internal */
|
| 193 |
+
template<typename Rhs,typename Dest>
|
| 194 |
+
void _solve_vector_with_guess_impl(const Rhs& b, Dest& x) const
|
| 195 |
+
{
|
| 196 |
+
m_iterations = Base::maxIterations();
|
| 197 |
+
m_error = Base::m_tolerance;
|
| 198 |
+
|
| 199 |
+
bool ret = internal::bicgstab(matrix(), b, x, Base::m_preconditioner, m_iterations, m_error);
|
| 200 |
+
|
| 201 |
+
m_info = (!ret) ? NumericalIssue
|
| 202 |
+
: m_error <= Base::m_tolerance ? Success
|
| 203 |
+
: NoConvergence;
|
| 204 |
+
}
|
| 205 |
+
|
| 206 |
+
protected:
|
| 207 |
+
|
| 208 |
+
};
|
| 209 |
+
|
| 210 |
+
} // end namespace Eigen
|
| 211 |
+
|
| 212 |
+
#endif // EIGEN_BICGSTAB_H
|
include/eigen/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h
ADDED
|
@@ -0,0 +1,227 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// This file is part of Eigen, a lightweight C++ template library
|
| 2 |
+
// for linear algebra.
|
| 3 |
+
//
|
| 4 |
+
// Copyright (C) 2011-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
|
| 5 |
+
//
|
| 6 |
+
// This Source Code Form is subject to the terms of the Mozilla
|
| 7 |
+
// Public License v. 2.0. If a copy of the MPL was not distributed
|
| 8 |
+
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
| 9 |
+
|
| 10 |
+
#ifndef EIGEN_CONJUGATE_GRADIENT_H
|
| 11 |
+
#define EIGEN_CONJUGATE_GRADIENT_H
|
| 12 |
+
|
| 13 |
+
namespace Eigen {
|
| 14 |
+
|
| 15 |
+
namespace internal {
|
| 16 |
+
|
| 17 |
+
/** \internal Low-level conjugate gradient algorithm
  * \param mat The matrix A
  * \param rhs The right hand side vector b
  * \param x On input and initial solution, on output the computed solution.
  * \param precond A preconditioner being able to efficiently solve for an
  *                approximation of Ax=b (regardless of b)
  * \param iters On input the max number of iteration, on output the number of performed iterations.
  * \param tol_error On input the tolerance error, on output an estimation of the relative error.
  */
template<typename MatrixType, typename Rhs, typename Dest, typename Preconditioner>
EIGEN_DONT_INLINE
void conjugate_gradient(const MatrixType& mat, const Rhs& rhs, Dest& x,
                        const Preconditioner& precond, Index& iters,
                        typename Dest::RealScalar& tol_error)
{
  typedef typename Dest::RealScalar RealScalar;
  typedef typename Dest::Scalar Scalar;
  typedef Matrix<Scalar,Dynamic,1> VectorType;

  RealScalar tol = tol_error;
  Index maxIters = iters;

  Index n = mat.cols();

  VectorType residual = rhs - mat * x; //initial residual

  RealScalar rhsNorm2 = rhs.squaredNorm();
  if(rhsNorm2 == 0)
  {
    // b == 0: the exact solution is x = 0; report it with zero iterations/error.
    x.setZero();
    iters = 0;
    tol_error = 0;
    return;
  }
  // Guard against tol^2*|b|^2 underflowing to zero, which would make the
  // stopping criterion unreachable for tiny right hand sides.
  const RealScalar considerAsZero = (std::numeric_limits<RealScalar>::min)();
  RealScalar threshold = numext::maxi(RealScalar(tol*tol*rhsNorm2),considerAsZero);
  RealScalar residualNorm2 = residual.squaredNorm();
  if (residualNorm2 < threshold)
  {
    // The initial guess already satisfies the tolerance: nothing to iterate.
    iters = 0;
    tol_error = numext::sqrt(residualNorm2 / rhsNorm2);
    return;
  }

  VectorType p(n);
  p = precond.solve(residual);      // initial search direction

  VectorType z(n), tmp(n);
  RealScalar absNew = numext::real(residual.dot(p));  // the square of the absolute value of r scaled by invM
  Index i = 0;
  while(i < maxIters)
  {
    tmp.noalias() = mat * p;                    // the bottleneck of the algorithm

    Scalar alpha = absNew / p.dot(tmp);         // the amount we travel on dir
    x += alpha * p;                             // update solution
    residual -= alpha * tmp;                    // update residual

    residualNorm2 = residual.squaredNorm();
    if(residualNorm2 < threshold)
      break;

    z = precond.solve(residual);                // approximately solve for "A z = residual"

    RealScalar absOld = absNew;
    absNew = numext::real(residual.dot(z));     // update the absolute value of r
    RealScalar beta = absNew / absOld;          // calculate the Gram-Schmidt value used to create the new search direction
    p = z + beta * p;                           // update search direction
    i++;
  }
  tol_error = numext::sqrt(residualNorm2 / rhsNorm2);
  iters = i;
}
|
| 90 |
+
|
| 91 |
+
}
|
| 92 |
+
|
| 93 |
+
/** \internal Forward declaration of ConjugateGradient so that internal::traits
  * can be specialized before the class definition below.
  * Defaults: use only the lower triangular part of A and a Jacobi (diagonal)
  * preconditioner. */
template< typename _MatrixType, int _UpLo=Lower,
          typename _Preconditioner = DiagonalPreconditioner<typename _MatrixType::Scalar> >
class ConjugateGradient;

namespace internal {

// Solver traits consumed by IterativeSolverBase: exposes the matrix and
// preconditioner types of a ConjugateGradient instantiation.
template< typename _MatrixType, int _UpLo, typename _Preconditioner>
struct traits<ConjugateGradient<_MatrixType,_UpLo,_Preconditioner> >
{
  typedef _MatrixType MatrixType;
  typedef _Preconditioner Preconditioner;
};

}
|
| 107 |
+
|
| 108 |
+
/** \ingroup IterativeLinearSolvers_Module
|
| 109 |
+
* \brief A conjugate gradient solver for sparse (or dense) self-adjoint problems
|
| 110 |
+
*
|
| 111 |
+
* This class allows to solve for A.x = b linear problems using an iterative conjugate gradient algorithm.
|
| 112 |
+
* The matrix A must be selfadjoint. The matrix A and the vectors x and b can be either dense or sparse.
|
| 113 |
+
*
|
| 114 |
+
* \tparam _MatrixType the type of the matrix A, can be a dense or a sparse matrix.
|
| 115 |
+
* \tparam _UpLo the triangular part that will be used for the computations. It can be Lower,
|
| 116 |
+
* \c Upper, or \c Lower|Upper in which the full matrix entries will be considered.
|
| 117 |
+
* Default is \c Lower, best performance is \c Lower|Upper.
|
| 118 |
+
* \tparam _Preconditioner the type of the preconditioner. Default is DiagonalPreconditioner
|
| 119 |
+
*
|
| 120 |
+
* \implsparsesolverconcept
|
| 121 |
+
*
|
| 122 |
+
* The maximal number of iterations and tolerance value can be controlled via the setMaxIterations()
|
| 123 |
+
* and setTolerance() methods. The defaults are the size of the problem for the maximal number of iterations
|
| 124 |
+
* and NumTraits<Scalar>::epsilon() for the tolerance.
|
| 125 |
+
*
|
| 126 |
+
* The tolerance corresponds to the relative residual error: |Ax-b|/|b|
|
| 127 |
+
*
|
| 128 |
+
* \b Performance: Even though the default value of \c _UpLo is \c Lower, significantly higher performance is
|
| 129 |
+
* achieved when using a complete matrix and \b Lower|Upper as the \a _UpLo template parameter. Moreover, in this
|
| 130 |
+
* case multi-threading can be exploited if the user code is compiled with OpenMP enabled.
|
| 131 |
+
* See \ref TopicMultiThreading for details.
|
| 132 |
+
*
|
| 133 |
+
* This class can be used as the direct solver classes. Here is a typical usage example:
|
| 134 |
+
\code
|
| 135 |
+
int n = 10000;
|
| 136 |
+
VectorXd x(n), b(n);
|
| 137 |
+
SparseMatrix<double> A(n,n);
|
| 138 |
+
// fill A and b
|
| 139 |
+
ConjugateGradient<SparseMatrix<double>, Lower|Upper> cg;
|
| 140 |
+
cg.compute(A);
|
| 141 |
+
x = cg.solve(b);
|
| 142 |
+
std::cout << "#iterations: " << cg.iterations() << std::endl;
|
| 143 |
+
std::cout << "estimated error: " << cg.error() << std::endl;
|
| 144 |
+
// update b, and solve again
|
| 145 |
+
x = cg.solve(b);
|
| 146 |
+
\endcode
|
| 147 |
+
*
|
| 148 |
+
* By default the iterations start with x=0 as an initial guess of the solution.
|
| 149 |
+
* One can control the start using the solveWithGuess() method.
|
| 150 |
+
*
|
| 151 |
+
* ConjugateGradient can also be used in a matrix-free context, see the following \link MatrixfreeSolverExample example \endlink.
|
| 152 |
+
*
|
| 153 |
+
* \sa class LeastSquaresConjugateGradient, class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
|
| 154 |
+
*/
|
| 155 |
+
template< typename _MatrixType, int _UpLo, typename _Preconditioner>
class ConjugateGradient : public IterativeSolverBase<ConjugateGradient<_MatrixType,_UpLo,_Preconditioner> >
{
  typedef IterativeSolverBase<ConjugateGradient> Base;
  // State inherited from IterativeSolverBase that the driver below reads/updates.
  using Base::matrix;
  using Base::m_error;
  using Base::m_iterations;
  using Base::m_info;
  using Base::m_isInitialized;
public:
  typedef _MatrixType MatrixType;
  typedef typename MatrixType::Scalar Scalar;
  typedef typename MatrixType::RealScalar RealScalar;
  typedef _Preconditioner Preconditioner;

  enum {
    UpLo = _UpLo  // triangular part(s) of the input matrix that are referenced
  };

public:

  /** Default constructor. */
  ConjugateGradient() : Base() {}

  /** Initialize the solver with matrix \a A for further \c Ax=b solving.
    *
    * This constructor is a shortcut for the default constructor followed
    * by a call to compute().
    *
    * \warning this class stores a reference to the matrix A as well as some
    * precomputed values that depend on it. Therefore, if \a A is changed
    * this class becomes invalid. Call compute() to update it with the new
    * matrix A, or modify a copy of A.
    */
  template<typename MatrixDerived>
  explicit ConjugateGradient(const EigenBase<MatrixDerived>& A) : Base(A.derived()) {}

  ~ConjugateGradient() {}

  /** \internal Runs the CG iterations for right-hand side \a b, using \a x both
    * as the initial guess and as the output solution. On return, m_iterations,
    * m_error and m_info reflect the actual iteration count, the achieved
    * relative residual, and the convergence status. */
  template<typename Rhs,typename Dest>
  void _solve_vector_with_guess_impl(const Rhs& b, Dest& x) const
  {
    typedef typename Base::MatrixWrapper MatrixWrapper;
    typedef typename Base::ActualMatrixType ActualMatrixType;
    enum {
      // When the full matrix (Lower|Upper) is stored column-major with a real
      // scalar type, the kernel is run on the transposed (row-major) view of
      // the same data, which is equivalent for a symmetric real matrix.
      TransposeInput  =   (!MatrixWrapper::MatrixFree)
                      &&  (UpLo==(Lower|Upper))
                      &&  (!MatrixType::IsRowMajor)
                      &&  (!NumTraits<Scalar>::IsComplex)
    };
    typedef typename internal::conditional<TransposeInput,Transpose<const ActualMatrixType>, ActualMatrixType const&>::type RowMajorWrapper;
    // Matrix-free mode only supports the full Lower|Upper storage convention.
    EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(MatrixWrapper::MatrixFree,UpLo==(Lower|Upper)),MATRIX_FREE_CONJUGATE_GRADIENT_IS_COMPATIBLE_WITH_UPPER_UNION_LOWER_MODE_ONLY);
    // Either pass the matrix through directly (full storage) or wrap the
    // stored triangular part in a self-adjoint view.
    typedef typename internal::conditional<UpLo==(Lower|Upper),
                                           RowMajorWrapper,
                                           typename MatrixWrapper::template ConstSelfAdjointViewReturnType<UpLo>::Type
                                          >::type SelfAdjointWrapper;

    // Initialize the iteration budget and target tolerance; the kernel
    // overwrites both with the values actually achieved.
    m_iterations = Base::maxIterations();
    m_error = Base::m_tolerance;

    RowMajorWrapper row_mat(matrix());
    internal::conjugate_gradient(SelfAdjointWrapper(row_mat), b, x, Base::m_preconditioner, m_iterations, m_error);
    m_info = m_error <= Base::m_tolerance ? Success : NoConvergence;
  }

protected:

};
|
| 224 |
+
|
| 225 |
+
} // end namespace Eigen
|
| 226 |
+
|
| 227 |
+
#endif // EIGEN_CONJUGATE_GRADIENT_H
|
include/eigen/Eigen/src/IterativeLinearSolvers/IncompleteCholesky.h
ADDED
|
@@ -0,0 +1,394 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// This file is part of Eigen, a lightweight C++ template library
|
| 2 |
+
// for linear algebra.
|
| 3 |
+
//
|
| 4 |
+
// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
|
| 5 |
+
// Copyright (C) 2015 Gael Guennebaud <gael.guennebaud@inria.fr>
|
| 6 |
+
//
|
| 7 |
+
// This Source Code Form is subject to the terms of the Mozilla
|
| 8 |
+
// Public License v. 2.0. If a copy of the MPL was not distributed
|
| 9 |
+
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
| 10 |
+
|
| 11 |
+
#ifndef EIGEN_INCOMPLETE_CHOlESKY_H
|
| 12 |
+
#define EIGEN_INCOMPLETE_CHOlESKY_H
|
| 13 |
+
|
| 14 |
+
#include <vector>
|
| 15 |
+
#include <list>
|
| 16 |
+
|
| 17 |
+
namespace Eigen {
|
| 18 |
+
/**
|
| 19 |
+
* \brief Modified Incomplete Cholesky with dual threshold
|
| 20 |
+
*
|
| 21 |
+
* References : C-J. Lin and J. J. Moré, Incomplete Cholesky Factorizations with
|
| 22 |
+
* Limited memory, SIAM J. Sci. Comput. 21(1), pp. 24-45, 1999
|
| 23 |
+
*
|
| 24 |
+
* \tparam Scalar the scalar type of the input matrices
|
| 25 |
+
* \tparam _UpLo The triangular part that will be used for the computations. It can be Lower
|
| 26 |
+
* or Upper. Default is Lower.
|
| 27 |
+
* \tparam _OrderingType The ordering method to use, either AMDOrdering<> or NaturalOrdering<>. Default is AMDOrdering<int>,
|
| 28 |
+
* unless EIGEN_MPL2_ONLY is defined, in which case the default is NaturalOrdering<int>.
|
| 29 |
+
*
|
| 30 |
+
* \implsparsesolverconcept
|
| 31 |
+
*
|
| 32 |
+
* It performs the following incomplete factorization: \f$ S P A P' S \approx L L' \f$
|
| 33 |
+
* where L is a lower triangular factor, S is a diagonal scaling matrix, and P is a
|
| 34 |
+
* fill-in reducing permutation as computed by the ordering method.
|
| 35 |
+
*
|
| 36 |
+
* \b Shifting \b strategy: Let \f$ B = S P A P' S \f$ be the scaled matrix on which the factorization is carried out,
|
| 37 |
+
* and \f$ \beta \f$ be the minimum value of the diagonal. If \f$ \beta > 0 \f$ then, the factorization is directly performed
|
| 38 |
+
* on the matrix B. Otherwise, the factorization is performed on the shifted matrix \f$ B + (\sigma+|\beta|) I \f$ where
|
| 39 |
+
* \f$ \sigma \f$ is the initial shift value as returned and set by setInitialShift() method. The default value is \f$ \sigma = 10^{-3} \f$.
|
| 40 |
+
* If the factorization fails, then the shift is doubled until it succeeds, up to a maximum of ten attempts. If it still fails, as reported by
|
| 41 |
+
* the info() method, then you can either increase the initial shift, or better use another preconditioning technique.
|
| 42 |
+
*
|
| 43 |
+
*/
|
| 44 |
+
template <typename Scalar, int _UpLo = Lower, typename _OrderingType = AMDOrdering<int> >
class IncompleteCholesky : public SparseSolverBase<IncompleteCholesky<Scalar,_UpLo,_OrderingType> >
{
  protected:
    typedef SparseSolverBase<IncompleteCholesky<Scalar,_UpLo,_OrderingType> > Base;
    using Base::m_isInitialized;
  public:
    typedef typename NumTraits<Scalar>::Real RealScalar;
    typedef _OrderingType OrderingType;
    typedef typename OrderingType::PermutationType PermutationType;
    typedef typename PermutationType::StorageIndex StorageIndex;
    // The factor L is stored as a compressed column-major sparse matrix.
    typedef SparseMatrix<Scalar,ColMajor,StorageIndex> FactorType;
    typedef Matrix<Scalar,Dynamic,1> VectorSx;
    typedef Matrix<RealScalar,Dynamic,1> VectorRx;
    typedef Matrix<StorageIndex,Dynamic, 1> VectorIx;
    typedef std::vector<std::list<StorageIndex> > VectorList;
    enum { UpLo = _UpLo };
    enum {
      ColsAtCompileTime = Dynamic,
      MaxColsAtCompileTime = Dynamic
    };
  public:

    /** Default constructor leaving the object in a partly non-initialized stage.
      *
      * You must call compute() or the pair analyzePattern()/factorize() to make it valid.
      *
      * \sa IncompleteCholesky(const MatrixType&)
      */
    IncompleteCholesky() : m_initialShift(1e-3),m_analysisIsOk(false),m_factorizationIsOk(false) {}

    /** Constructor computing the incomplete factorization for the given matrix \a matrix.
      */
    template<typename MatrixType>
    IncompleteCholesky(const MatrixType& matrix) : m_initialShift(1e-3),m_analysisIsOk(false),m_factorizationIsOk(false)
    {
      compute(matrix);
    }

    /** \returns number of rows of the factored matrix */
    EIGEN_CONSTEXPR Index rows() const EIGEN_NOEXCEPT { return m_L.rows(); }

    /** \returns number of columns of the factored matrix */
    EIGEN_CONSTEXPR Index cols() const EIGEN_NOEXCEPT { return m_L.cols(); }


    /** \brief Reports whether previous computation was successful.
      *
      * It triggers an assertion if \c *this has not been initialized through the respective constructor,
      * or a call to compute() or analyzePattern().
      *
      * \returns \c Success if computation was successful,
      *          \c NumericalIssue if the matrix appears to be negative.
      */
    ComputationInfo info() const
    {
      eigen_assert(m_isInitialized && "IncompleteCholesky is not initialized.");
      return m_info;
    }

    /** \brief Set the initial shift parameter \f$ \sigma \f$.
      */
    void setInitialShift(RealScalar shift) { m_initialShift = shift; }

    /** \brief Computes the fill reducing permutation vector using the sparsity pattern of \a mat
      */
    template<typename MatrixType>
    void analyzePattern(const MatrixType& mat)
    {
      OrderingType ord;
      PermutationType pinv;
      // The ordering functor returns the inverse permutation; store its inverse
      // (the direct permutation), or an empty one for a natural ordering.
      ord(mat.template selfadjointView<UpLo>(), pinv);
      if(pinv.size()>0) m_perm = pinv.inverse();
      else              m_perm.resize(0);
      m_L.resize(mat.rows(), mat.cols());
      m_analysisIsOk = true;
      m_isInitialized = true;
      m_info = Success;
    }

    /** \brief Performs the numerical factorization of the input matrix \a mat
      *
      * The method analyzePattern() or compute() must have been called beforehand
      * with a matrix having the same pattern.
      *
      * \sa compute(), analyzePattern()
      */
    template<typename MatrixType>
    void factorize(const MatrixType& mat);

    /** Computes or re-computes the incomplete Cholesky factorization of the input matrix \a mat
      *
      * It is a shortcut for a sequential call to the analyzePattern() and factorize() methods.
      *
      * \sa analyzePattern(), factorize()
      */
    template<typename MatrixType>
    void compute(const MatrixType& mat)
    {
      analyzePattern(mat);
      factorize(mat);
    }

    // internal
    // Applies the preconditioner: solves (S P A P' S) z = b via the computed
    // factors, i.e. x = P' S L^-H L^-1 S P b.
    template<typename Rhs, typename Dest>
    void _solve_impl(const Rhs& b, Dest& x) const
    {
      eigen_assert(m_factorizationIsOk && "factorize() should be called first");
      // An empty m_perm means natural ordering: skip the permutation steps.
      if (m_perm.rows() == b.rows())  x = m_perm * b;
      else                            x = b;
      x = m_scale.asDiagonal() * x;
      x = m_L.template triangularView<Lower>().solve(x);
      x = m_L.adjoint().template triangularView<Upper>().solve(x);
      x = m_scale.asDiagonal() * x;
      if (m_perm.rows() == b.rows())
        x = m_perm.inverse() * x;
    }

    /** \returns the sparse lower triangular factor L */
    const FactorType& matrixL() const { eigen_assert(m_factorizationIsOk && "factorize() should be called first"); return m_L; }

    /** \returns a vector representing the scaling factor S */
    const VectorRx& scalingS() const { eigen_assert(m_factorizationIsOk && "factorize() should be called first"); return m_scale; }

    /** \returns the fill-in reducing permutation P (can be empty for a natural ordering) */
    const PermutationType& permutationP() const { eigen_assert(m_analysisIsOk && "analyzePattern() should be called first"); return m_perm; }

  protected:
    FactorType m_L;              // The lower part stored in CSC
    VectorRx m_scale;            // The vector for scaling the matrix
    RealScalar m_initialShift;   // The initial shift parameter
    bool m_analysisIsOk;         // true once analyzePattern() has run
    bool m_factorizationIsOk;    // true once factorize() has succeeded
    ComputationInfo m_info;
    PermutationType m_perm;      // fill-in reducing permutation (may be empty)

  private:
    // Moves the entry of column 'col' with smallest row index >= jk to position
    // jk, and registers 'col' in the update list of that row's column.
    inline void updateList(Ref<const VectorIx> colPtr, Ref<VectorIx> rowIdx, Ref<VectorSx> vals, const Index& col, const Index& jk, VectorIx& firstElt, VectorList& listCol);
};
|
| 183 |
+
|
| 184 |
+
// Based on the following paper:
|
| 185 |
+
// C-J. Lin and J. J. Moré, Incomplete Cholesky Factorizations with
|
| 186 |
+
// Limited memory, SIAM J. Sci. Comput. 21(1), pp. 24-45, 1999
|
| 187 |
+
// http://ftp.mcs.anl.gov/pub/tech_reports/reports/P682.pdf
|
| 188 |
+
template<typename Scalar, int _UpLo, typename OrderingType>
template<typename _MatrixType>
void IncompleteCholesky<Scalar,_UpLo, OrderingType>::factorize(const _MatrixType& mat)
{
  using std::sqrt;
  eigen_assert(m_analysisIsOk && "analyzePattern() should be called first");

  // Dropping strategy : Keep only the p largest elements per column, where p is the number of elements in the column of the original matrix. Other strategies will be added

  // Apply the fill-reducing permutation computed in analyzePattern()
  if (m_perm.rows() == mat.rows() ) // To detect the null permutation
  {
    // The temporary is needed to make sure that the diagonal entry is properly sorted
    FactorType tmp(mat.rows(), mat.cols());
    tmp = mat.template selfadjointView<_UpLo>().twistedBy(m_perm);
    m_L.template selfadjointView<Lower>() = tmp.template selfadjointView<Lower>();
  }
  else
  {
    m_L.template selfadjointView<Lower>() = mat.template selfadjointView<_UpLo>();
  }

  Index n = m_L.cols();
  Index nnz = m_L.nonZeros();
  // Direct views into the CSC arrays of m_L; the factorization is done in place.
  Map<VectorSx> vals(m_L.valuePtr(), nnz);          //values
  Map<VectorIx> rowIdx(m_L.innerIndexPtr(), nnz);   //Row indices
  Map<VectorIx> colPtr( m_L.outerIndexPtr(), n+1);  // Pointer to the beginning of each row
  VectorIx firstElt(n-1);  // for each j, points to the next entry in vals that will be used in the factorization
  VectorList listCol(n);   // listCol(j) is a linked list of columns to update column j
  VectorSx col_vals(n);    // Store a nonzero values in each column
  VectorIx col_irow(n);    // Row indices of nonzero elements in each column
  VectorIx col_pattern(n); // col_pattern(l) >= 0 iff row l is already present in the current column
  col_pattern.fill(-1);
  StorageIndex col_nnz;


  // Computes the scaling factors
  // m_scale(j) ~ 1/sqrt(||column j||), computed from the lower part and its mirror.
  m_scale.resize(n);
  m_scale.setZero();
  for (Index j = 0; j < n; j++)
    for (Index k = colPtr[j]; k < colPtr[j+1]; k++)
    {
      m_scale(j) += numext::abs2(vals(k));
      if(rowIdx[k]!=j)
        m_scale(rowIdx[k]) += numext::abs2(vals(k));
    }

  m_scale = m_scale.cwiseSqrt().cwiseSqrt();

  for (Index j = 0; j < n; ++j)
    if(m_scale(j)>(std::numeric_limits<RealScalar>::min)())
      m_scale(j) = RealScalar(1)/m_scale(j);
    else
      m_scale(j) = 1; // avoid division by (near) zero for an empty/zero column

  // TODO disable scaling if not needed, i.e., if it is roughly uniform? (this will make solve() faster)

  // Scale and compute the shift for the matrix
  RealScalar mindiag = NumTraits<RealScalar>::highest();
  for (Index j = 0; j < n; j++)
  {
    for (Index k = colPtr[j]; k < colPtr[j+1]; k++)
      vals[k] *= (m_scale(j)*m_scale(rowIdx[k]));
    eigen_internal_assert(rowIdx[colPtr[j]]==j && "IncompleteCholesky: only the lower triangular part must be stored");
    mindiag = numext::mini(numext::real(vals[colPtr[j]]), mindiag);
  }

  // Keep a pristine copy of the scaled matrix so a failed attempt can be restarted.
  FactorType L_save = m_L;

  RealScalar shift = 0;
  if(mindiag <= RealScalar(0.))
    shift = m_initialShift - mindiag;

  m_info = NumericalIssue;

  // Try to perform the incomplete factorization using the current shift;
  // on failure the shift is doubled (up to 10 attempts) and the loop restarts.
  int iter = 0;
  do
  {
    // Apply the shift to the diagonal elements of the matrix
    for (Index j = 0; j < n; j++)
      vals[colPtr[j]] += shift;

    // jki version of the Cholesky factorization
    Index j=0;
    for (; j < n; ++j)
    {
      // Left-looking factorization of the j-th column
      // First, load the j-th column into col_vals
      Scalar diag = vals[colPtr[j]];  // It is assumed that only the lower part is stored
      col_nnz = 0;
      for (Index i = colPtr[j] + 1; i < colPtr[j+1]; i++)
      {
        StorageIndex l = rowIdx[i];
        col_vals(col_nnz) = vals[i];
        col_irow(col_nnz) = l;
        col_pattern(l) = col_nnz;
        col_nnz++;
      }
      {
        typename std::list<StorageIndex>::iterator k;
        // Browse all previous columns that will update column j
        for(k = listCol[j].begin(); k != listCol[j].end(); k++)
        {
          Index jk = firstElt(*k);  // First element to use in the column
          eigen_internal_assert(rowIdx[jk]==j);
          Scalar v_j_jk = numext::conj(vals[jk]);

          jk += 1;
          // Scatter/gather the update of column *k into the working column j.
          for (Index i = jk; i < colPtr[*k+1]; i++)
          {
            StorageIndex l = rowIdx[i];
            if(col_pattern[l]<0)
            {
              // New fill-in entry in column j.
              col_vals(col_nnz) = vals[i] * v_j_jk;
              col_irow[col_nnz] = l;
              col_pattern(l) = col_nnz;
              col_nnz++;
            }
            else
              col_vals(col_pattern[l]) -= vals[i] * v_j_jk;
          }
          // Advance column *k so its next entry will update the proper column later.
          updateList(colPtr,rowIdx,vals, *k, jk, firstElt, listCol);
        }
      }

      // Scale the current column
      if(numext::real(diag) <= 0)
      {
        // Non-positive pivot: this attempt failed.
        if(++iter>=10)
          return; // m_info stays NumericalIssue; caller should increase the initial shift

        // increase shift
        shift = numext::maxi(m_initialShift,RealScalar(2)*shift);
        // restore m_L, col_pattern, and listCol
        vals = Map<const VectorSx>(L_save.valuePtr(), nnz);
        rowIdx = Map<const VectorIx>(L_save.innerIndexPtr(), nnz);
        colPtr = Map<const VectorIx>(L_save.outerIndexPtr(), n+1);
        col_pattern.fill(-1);
        for(Index i=0; i<n; ++i)
          listCol[i].clear();

        break; // restart the outer do/while with the larger shift
      }

      RealScalar rdiag = sqrt(numext::real(diag));
      vals[colPtr[j]] = rdiag;
      for (Index k = 0; k<col_nnz; ++k)
      {
        Index i = col_irow[k];
        //Scale
        col_vals(k) /= rdiag;
        //Update the remaining diagonals with col_vals
        vals[colPtr[i]] -= numext::abs2(col_vals(k));
      }
      // Select the largest p elements
      // p is the original number of elements in the column (without the diagonal)
      Index p = colPtr[j+1] - colPtr[j] - 1 ;
      Ref<VectorSx> cvals = col_vals.head(col_nnz);
      Ref<VectorIx> cirow = col_irow.head(col_nnz);
      // Partial quick-sort: the p largest-magnitude entries end up first.
      internal::QuickSplit(cvals,cirow, p);
      // Insert the largest p elements in the matrix
      Index cpt = 0;
      for (Index i = colPtr[j]+1; i < colPtr[j+1]; i++)
      {
        vals[i] = col_vals(cpt);
        rowIdx[i] = col_irow(cpt);
        // restore col_pattern:
        col_pattern(col_irow(cpt)) = -1;
        cpt++;
      }
      // Get the first smallest row index and put it after the diagonal element
      Index jk = colPtr(j)+1;
      updateList(colPtr,rowIdx,vals,j,jk,firstElt,listCol);
    }

    // The column loop only reaches j==n when every pivot was positive.
    if(j==n)
    {
      m_factorizationIsOk = true;
      m_info = Success;
    }
  } while(m_info!=Success);
}
|
| 371 |
+
|
| 372 |
+
template<typename Scalar, int _UpLo, typename OrderingType>
|
| 373 |
+
inline void IncompleteCholesky<Scalar,_UpLo, OrderingType>::updateList(Ref<const VectorIx> colPtr, Ref<VectorIx> rowIdx, Ref<VectorSx> vals, const Index& col, const Index& jk, VectorIx& firstElt, VectorList& listCol)
|
| 374 |
+
{
|
| 375 |
+
if (jk < colPtr(col+1) )
|
| 376 |
+
{
|
| 377 |
+
Index p = colPtr(col+1) - jk;
|
| 378 |
+
Index minpos;
|
| 379 |
+
rowIdx.segment(jk,p).minCoeff(&minpos);
|
| 380 |
+
minpos += jk;
|
| 381 |
+
if (rowIdx(minpos) != rowIdx(jk))
|
| 382 |
+
{
|
| 383 |
+
//Swap
|
| 384 |
+
std::swap(rowIdx(jk),rowIdx(minpos));
|
| 385 |
+
std::swap(vals(jk),vals(minpos));
|
| 386 |
+
}
|
| 387 |
+
firstElt(col) = internal::convert_index<StorageIndex,Index>(jk);
|
| 388 |
+
listCol[rowIdx(jk)].push_back(internal::convert_index<StorageIndex,Index>(col));
|
| 389 |
+
}
|
| 390 |
+
}
|
| 391 |
+
|
| 392 |
+
} // end namespace Eigen
|
| 393 |
+
|
| 394 |
+
#endif
|
include/eigen/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h
ADDED
|
@@ -0,0 +1,453 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// This file is part of Eigen, a lightweight C++ template library
|
| 2 |
+
// for linear algebra.
|
| 3 |
+
//
|
| 4 |
+
// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
|
| 5 |
+
// Copyright (C) 2014 Gael Guennebaud <gael.guennebaud@inria.fr>
|
| 6 |
+
//
|
| 7 |
+
// This Source Code Form is subject to the terms of the Mozilla
|
| 8 |
+
// Public License v. 2.0. If a copy of the MPL was not distributed
|
| 9 |
+
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
| 10 |
+
|
| 11 |
+
#ifndef EIGEN_INCOMPLETE_LUT_H
|
| 12 |
+
#define EIGEN_INCOMPLETE_LUT_H
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
namespace Eigen {
|
| 16 |
+
|
| 17 |
+
namespace internal {
|
| 18 |
+
|
| 19 |
+
/** \internal
|
| 20 |
+
* Compute a quick-sort split of a vector
|
| 21 |
+
* On output, the vector row is permuted such that its elements satisfy
|
| 22 |
+
* abs(row(i)) >= abs(row(ncut)) if i<ncut
|
| 23 |
+
* abs(row(i)) <= abs(row(ncut)) if i>ncut
|
| 24 |
+
* \param row The vector of values
|
| 25 |
+
* \param ind The array of index for the elements in @p row
|
| 26 |
+
* \param ncut The number of largest elements to keep
|
| 27 |
+
**/
|
| 28 |
+
template <typename VectorV, typename VectorI>
|
| 29 |
+
Index QuickSplit(VectorV &row, VectorI &ind, Index ncut)
|
| 30 |
+
{
|
| 31 |
+
typedef typename VectorV::RealScalar RealScalar;
|
| 32 |
+
using std::swap;
|
| 33 |
+
using std::abs;
|
| 34 |
+
Index mid;
|
| 35 |
+
Index n = row.size(); /* length of the vector */
|
| 36 |
+
Index first, last ;
|
| 37 |
+
|
| 38 |
+
ncut--; /* to fit the zero-based indices */
|
| 39 |
+
first = 0;
|
| 40 |
+
last = n-1;
|
| 41 |
+
if (ncut < first || ncut > last ) return 0;
|
| 42 |
+
|
| 43 |
+
do {
|
| 44 |
+
mid = first;
|
| 45 |
+
RealScalar abskey = abs(row(mid));
|
| 46 |
+
for (Index j = first + 1; j <= last; j++) {
|
| 47 |
+
if ( abs(row(j)) > abskey) {
|
| 48 |
+
++mid;
|
| 49 |
+
swap(row(mid), row(j));
|
| 50 |
+
swap(ind(mid), ind(j));
|
| 51 |
+
}
|
| 52 |
+
}
|
| 53 |
+
/* Interchange for the pivot element */
|
| 54 |
+
swap(row(mid), row(first));
|
| 55 |
+
swap(ind(mid), ind(first));
|
| 56 |
+
|
| 57 |
+
if (mid > ncut) last = mid - 1;
|
| 58 |
+
else if (mid < ncut ) first = mid + 1;
|
| 59 |
+
} while (mid != ncut );
|
| 60 |
+
|
| 61 |
+
return 0; /* mid is equal to ncut */
|
| 62 |
+
}
|
| 63 |
+
|
| 64 |
+
}// end namespace internal
|
| 65 |
+
|
| 66 |
+
/** \ingroup IterativeLinearSolvers_Module
|
| 67 |
+
* \class IncompleteLUT
|
| 68 |
+
* \brief Incomplete LU factorization with dual-threshold strategy
|
| 69 |
+
*
|
| 70 |
+
* \implsparsesolverconcept
|
| 71 |
+
*
|
| 72 |
+
* During the numerical factorization, two dropping rules are used :
|
| 73 |
+
* 1) any element whose magnitude is less than some tolerance is dropped.
|
| 74 |
+
* This tolerance is obtained by multiplying the input tolerance @p droptol
|
| 75 |
+
* by the average magnitude of all the original elements in the current row.
|
| 76 |
+
* 2) After the elimination of the row, only the @p fill largest elements in
|
| 77 |
+
* the L part and the @p fill largest elements in the U part are kept
|
| 78 |
+
* (in addition to the diagonal element ). Note that @p fill is computed from
|
| 79 |
+
* the input parameter @p fillfactor which is used the ratio to control the fill_in
|
| 80 |
+
* relatively to the initial number of nonzero elements.
|
| 81 |
+
*
|
| 82 |
+
* The two extreme cases are when @p droptol=0 (to keep all the @p fill*2 largest elements)
|
| 83 |
+
* and when @p fill=n/2 with @p droptol being different to zero.
|
| 84 |
+
*
|
| 85 |
+
* References : Yousef Saad, ILUT: A dual threshold incomplete LU factorization,
|
| 86 |
+
* Numerical Linear Algebra with Applications, 1(4), pp 387-402, 1994.
|
| 87 |
+
*
|
| 88 |
+
* NOTE : The following implementation is derived from the ILUT implementation
|
| 89 |
+
* in the SPARSKIT package, Copyright (C) 2005, the Regents of the University of Minnesota
|
| 90 |
+
* released under the terms of the GNU LGPL:
|
| 91 |
+
* http://www-users.cs.umn.edu/~saad/software/SPARSKIT/README
|
| 92 |
+
* However, Yousef Saad gave us permission to relicense his ILUT code to MPL2.
|
| 93 |
+
* See the Eigen mailing list archive, thread: ILUT, date: July 8, 2012:
|
| 94 |
+
* http://listengine.tuxfamily.org/lists.tuxfamily.org/eigen/2012/07/msg00064.html
|
| 95 |
+
* alternatively, on GMANE:
|
| 96 |
+
* http://comments.gmane.org/gmane.comp.lib.eigen/3302
|
| 97 |
+
*/
|
| 98 |
+
template <typename _Scalar, typename _StorageIndex = int>
class IncompleteLUT : public SparseSolverBase<IncompleteLUT<_Scalar, _StorageIndex> >
{
  protected:
    typedef SparseSolverBase<IncompleteLUT> Base;
    using Base::m_isInitialized;
  public:
    typedef _Scalar Scalar;
    typedef _StorageIndex StorageIndex;
    typedef typename NumTraits<Scalar>::Real RealScalar;
    typedef Matrix<Scalar,Dynamic,1> Vector;
    typedef Matrix<StorageIndex,Dynamic,1> VectorI;
    // Both incomplete factors (unit-lower L and upper U) are stored together
    // in one row-major sparse matrix; see factorize().
    typedef SparseMatrix<Scalar,RowMajor,StorageIndex> FactorType;

    enum {
      ColsAtCompileTime = Dynamic,
      MaxColsAtCompileTime = Dynamic
    };

  public:

    // Default constructor: only sets the control parameters.
    // compute() must be called before solving.
    IncompleteLUT()
      : m_droptol(NumTraits<Scalar>::dummy_precision()), m_fillfactor(10),
        m_analysisIsOk(false), m_factorizationIsOk(false)
    {}

    // Shortcut constructor: sets droptol/fillfactor and immediately factorizes \a mat.
    template<typename MatrixType>
    explicit IncompleteLUT(const MatrixType& mat, const RealScalar& droptol=NumTraits<Scalar>::dummy_precision(), int fillfactor = 10)
      : m_droptol(droptol),m_fillfactor(fillfactor),
        m_analysisIsOk(false),m_factorizationIsOk(false)
    {
      // fillfactor drives the per-row fill budget in factorize(); zero would keep nothing.
      eigen_assert(fillfactor != 0);
      compute(mat);
    }

    EIGEN_CONSTEXPR Index rows() const EIGEN_NOEXCEPT { return m_lu.rows(); }

    EIGEN_CONSTEXPR Index cols() const EIGEN_NOEXCEPT { return m_lu.cols(); }

    /** \brief Reports whether previous computation was successful.
      *
      * \returns \c Success if computation was successful,
      *          \c NumericalIssue if the matrix appears to be negative.
      */
    ComputationInfo info() const
    {
      eigen_assert(m_isInitialized && "IncompleteLUT is not initialized.");
      return m_info;
    }

    // Computes the fill-reducing permutation (defined out-of-line below).
    template<typename MatrixType>
    void analyzePattern(const MatrixType& amat);

    // Computes the incomplete factors (defined out-of-line below).
    template<typename MatrixType>
    void factorize(const MatrixType& amat);

    /**
      * Compute an incomplete LU factorization with dual threshold on the matrix mat
      * No pivoting is done in this version
      *
      **/
    template<typename MatrixType>
    IncompleteLUT& compute(const MatrixType& amat)
    {
      analyzePattern(amat);
      factorize(amat);
      return *this;
    }

    void setDroptol(const RealScalar& droptol);
    void setFillfactor(int fillfactor);

    // Applies the preconditioner: x = P * U^-1 * L^-1 * Pinv * b, i.e. undo the
    // fill-reducing permutation, run both triangular solves, permute back.
    template<typename Rhs, typename Dest>
    void _solve_impl(const Rhs& b, Dest& x) const
    {
      x = m_Pinv * b;
      x = m_lu.template triangularView<UnitLower>().solve(x);
      x = m_lu.template triangularView<Upper>().solve(x);
      x = m_P * x;
    }

  protected:

    /** keeps off-diagonal entries; drops diagonal entries */
    struct keep_diag {
      inline bool operator() (const Index& row, const Index& col, const Scalar&) const
      {
        return row!=col;
      }
    };

  protected:

    FactorType m_lu;        // combined storage for the L and U incomplete factors
    RealScalar m_droptol;   // dual-threshold drop tolerance (see setDroptol())
    int m_fillfactor;       // fill budget multiplier (see setFillfactor())
    bool m_analysisIsOk;
    bool m_factorizationIsOk;
    ComputationInfo m_info;
    PermutationMatrix<Dynamic,Dynamic,StorageIndex> m_P;     // Fill-reducing permutation
    PermutationMatrix<Dynamic,Dynamic,StorageIndex> m_Pinv;  // Inverse permutation
};
|
| 200 |
+
|
| 201 |
+
/**
|
| 202 |
+
* Set control parameter droptol
|
| 203 |
+
* \param droptol Drop any element whose magnitude is less than this tolerance
|
| 204 |
+
**/
|
| 205 |
+
template<typename Scalar, typename StorageIndex>
|
| 206 |
+
void IncompleteLUT<Scalar,StorageIndex>::setDroptol(const RealScalar& droptol)
|
| 207 |
+
{
|
| 208 |
+
this->m_droptol = droptol;
|
| 209 |
+
}
|
| 210 |
+
|
| 211 |
+
/**
|
| 212 |
+
* Set control parameter fillfactor
|
| 213 |
+
* \param fillfactor This is used to compute the number @p fill_in of largest elements to keep on each row.
|
| 214 |
+
**/
|
| 215 |
+
template<typename Scalar, typename StorageIndex>
|
| 216 |
+
void IncompleteLUT<Scalar,StorageIndex>::setFillfactor(int fillfactor)
|
| 217 |
+
{
|
| 218 |
+
this->m_fillfactor = fillfactor;
|
| 219 |
+
}
|
| 220 |
+
|
| 221 |
+
template <typename Scalar, typename StorageIndex>
|
| 222 |
+
template<typename _MatrixType>
|
| 223 |
+
void IncompleteLUT<Scalar,StorageIndex>::analyzePattern(const _MatrixType& amat)
|
| 224 |
+
{
|
| 225 |
+
// Compute the Fill-reducing permutation
|
| 226 |
+
// Since ILUT does not perform any numerical pivoting,
|
| 227 |
+
// it is highly preferable to keep the diagonal through symmetric permutations.
|
| 228 |
+
// To this end, let's symmetrize the pattern and perform AMD on it.
|
| 229 |
+
SparseMatrix<Scalar,ColMajor, StorageIndex> mat1 = amat;
|
| 230 |
+
SparseMatrix<Scalar,ColMajor, StorageIndex> mat2 = amat.transpose();
|
| 231 |
+
// FIXME for a matrix with nearly symmetric pattern, mat2+mat1 is the appropriate choice.
|
| 232 |
+
// on the other hand for a really non-symmetric pattern, mat2*mat1 should be preferred...
|
| 233 |
+
SparseMatrix<Scalar,ColMajor, StorageIndex> AtA = mat2 + mat1;
|
| 234 |
+
AMDOrdering<StorageIndex> ordering;
|
| 235 |
+
ordering(AtA,m_P);
|
| 236 |
+
m_Pinv = m_P.inverse(); // cache the inverse permutation
|
| 237 |
+
m_analysisIsOk = true;
|
| 238 |
+
m_factorizationIsOk = false;
|
| 239 |
+
m_isInitialized = true;
|
| 240 |
+
}
|
| 241 |
+
|
| 242 |
+
// Core ILUT kernel (derived from SPARSKIT): for each row of the permuted
// matrix, eliminate against previously factorized rows, apply the dual
// dropping rule (magnitude threshold + per-row fill budget), and append the
// surviving L and U entries to m_lu.
template <typename Scalar, typename StorageIndex>
template<typename _MatrixType>
void IncompleteLUT<Scalar,StorageIndex>::factorize(const _MatrixType& amat)
{
  using std::sqrt;
  using std::swap;
  using std::abs;
  using internal::convert_index;

  eigen_assert((amat.rows() == amat.cols()) && "The factorization should be done on a square matrix");
  Index n = amat.cols();  // Size of the matrix
  m_lu.resize(n,n);
  // Declare Working vectors and variables
  Vector u(n) ;    // real values of the row -- maximum size is n --
  VectorI ju(n);   // column position of the values in u -- maximum size is n
  VectorI jr(n);   // Indicate the position of the nonzero elements in the vector u -- A zero location is indicated by -1

  // Apply the fill-reducing permutation computed by analyzePattern()
  eigen_assert(m_analysisIsOk && "You must first call analyzePattern()");
  SparseMatrix<Scalar,RowMajor, StorageIndex> mat;
  mat = amat.twistedBy(m_Pinv);

  // Initialization
  jr.fill(-1);
  ju.fill(0);
  u.fill(0);

  // number of largest elements to keep in each row:
  Index fill_in = (amat.nonZeros()*m_fillfactor)/n + 1;
  if (fill_in > n) fill_in = n;

  // number of largest nonzero elements to keep in the L and the U part of the current row:
  Index nnzL = fill_in/2;
  Index nnzU = nnzL;
  m_lu.reserve(n * (nnzL + nnzU + 1));

  // global loop over the rows of the sparse matrix
  for (Index ii = 0; ii < n; ii++)
  {
    // 1 - copy the lower and the upper part of the row i of mat in the working vector u
    // Layout of u/ju: L entries packed at [0, sizel), diagonal at position ii,
    // U entries at [ii+1, ii+sizeu). jr maps a column index to its slot in u.

    Index sizeu = 1; // number of nonzero elements in the upper part of the current row
    Index sizel = 0; // number of nonzero elements in the lower part of the current row
    ju(ii) = convert_index<StorageIndex>(ii);
    u(ii) = 0;       // diagonal defaults to zero; overwritten below if present in the row
    jr(ii) = convert_index<StorageIndex>(ii);
    RealScalar rownorm = 0;

    typename FactorType::InnerIterator j_it(mat, ii); // Iterate through the current row ii
    for (; j_it; ++j_it)
    {
      Index k = j_it.index();
      if (k < ii)
      {
        // copy the lower part
        ju(sizel) = convert_index<StorageIndex>(k);
        u(sizel) = j_it.value();
        jr(k) = convert_index<StorageIndex>(sizel);
        ++sizel;
      }
      else if (k == ii)
      {
        u(ii) = j_it.value();
      }
      else
      {
        // copy the upper part
        Index jpos = ii + sizeu;
        ju(jpos) = convert_index<StorageIndex>(k);
        u(jpos) = j_it.value();
        jr(k) = convert_index<StorageIndex>(jpos);
        ++sizeu;
      }
      rownorm += numext::abs2(j_it.value());
    }

    // 2 - detect possible zero row
    // NOTE: early return leaves m_factorizationIsOk == false and m_info == NumericalIssue.
    if(rownorm==0)
    {
      m_info = NumericalIssue;
      return;
    }
    // Take the 2-norm of the current row as a relative tolerance
    rownorm = sqrt(rownorm);

    // 3 - eliminate the previous nonzero rows
    Index jj = 0;
    Index len = 0;   // number of computed L entries kept so far (packed at the front of u/ju)
    while (jj < sizel)
    {
      // In order to eliminate in the correct order,
      // we must select first the smallest column index among ju(jj:sizel)
      Index k;
      Index minrow = ju.segment(jj,sizel-jj).minCoeff(&k); // k is relative to the segment
      k += jj;
      if (minrow != ju(jj))
      {
        // swap the two locations
        Index j = ju(jj);
        swap(ju(jj), ju(k));
        jr(minrow) = convert_index<StorageIndex>(jj);
        jr(j) = convert_index<StorageIndex>(k);
        swap(u(jj), u(k));
      }
      // Reset this location
      jr(minrow) = -1;

      // Start elimination: locate the diagonal entry of row minrow in m_lu
      typename FactorType::InnerIterator ki_it(m_lu, minrow);
      while (ki_it && ki_it.index() < minrow) ++ki_it;
      eigen_internal_assert(ki_it && ki_it.col()==minrow);
      Scalar fact = u(jj) / ki_it.value();   // multiplier = a(ii,minrow) / u(minrow,minrow)

      // drop too small elements
      if(abs(fact) <= m_droptol)
      {
        jj++;
        continue;
      }

      // linear combination of the current row ii and the row minrow
      ++ki_it;  // skip the diagonal; combine with the U part of row minrow only
      for (; ki_it; ++ki_it)
      {
        Scalar prod = fact * ki_it.value();
        Index j = ki_it.index();
        Index jpos = jr(j);
        if (jpos == -1) // fill-in element
        {
          Index newpos;
          if (j >= ii) // dealing with the upper part
          {
            newpos = ii + sizeu;
            sizeu++;
            eigen_internal_assert(sizeu<=n);
          }
          else // dealing with the lower part
          {
            newpos = sizel;
            sizel++;
            eigen_internal_assert(sizel<=ii);
          }
          ju(newpos) = convert_index<StorageIndex>(j);
          u(newpos) = -prod;
          jr(j) = convert_index<StorageIndex>(newpos);
        }
        else
          u(jpos) -= prod;
      }
      // store the pivot element (the L(ii,minrow) multiplier)
      u(len) = fact;
      ju(len) = convert_index<StorageIndex>(minrow);
      ++len;

      jj++;
    } // end of the elimination on the row ii

    // reset the upper part of the pointer jr to zero
    for(Index k = 0; k <sizeu; k++) jr(ju(ii+k)) = -1;

    // 4 - partially sort and insert the elements in the m_lu matrix

    // sort the L-part of the row: keep only the nnzL largest-magnitude entries
    sizel = len;
    len = (std::min)(sizel, nnzL);
    typename Vector::SegmentReturnType ul(u.segment(0, sizel));
    typename VectorI::SegmentReturnType jul(ju.segment(0, sizel));
    internal::QuickSplit(ul, jul, len);   // partial sort: largest |values| moved to the front

    // store the largest m_fill elements of the L part
    m_lu.startVec(ii);
    for(Index k = 0; k < len; k++)
      m_lu.insertBackByOuterInnerUnordered(ii,ju(k)) = u(k);

    // store the diagonal element
    // apply a shifting rule to avoid zero pivots (we are doing an incomplete factorization)
    if (u(ii) == Scalar(0))
      u(ii) = sqrt(m_droptol) * rownorm;
    m_lu.insertBackByOuterInnerUnordered(ii, ii) = u(ii);

    // sort the U-part of the row
    // apply the dropping rule first (relative to the row's 2-norm)
    len = 0;
    for(Index k = 1; k < sizeu; k++)
    {
      if(abs(u(ii+k)) > m_droptol * rownorm )
      {
        ++len;
        u(ii + len) = u(ii + k);   // compact survivors toward the diagonal slot
        ju(ii + len) = ju(ii + k);
      }
    }
    sizeu = len + 1; // +1 to take into account the diagonal element
    len = (std::min)(sizeu, nnzU);
    typename Vector::SegmentReturnType uu(u.segment(ii+1, sizeu-1));
    typename VectorI::SegmentReturnType juu(ju.segment(ii+1, sizeu-1));
    internal::QuickSplit(uu, juu, len);

    // store the largest elements of the U part (len counts the diagonal, hence len-1 entries here)
    for(Index k = ii + 1; k < ii + len; k++)
      m_lu.insertBackByOuterInnerUnordered(ii,ju(k)) = u(k);
  }
  m_lu.finalize();
  m_lu.makeCompressed();   // also sorts the entries inserted unordered above

  m_factorizationIsOk = true;
  m_info = Success;
}
|
| 450 |
+
|
| 451 |
+
} // end namespace Eigen
|
| 452 |
+
|
| 453 |
+
#endif // EIGEN_INCOMPLETE_LUT_H
|
include/eigen/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h
ADDED
|
@@ -0,0 +1,444 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// This file is part of Eigen, a lightweight C++ template library
|
| 2 |
+
// for linear algebra.
|
| 3 |
+
//
|
| 4 |
+
// Copyright (C) 2011-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
|
| 5 |
+
//
|
| 6 |
+
// This Source Code Form is subject to the terms of the Mozilla
|
| 7 |
+
// Public License v. 2.0. If a copy of the MPL was not distributed
|
| 8 |
+
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
| 9 |
+
|
| 10 |
+
#ifndef EIGEN_ITERATIVE_SOLVER_BASE_H
|
| 11 |
+
#define EIGEN_ITERATIVE_SOLVER_BASE_H
|
| 12 |
+
|
| 13 |
+
namespace Eigen {
|
| 14 |
+
|
| 15 |
+
namespace internal {
|
| 16 |
+
|
| 17 |
+
// Detects at compile time whether MatrixType can bind to Ref<const MatrixType>.
// Uses the classic pre-C++11 sizeof-based SFINAE overload trick.
template<typename MatrixType>
struct is_ref_compatible_impl
{
private:
  // Catch-all parameter type: accepts anything, but at a worse conversion
  // rank than the Ref<const T> overload below.
  template <typename T0>
  struct any_conversion
  {
    template <typename T> any_conversion(const volatile T&);
    template <typename T> any_conversion(T&);
  };
  // Result types distinguished by sizeof.
  struct yes {int a[1];};
  struct no {int a[2];};

  // Preferred overload: viable only when MatrixType converts to Ref<const T>.
  template<typename T>
  static yes test(const Ref<const T>&, int);
  // Fallback overload, matched otherwise (the `...` makes it least preferred).
  template<typename T>
  static no test(any_conversion<T>, ...);

public:
  // Never defined: only referenced inside the unevaluated sizeof() below.
  static MatrixType ms_from;
  enum { value = sizeof(test<MatrixType>(ms_from, 0))==sizeof(yes) };
};
|
| 39 |
+
|
| 40 |
+
// Public trait: strips cv/reference qualifiers before running the actual test.
template<typename MatrixType>
struct is_ref_compatible
{
  enum { value = is_ref_compatible_impl<typename remove_all<MatrixType>::type>::value };
};
|
| 45 |
+
|
| 46 |
+
// Storage policy selector: when MatrixType is Ref<>-compatible the wrapper
// holds a Ref<const MatrixType> (MatrixFree=false); otherwise it stores a raw
// pointer to the user's operator object (matrix-free mode, MatrixFree=true).
template<typename MatrixType, bool MatrixFree = !internal::is_ref_compatible<MatrixType>::value>
class generic_matrix_wrapper;
|
| 48 |
+
|
| 49 |
+
// We have an explicit matrix at hand, compatible with Ref<>
template<typename MatrixType>
class generic_matrix_wrapper<MatrixType,false>
{
public:
  typedef Ref<const MatrixType> ActualMatrixType;
  template<int UpLo> struct ConstSelfAdjointViewReturnType {
    typedef typename ActualMatrixType::template ConstSelfAdjointViewReturnType<UpLo>::Type Type;
  };

  enum {
    MatrixFree = false
  };

  generic_matrix_wrapper()
    : m_dummy(0,0), m_matrix(m_dummy)  // Ref<> has no default ctor; bind it to an empty dummy
  {}

  template<typename InputType>
  generic_matrix_wrapper(const InputType &mat)
    : m_matrix(mat)
  {}

  const ActualMatrixType& matrix() const
  {
    return m_matrix;
  }

  // Re-bind m_matrix to a new expression. Ref<> is not assignable, so the
  // existing Ref is explicitly destroyed and re-constructed via placement new.
  template<typename MatrixDerived>
  void grab(const EigenBase<MatrixDerived> &mat)
  {
    m_matrix.~Ref<const MatrixType>();
    ::new (&m_matrix) Ref<const MatrixType>(mat.derived());
  }

  void grab(const Ref<const MatrixType> &mat)
  {
    // Guard against self-grab: destroying m_matrix first would invalidate mat.
    if(&(mat.derived()) != &m_matrix)
    {
      m_matrix.~Ref<const MatrixType>();
      ::new (&m_matrix) Ref<const MatrixType>(mat);
    }
  }

protected:
  MatrixType m_dummy; // used to default initialize the Ref<> object
  ActualMatrixType m_matrix;
};
|
| 97 |
+
|
| 98 |
+
// MatrixType is not compatible with Ref<> -> matrix-free wrapper
|
| 99 |
+
template<typename MatrixType>
|
| 100 |
+
class generic_matrix_wrapper<MatrixType,true>
|
| 101 |
+
{
|
| 102 |
+
public:
|
| 103 |
+
typedef MatrixType ActualMatrixType;
|
| 104 |
+
template<int UpLo> struct ConstSelfAdjointViewReturnType
|
| 105 |
+
{
|
| 106 |
+
typedef ActualMatrixType Type;
|
| 107 |
+
};
|
| 108 |
+
|
| 109 |
+
enum {
|
| 110 |
+
MatrixFree = true
|
| 111 |
+
};
|
| 112 |
+
|
| 113 |
+
generic_matrix_wrapper()
|
| 114 |
+
: mp_matrix(0)
|
| 115 |
+
{}
|
| 116 |
+
|
| 117 |
+
generic_matrix_wrapper(const MatrixType &mat)
|
| 118 |
+
: mp_matrix(&mat)
|
| 119 |
+
{}
|
| 120 |
+
|
| 121 |
+
const ActualMatrixType& matrix() const
|
| 122 |
+
{
|
| 123 |
+
return *mp_matrix;
|
| 124 |
+
}
|
| 125 |
+
|
| 126 |
+
void grab(const MatrixType &mat)
|
| 127 |
+
{
|
| 128 |
+
mp_matrix = &mat;
|
| 129 |
+
}
|
| 130 |
+
|
| 131 |
+
protected:
|
| 132 |
+
const ActualMatrixType *mp_matrix;
|
| 133 |
+
};
|
| 134 |
+
|
| 135 |
+
}
|
| 136 |
+
|
| 137 |
+
/** \ingroup IterativeLinearSolvers_Module
|
| 138 |
+
* \brief Base class for linear iterative solvers
|
| 139 |
+
*
|
| 140 |
+
* \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
|
| 141 |
+
*/
|
| 142 |
+
template< typename Derived>
|
| 143 |
+
class IterativeSolverBase : public SparseSolverBase<Derived>
|
| 144 |
+
{
|
| 145 |
+
protected:
|
| 146 |
+
typedef SparseSolverBase<Derived> Base;
|
| 147 |
+
using Base::m_isInitialized;
|
| 148 |
+
|
| 149 |
+
public:
|
| 150 |
+
typedef typename internal::traits<Derived>::MatrixType MatrixType;
|
| 151 |
+
typedef typename internal::traits<Derived>::Preconditioner Preconditioner;
|
| 152 |
+
typedef typename MatrixType::Scalar Scalar;
|
| 153 |
+
typedef typename MatrixType::StorageIndex StorageIndex;
|
| 154 |
+
typedef typename MatrixType::RealScalar RealScalar;
|
| 155 |
+
|
| 156 |
+
enum {
|
| 157 |
+
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
|
| 158 |
+
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
|
| 159 |
+
};
|
| 160 |
+
|
| 161 |
+
public:
|
| 162 |
+
|
| 163 |
+
using Base::derived;
|
| 164 |
+
|
| 165 |
+
/** Default constructor. */
|
| 166 |
+
IterativeSolverBase()
|
| 167 |
+
{
|
| 168 |
+
init();
|
| 169 |
+
}
|
| 170 |
+
|
| 171 |
+
/** Initialize the solver with matrix \a A for further \c Ax=b solving.
|
| 172 |
+
*
|
| 173 |
+
* This constructor is a shortcut for the default constructor followed
|
| 174 |
+
* by a call to compute().
|
| 175 |
+
*
|
| 176 |
+
* \warning this class stores a reference to the matrix A as well as some
|
| 177 |
+
* precomputed values that depend on it. Therefore, if \a A is changed
|
| 178 |
+
* this class becomes invalid. Call compute() to update it with the new
|
| 179 |
+
* matrix A, or modify a copy of A.
|
| 180 |
+
*/
|
| 181 |
+
template<typename MatrixDerived>
|
| 182 |
+
explicit IterativeSolverBase(const EigenBase<MatrixDerived>& A)
|
| 183 |
+
: m_matrixWrapper(A.derived())
|
| 184 |
+
{
|
| 185 |
+
init();
|
| 186 |
+
compute(matrix());
|
| 187 |
+
}
|
| 188 |
+
|
| 189 |
+
~IterativeSolverBase() {}
|
| 190 |
+
|
| 191 |
+
/** Initializes the iterative solver for the sparsity pattern of the matrix \a A for further solving \c Ax=b problems.
|
| 192 |
+
*
|
| 193 |
+
* Currently, this function mostly calls analyzePattern on the preconditioner. In the future
|
| 194 |
+
* we might, for instance, implement column reordering for faster matrix vector products.
|
| 195 |
+
*/
|
| 196 |
+
template<typename MatrixDerived>
|
| 197 |
+
Derived& analyzePattern(const EigenBase<MatrixDerived>& A)
|
| 198 |
+
{
|
| 199 |
+
grab(A.derived());
|
| 200 |
+
m_preconditioner.analyzePattern(matrix());
|
| 201 |
+
m_isInitialized = true;
|
| 202 |
+
m_analysisIsOk = true;
|
| 203 |
+
m_info = m_preconditioner.info();
|
| 204 |
+
return derived();
|
| 205 |
+
}
|
| 206 |
+
|
| 207 |
+
/** Initializes the iterative solver with the numerical values of the matrix \a A for further solving \c Ax=b problems.
|
| 208 |
+
*
|
| 209 |
+
* Currently, this function mostly calls factorize on the preconditioner.
|
| 210 |
+
*
|
| 211 |
+
* \warning this class stores a reference to the matrix A as well as some
|
| 212 |
+
* precomputed values that depend on it. Therefore, if \a A is changed
|
| 213 |
+
* this class becomes invalid. Call compute() to update it with the new
|
| 214 |
+
* matrix A, or modify a copy of A.
|
| 215 |
+
*/
|
| 216 |
+
template<typename MatrixDerived>
|
| 217 |
+
Derived& factorize(const EigenBase<MatrixDerived>& A)
|
| 218 |
+
{
|
| 219 |
+
eigen_assert(m_analysisIsOk && "You must first call analyzePattern()");
|
| 220 |
+
grab(A.derived());
|
| 221 |
+
m_preconditioner.factorize(matrix());
|
| 222 |
+
m_factorizationIsOk = true;
|
| 223 |
+
m_info = m_preconditioner.info();
|
| 224 |
+
return derived();
|
| 225 |
+
}
|
| 226 |
+
|
| 227 |
+
/** Initializes the iterative solver with the matrix \a A for further solving \c Ax=b problems.
|
| 228 |
+
*
|
| 229 |
+
* Currently, this function mostly initializes/computes the preconditioner. In the future
|
| 230 |
+
* we might, for instance, implement column reordering for faster matrix vector products.
|
| 231 |
+
*
|
| 232 |
+
* \warning this class stores a reference to the matrix A as well as some
|
| 233 |
+
* precomputed values that depend on it. Therefore, if \a A is changed
|
| 234 |
+
* this class becomes invalid. Call compute() to update it with the new
|
| 235 |
+
* matrix A, or modify a copy of A.
|
| 236 |
+
*/
|
| 237 |
+
template<typename MatrixDerived>
|
| 238 |
+
Derived& compute(const EigenBase<MatrixDerived>& A)
|
| 239 |
+
{
|
| 240 |
+
grab(A.derived());
|
| 241 |
+
m_preconditioner.compute(matrix());
|
| 242 |
+
m_isInitialized = true;
|
| 243 |
+
m_analysisIsOk = true;
|
| 244 |
+
m_factorizationIsOk = true;
|
| 245 |
+
m_info = m_preconditioner.info();
|
| 246 |
+
return derived();
|
| 247 |
+
}
|
| 248 |
+
|
| 249 |
+
/** \internal */
|
| 250 |
+
EIGEN_CONSTEXPR Index rows() const EIGEN_NOEXCEPT { return matrix().rows(); }
|
| 251 |
+
|
| 252 |
+
/** \internal */
|
| 253 |
+
EIGEN_CONSTEXPR Index cols() const EIGEN_NOEXCEPT { return matrix().cols(); }
|
| 254 |
+
|
| 255 |
+
/** \returns the tolerance threshold used by the stopping criteria.
|
| 256 |
+
* \sa setTolerance()
|
| 257 |
+
*/
|
| 258 |
+
RealScalar tolerance() const { return m_tolerance; }
|
| 259 |
+
|
| 260 |
+
/** Sets the tolerance threshold used by the stopping criteria.
|
| 261 |
+
*
|
| 262 |
+
* This value is used as an upper bound to the relative residual error: |Ax-b|/|b|.
|
| 263 |
+
* The default value is the machine precision given by NumTraits<Scalar>::epsilon()
|
| 264 |
+
*/
|
| 265 |
+
Derived& setTolerance(const RealScalar& tolerance)
|
| 266 |
+
{
|
| 267 |
+
m_tolerance = tolerance;
|
| 268 |
+
return derived();
|
| 269 |
+
}
|
| 270 |
+
|
| 271 |
+
/** \returns a read-write reference to the preconditioner for custom configuration. */
|
| 272 |
+
Preconditioner& preconditioner() { return m_preconditioner; }
|
| 273 |
+
|
| 274 |
+
/** \returns a read-only reference to the preconditioner. */
|
| 275 |
+
const Preconditioner& preconditioner() const { return m_preconditioner; }
|
| 276 |
+
|
| 277 |
+
/** \returns the max number of iterations.
|
| 278 |
+
* It is either the value set by setMaxIterations or, by default,
|
| 279 |
+
* twice the number of columns of the matrix.
|
| 280 |
+
*/
|
| 281 |
+
Index maxIterations() const
|
| 282 |
+
{
|
| 283 |
+
return (m_maxIterations<0) ? 2*matrix().cols() : m_maxIterations;
|
| 284 |
+
}
|
| 285 |
+
|
| 286 |
+
/** Sets the max number of iterations.
|
| 287 |
+
* Default is twice the number of columns of the matrix.
|
| 288 |
+
*/
|
| 289 |
+
Derived& setMaxIterations(Index maxIters)
|
| 290 |
+
{
|
| 291 |
+
m_maxIterations = maxIters;
|
| 292 |
+
return derived();
|
| 293 |
+
}
|
| 294 |
+
|
| 295 |
+
/** \returns the number of iterations performed during the last solve */
|
| 296 |
+
Index iterations() const
|
| 297 |
+
{
|
| 298 |
+
eigen_assert(m_isInitialized && "ConjugateGradient is not initialized.");
|
| 299 |
+
return m_iterations;
|
| 300 |
+
}
|
| 301 |
+
|
| 302 |
+
/** \returns the tolerance error reached during the last solve.
|
| 303 |
+
* It is a close approximation of the true relative residual error |Ax-b|/|b|.
|
| 304 |
+
*/
|
| 305 |
+
RealScalar error() const
|
| 306 |
+
{
|
| 307 |
+
eigen_assert(m_isInitialized && "ConjugateGradient is not initialized.");
|
| 308 |
+
return m_error;
|
| 309 |
+
}
|
| 310 |
+
|
| 311 |
+
/** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A
|
| 312 |
+
* and \a x0 as an initial solution.
|
| 313 |
+
*
|
| 314 |
+
* \sa solve(), compute()
|
| 315 |
+
*/
|
| 316 |
+
template<typename Rhs,typename Guess>
|
| 317 |
+
inline const SolveWithGuess<Derived, Rhs, Guess>
|
| 318 |
+
solveWithGuess(const MatrixBase<Rhs>& b, const Guess& x0) const
|
| 319 |
+
{
|
| 320 |
+
eigen_assert(m_isInitialized && "Solver is not initialized.");
|
| 321 |
+
eigen_assert(derived().rows()==b.rows() && "solve(): invalid number of rows of the right hand side matrix b");
|
| 322 |
+
return SolveWithGuess<Derived, Rhs, Guess>(derived(), b.derived(), x0);
|
| 323 |
+
}
|
| 324 |
+
|
| 325 |
+
/** \returns Success if the iterations converged, and NoConvergence otherwise. */
|
| 326 |
+
ComputationInfo info() const
|
| 327 |
+
{
|
| 328 |
+
eigen_assert(m_isInitialized && "IterativeSolverBase is not initialized.");
|
| 329 |
+
return m_info;
|
| 330 |
+
}
|
| 331 |
+
|
| 332 |
+
/** \internal */
|
| 333 |
+
template<typename Rhs, typename DestDerived>
|
| 334 |
+
void _solve_with_guess_impl(const Rhs& b, SparseMatrixBase<DestDerived> &aDest) const
|
| 335 |
+
{
|
| 336 |
+
eigen_assert(rows()==b.rows());
|
| 337 |
+
|
| 338 |
+
Index rhsCols = b.cols();
|
| 339 |
+
Index size = b.rows();
|
| 340 |
+
DestDerived& dest(aDest.derived());
|
| 341 |
+
typedef typename DestDerived::Scalar DestScalar;
|
| 342 |
+
Eigen::Matrix<DestScalar,Dynamic,1> tb(size);
|
| 343 |
+
Eigen::Matrix<DestScalar,Dynamic,1> tx(cols());
|
| 344 |
+
// We do not directly fill dest because sparse expressions have to be free of aliasing issue.
|
| 345 |
+
// For non square least-square problems, b and dest might not have the same size whereas they might alias each-other.
|
| 346 |
+
typename DestDerived::PlainObject tmp(cols(),rhsCols);
|
| 347 |
+
ComputationInfo global_info = Success;
|
| 348 |
+
for(Index k=0; k<rhsCols; ++k)
|
| 349 |
+
{
|
| 350 |
+
tb = b.col(k);
|
| 351 |
+
tx = dest.col(k);
|
| 352 |
+
derived()._solve_vector_with_guess_impl(tb,tx);
|
| 353 |
+
tmp.col(k) = tx.sparseView(0);
|
| 354 |
+
|
| 355 |
+
// The call to _solve_vector_with_guess_impl updates m_info, so if it failed for a previous column
|
| 356 |
+
// we need to restore it to the worst value.
|
| 357 |
+
if(m_info==NumericalIssue)
|
| 358 |
+
global_info = NumericalIssue;
|
| 359 |
+
else if(m_info==NoConvergence)
|
| 360 |
+
global_info = NoConvergence;
|
| 361 |
+
}
|
| 362 |
+
m_info = global_info;
|
| 363 |
+
dest.swap(tmp);
|
| 364 |
+
}
|
| 365 |
+
|
| 366 |
+
template<typename Rhs, typename DestDerived>
|
| 367 |
+
typename internal::enable_if<Rhs::ColsAtCompileTime!=1 && DestDerived::ColsAtCompileTime!=1>::type
|
| 368 |
+
_solve_with_guess_impl(const Rhs& b, MatrixBase<DestDerived> &aDest) const
|
| 369 |
+
{
|
| 370 |
+
eigen_assert(rows()==b.rows());
|
| 371 |
+
|
| 372 |
+
Index rhsCols = b.cols();
|
| 373 |
+
DestDerived& dest(aDest.derived());
|
| 374 |
+
ComputationInfo global_info = Success;
|
| 375 |
+
for(Index k=0; k<rhsCols; ++k)
|
| 376 |
+
{
|
| 377 |
+
typename DestDerived::ColXpr xk(dest,k);
|
| 378 |
+
typename Rhs::ConstColXpr bk(b,k);
|
| 379 |
+
derived()._solve_vector_with_guess_impl(bk,xk);
|
| 380 |
+
|
| 381 |
+
// The call to _solve_vector_with_guess updates m_info, so if it failed for a previous column
|
| 382 |
+
// we need to restore it to the worst value.
|
| 383 |
+
if(m_info==NumericalIssue)
|
| 384 |
+
global_info = NumericalIssue;
|
| 385 |
+
else if(m_info==NoConvergence)
|
| 386 |
+
global_info = NoConvergence;
|
| 387 |
+
}
|
| 388 |
+
m_info = global_info;
|
| 389 |
+
}
|
| 390 |
+
|
| 391 |
+
template<typename Rhs, typename DestDerived>
|
| 392 |
+
typename internal::enable_if<Rhs::ColsAtCompileTime==1 || DestDerived::ColsAtCompileTime==1>::type
|
| 393 |
+
_solve_with_guess_impl(const Rhs& b, MatrixBase<DestDerived> &dest) const
|
| 394 |
+
{
|
| 395 |
+
derived()._solve_vector_with_guess_impl(b,dest.derived());
|
| 396 |
+
}
|
| 397 |
+
|
| 398 |
+
/** \internal default initial guess = 0 */
|
| 399 |
+
template<typename Rhs,typename Dest>
|
| 400 |
+
void _solve_impl(const Rhs& b, Dest& x) const
|
| 401 |
+
{
|
| 402 |
+
x.setZero();
|
| 403 |
+
derived()._solve_with_guess_impl(b,x);
|
| 404 |
+
}
|
| 405 |
+
|
| 406 |
+
protected:
|
| 407 |
+
// Reset all state flags and restore default solver parameters.
// Called by the constructors before any matrix is attached.
void init()
{
  m_isInitialized = false;
  m_analysisIsOk = false;
  m_factorizationIsOk = false;
  // -1 is a sentinel meaning "no explicit limit set"; presumably
  // maxIterations() maps it to a problem-size-dependent default — confirm.
  m_maxIterations = -1;
  m_tolerance = NumTraits<Scalar>::epsilon();
}
|
| 415 |
+
|
| 416 |
+
typedef internal::generic_matrix_wrapper<MatrixType> MatrixWrapper;
|
| 417 |
+
typedef typename MatrixWrapper::ActualMatrixType ActualMatrixType;
|
| 418 |
+
|
| 419 |
+
// Access the matrix (or matrix-free operator) held by the wrapper.
const ActualMatrixType& matrix() const
{
  return m_matrixWrapper.matrix();
}
|
| 423 |
+
|
| 424 |
+
// Attach the user's matrix A to the internal wrapper. Whether A is stored
// by reference or copied is decided by generic_matrix_wrapper::grab.
template<typename InputType>
void grab(const InputType &A)
{
  m_matrixWrapper.grab(A);
}
|
| 429 |
+
|
| 430 |
+
MatrixWrapper m_matrixWrapper;
|
| 431 |
+
Preconditioner m_preconditioner;
|
| 432 |
+
|
| 433 |
+
Index m_maxIterations;
|
| 434 |
+
RealScalar m_tolerance;
|
| 435 |
+
|
| 436 |
+
mutable RealScalar m_error;
|
| 437 |
+
mutable Index m_iterations;
|
| 438 |
+
mutable ComputationInfo m_info;
|
| 439 |
+
mutable bool m_analysisIsOk, m_factorizationIsOk;
|
| 440 |
+
};
|
| 441 |
+
|
| 442 |
+
} // end namespace Eigen
|
| 443 |
+
|
| 444 |
+
#endif // EIGEN_ITERATIVE_SOLVER_BASE_H
|
include/eigen/Eigen/src/IterativeLinearSolvers/LeastSquareConjugateGradient.h
ADDED
|
@@ -0,0 +1,198 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// This file is part of Eigen, a lightweight C++ template library
|
| 2 |
+
// for linear algebra.
|
| 3 |
+
//
|
| 4 |
+
// Copyright (C) 2015 Gael Guennebaud <gael.guennebaud@inria.fr>
|
| 5 |
+
//
|
| 6 |
+
// This Source Code Form is subject to the terms of the Mozilla
|
| 7 |
+
// Public License v. 2.0. If a copy of the MPL was not distributed
|
| 8 |
+
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
| 9 |
+
|
| 10 |
+
#ifndef EIGEN_LEAST_SQUARE_CONJUGATE_GRADIENT_H
|
| 11 |
+
#define EIGEN_LEAST_SQUARE_CONJUGATE_GRADIENT_H
|
| 12 |
+
|
| 13 |
+
namespace Eigen {
|
| 14 |
+
|
| 15 |
+
namespace internal {
|
| 16 |
+
|
| 17 |
+
/** \internal Low-level conjugate gradient algorithm for least-square problems
|
| 18 |
+
* \param mat The matrix A
|
| 19 |
+
* \param rhs The right hand side vector b
|
| 20 |
+
* \param x On input and initial solution, on output the computed solution.
|
| 21 |
+
* \param precond A preconditioner being able to efficiently solve for an
|
| 22 |
+
* approximation of A'Ax=b (regardless of b)
|
| 23 |
+
* \param iters On input the max number of iteration, on output the number of performed iterations.
|
| 24 |
+
* \param tol_error On input the tolerance error, on output an estimation of the relative error.
|
| 25 |
+
*/
|
| 26 |
+
template<typename MatrixType, typename Rhs, typename Dest, typename Preconditioner>
EIGEN_DONT_INLINE
void least_square_conjugate_gradient(const MatrixType& mat, const Rhs& rhs, Dest& x,
                                     const Preconditioner& precond, Index& iters,
                                     typename Dest::RealScalar& tol_error)
{
  // Conjugate gradient applied to the normal equations A'A x = A'b (CGLS):
  // A'A is never formed; each iteration uses one product by A and one by A'.
  using std::sqrt;
  using std::abs;
  typedef typename Dest::RealScalar RealScalar;
  typedef typename Dest::Scalar Scalar;
  typedef Matrix<Scalar,Dynamic,1> VectorType;

  RealScalar tol = tol_error;
  Index maxIters = iters;

  Index m = mat.rows(), n = mat.cols();

  VectorType residual        = rhs - mat * x;            // r = b - A x
  VectorType normal_residual = mat.adjoint() * residual; // s = A'(b - A x)

  // Convergence is measured relative to ||A'b||^2.
  RealScalar rhsNorm2 = (mat.adjoint()*rhs).squaredNorm();
  if(rhsNorm2 == 0)
  {
    // A'b == 0: x = 0 is an exact least-square solution.
    x.setZero();
    iters = 0;
    tol_error = 0;
    return;
  }
  RealScalar threshold = tol*tol*rhsNorm2;
  RealScalar residualNorm2 = normal_residual.squaredNorm();
  if (residualNorm2 < threshold)
  {
    // Initial guess already satisfies the tolerance.
    iters = 0;
    tol_error = sqrt(residualNorm2 / rhsNorm2);
    return;
  }

  VectorType p(n);
  p = precond.solve(normal_residual);      // initial search direction

  VectorType z(n), tmp(m);
  RealScalar absNew = numext::real(normal_residual.dot(p));  // the square of the absolute value of r scaled by invM
  Index i = 0;
  while(i < maxIters)
  {
    tmp.noalias() = mat * p;

    Scalar alpha = absNew / tmp.squaredNorm();      // the amount we travel on dir
    x += alpha * p;                                 // update solution
    residual -= alpha * tmp;                        // update residual
    normal_residual = mat.adjoint() * residual;     // update residual of the normal equation

    residualNorm2 = normal_residual.squaredNorm();
    if(residualNorm2 < threshold)
      break;

    z = precond.solve(normal_residual);             // approximately solve for "A'A z = normal_residual"

    RealScalar absOld = absNew;
    absNew = numext::real(normal_residual.dot(z));  // update the absolute value of r
    RealScalar beta = absNew / absOld;              // calculate the Gram-Schmidt value used to create the new search direction
    p = z + beta * p;                               // update search direction
    i++;
  }
  // Return the achieved relative error and the number of iterations performed.
  tol_error = sqrt(residualNorm2 / rhsNorm2);
  iters = i;
}
|
| 93 |
+
|
| 94 |
+
}
|
| 95 |
+
|
| 96 |
+
template< typename _MatrixType,
|
| 97 |
+
typename _Preconditioner = LeastSquareDiagonalPreconditioner<typename _MatrixType::Scalar> >
|
| 98 |
+
class LeastSquaresConjugateGradient;
|
| 99 |
+
|
| 100 |
+
namespace internal {
|
| 101 |
+
|
| 102 |
+
// Expose the solver's template arguments through the generic traits
// mechanism so that IterativeSolverBase can recover them (CRTP).
template< typename _MatrixType, typename _Preconditioner>
struct traits<LeastSquaresConjugateGradient<_MatrixType,_Preconditioner> >
{
  typedef _MatrixType MatrixType;
  typedef _Preconditioner Preconditioner;
};
|
| 108 |
+
|
| 109 |
+
}
|
| 110 |
+
|
| 111 |
+
/** \ingroup IterativeLinearSolvers_Module
|
| 112 |
+
* \brief A conjugate gradient solver for sparse (or dense) least-square problems
|
| 113 |
+
*
|
| 114 |
+
* This class allows to solve for A x = b linear problems using an iterative conjugate gradient algorithm.
|
| 115 |
+
* The matrix A can be non symmetric and rectangular, but the matrix A' A should be positive-definite to guaranty stability.
|
| 116 |
+
* Otherwise, the SparseLU or SparseQR classes might be preferable.
|
| 117 |
+
* The matrix A and the vectors x and b can be either dense or sparse.
|
| 118 |
+
*
|
| 119 |
+
* \tparam _MatrixType the type of the matrix A, can be a dense or a sparse matrix.
|
| 120 |
+
* \tparam _Preconditioner the type of the preconditioner. Default is LeastSquareDiagonalPreconditioner
|
| 121 |
+
*
|
| 122 |
+
* \implsparsesolverconcept
|
| 123 |
+
*
|
| 124 |
+
* The maximal number of iterations and tolerance value can be controlled via the setMaxIterations()
|
| 125 |
+
* and setTolerance() methods. The defaults are the size of the problem for the maximal number of iterations
|
| 126 |
+
* and NumTraits<Scalar>::epsilon() for the tolerance.
|
| 127 |
+
*
|
| 128 |
+
* This class can be used as the direct solver classes. Here is a typical usage example:
|
| 129 |
+
\code
|
| 130 |
+
int m=1000000, n = 10000;
|
| 131 |
+
VectorXd x(n), b(m);
|
| 132 |
+
SparseMatrix<double> A(m,n);
|
| 133 |
+
// fill A and b
|
| 134 |
+
LeastSquaresConjugateGradient<SparseMatrix<double> > lscg;
|
| 135 |
+
lscg.compute(A);
|
| 136 |
+
x = lscg.solve(b);
|
| 137 |
+
std::cout << "#iterations: " << lscg.iterations() << std::endl;
|
| 138 |
+
std::cout << "estimated error: " << lscg.error() << std::endl;
|
| 139 |
+
// update b, and solve again
|
| 140 |
+
x = lscg.solve(b);
|
| 141 |
+
\endcode
|
| 142 |
+
*
|
| 143 |
+
* By default the iterations start with x=0 as an initial guess of the solution.
|
| 144 |
+
* One can control the start using the solveWithGuess() method.
|
| 145 |
+
*
|
| 146 |
+
* \sa class ConjugateGradient, SparseLU, SparseQR
|
| 147 |
+
*/
|
| 148 |
+
template< typename _MatrixType, typename _Preconditioner>
class LeastSquaresConjugateGradient : public IterativeSolverBase<LeastSquaresConjugateGradient<_MatrixType,_Preconditioner> >
{
  typedef IterativeSolverBase<LeastSquaresConjugateGradient> Base;
  using Base::matrix;
  using Base::m_error;
  using Base::m_iterations;
  using Base::m_info;
  using Base::m_isInitialized;
public:
  typedef _MatrixType MatrixType;
  typedef typename MatrixType::Scalar Scalar;
  typedef typename MatrixType::RealScalar RealScalar;
  typedef _Preconditioner Preconditioner;

public:

  /** Default constructor. */
  LeastSquaresConjugateGradient() : Base() {}

  /** Initialize the solver with matrix \a A for further \c Ax=b solving.
    *
    * This constructor is a shortcut for the default constructor followed
    * by a call to compute().
    *
    * \warning this class stores a reference to the matrix A as well as some
    * precomputed values that depend on it. Therefore, if \a A is changed
    * this class becomes invalid. Call compute() to update it with the new
    * matrix A, or modify a copy of A.
    */
  template<typename MatrixDerived>
  explicit LeastSquaresConjugateGradient(const EigenBase<MatrixDerived>& A) : Base(A.derived()) {}

  ~LeastSquaresConjugateGradient() {}

  /** \internal Single right-hand-side kernel called by the base class.
    * Runs CGLS starting from the guess already stored in \a x, and records
    * the iteration count, achieved error, and convergence status in the
    * base class' mutable members. */
  template<typename Rhs,typename Dest>
  void _solve_vector_with_guess_impl(const Rhs& b, Dest& x) const
  {
    m_iterations = Base::maxIterations();
    m_error = Base::m_tolerance;

    internal::least_square_conjugate_gradient(matrix(), b, x, Base::m_preconditioner, m_iterations, m_error);
    m_info = m_error <= Base::m_tolerance ? Success : NoConvergence;
  }

};
|
| 195 |
+
|
| 196 |
+
} // end namespace Eigen
|
| 197 |
+
|
| 198 |
+
#endif // EIGEN_LEAST_SQUARE_CONJUGATE_GRADIENT_H
|
include/eigen/Eigen/src/IterativeLinearSolvers/SolveWithGuess.h
ADDED
|
@@ -0,0 +1,117 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// This file is part of Eigen, a lightweight C++ template library
|
| 2 |
+
// for linear algebra.
|
| 3 |
+
//
|
| 4 |
+
// Copyright (C) 2014 Gael Guennebaud <gael.guennebaud@inria.fr>
|
| 5 |
+
//
|
| 6 |
+
// This Source Code Form is subject to the terms of the Mozilla
|
| 7 |
+
// Public License v. 2.0. If a copy of the MPL was not distributed
|
| 8 |
+
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
| 9 |
+
|
| 10 |
+
#ifndef EIGEN_SOLVEWITHGUESS_H
|
| 11 |
+
#define EIGEN_SOLVEWITHGUESS_H
|
| 12 |
+
|
| 13 |
+
namespace Eigen {
|
| 14 |
+
|
| 15 |
+
template<typename Decomposition, typename RhsType, typename GuessType> class SolveWithGuess;
|
| 16 |
+
|
| 17 |
+
/** \class SolveWithGuess
|
| 18 |
+
* \ingroup IterativeLinearSolvers_Module
|
| 19 |
+
*
|
| 20 |
+
* \brief Pseudo expression representing a solving operation
|
| 21 |
+
*
|
| 22 |
+
* \tparam Decomposition the type of the matrix or decomposion object
|
| 23 |
+
* \tparam Rhstype the type of the right-hand side
|
| 24 |
+
*
|
| 25 |
+
* This class represents an expression of A.solve(B)
|
| 26 |
+
* and most of the time this is the only way it is used.
|
| 27 |
+
*
|
| 28 |
+
*/
|
| 29 |
+
namespace internal {
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
// SolveWithGuess shares the traits of the guess-less Solve expression:
// same scalar, storage kind and plain-object result type.
template<typename Decomposition, typename RhsType, typename GuessType>
struct traits<SolveWithGuess<Decomposition, RhsType, GuessType> >
  : traits<Solve<Decomposition,RhsType> >
{};
|
| 36 |
+
|
| 37 |
+
}
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
template<typename Decomposition, typename RhsType, typename GuessType>
class SolveWithGuess : public internal::generic_xpr_base<SolveWithGuess<Decomposition,RhsType,GuessType>, MatrixXpr, typename internal::traits<RhsType>::StorageKind>::type
{
public:
  typedef typename internal::traits<SolveWithGuess>::Scalar Scalar;
  typedef typename internal::traits<SolveWithGuess>::PlainObject PlainObject;
  typedef typename internal::generic_xpr_base<SolveWithGuess<Decomposition,RhsType,GuessType>, MatrixXpr, typename internal::traits<RhsType>::StorageKind>::type Base;
  typedef typename internal::ref_selector<SolveWithGuess>::type Nested;

  // Stores references only; the expression stays valid as long as the
  // decomposition, right-hand side and guess outlive it.
  SolveWithGuess(const Decomposition &dec, const RhsType &rhs, const GuessType &guess)
    : m_dec(dec), m_rhs(rhs), m_guess(guess)
  {}

  // The solution x of A x = b has as many rows as A has columns.
  EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
  Index rows() const EIGEN_NOEXCEPT { return m_dec.cols(); }
  EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR
  Index cols() const EIGEN_NOEXCEPT { return m_rhs.cols(); }

  EIGEN_DEVICE_FUNC const Decomposition& dec()   const { return m_dec; }
  EIGEN_DEVICE_FUNC const RhsType&       rhs()   const { return m_rhs; }
  EIGEN_DEVICE_FUNC const GuessType&     guess() const { return m_guess; }

protected:
  const Decomposition &m_dec;
  const RhsType       &m_rhs;
  const GuessType     &m_guess;

private:
  // Declared private and left undefined: direct coefficient access on this
  // expression is forbidden; it must be evaluated into a plain object first.
  Scalar coeff(Index row, Index col) const;
  Scalar coeff(Index i) const;
};
|
| 71 |
+
|
| 72 |
+
namespace internal {
|
| 73 |
+
|
| 74 |
+
// Evaluator of SolveWithGuess -> eval into a temporary
|
| 75 |
+
template<typename Decomposition, typename RhsType, typename GuessType>
struct evaluator<SolveWithGuess<Decomposition,RhsType, GuessType> >
  : public evaluator<typename SolveWithGuess<Decomposition,RhsType,GuessType>::PlainObject>
{
  typedef SolveWithGuess<Decomposition,RhsType,GuessType> SolveType;
  typedef typename SolveType::PlainObject PlainObject;
  typedef evaluator<PlainObject> Base;

  // Eagerly evaluates the solve into the member temporary m_result, then
  // re-seats the base evaluator on top of it via placement-new so that
  // subsequent coefficient reads go through the plain-object evaluator.
  evaluator(const SolveType& solve)
    : m_result(solve.rows(), solve.cols())
  {
    // m_result is sized first; the Base subobject is then reconstructed in
    // place to reference it (the default-constructed Base is discarded).
    ::new (static_cast<Base*>(this)) Base(m_result);
    m_result = solve.guess();
    solve.dec()._solve_with_guess_impl(solve.rhs(), m_result);
  }

protected:
  PlainObject m_result;
};
|
| 94 |
+
|
| 95 |
+
// Specialization for "dst = dec.solveWithGuess(rhs)"
|
| 96 |
+
// NOTE we need to specialize it for Dense2Dense to avoid ambiguous specialization error and a Sparse2Sparse specialization must exist somewhere
|
| 97 |
+
template<typename DstXprType, typename DecType, typename RhsType, typename GuessType, typename Scalar>
struct Assignment<DstXprType, SolveWithGuess<DecType,RhsType,GuessType>, internal::assign_op<Scalar,Scalar>, Dense2Dense>
{
  typedef SolveWithGuess<DecType,RhsType,GuessType> SrcXprType;
  // Assigns directly into the destination, avoiding the temporary used by
  // the generic evaluator path above.
  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &)
  {
    Index dstRows = src.rows();
    Index dstCols = src.cols();
    if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))
      dst.resize(dstRows, dstCols);

    // Seed the destination with the initial guess, then solve in place.
    dst = src.guess();
    src.dec()._solve_with_guess_impl(src.rhs(), dst/*, src.guess()*/);
  }
};
|
| 112 |
+
|
| 113 |
+
} // end namespace internal
|
| 114 |
+
|
| 115 |
+
} // end namespace Eigen
|
| 116 |
+
|
| 117 |
+
#endif // EIGEN_SOLVEWITHGUESS_H
|
include/eigen/Eigen/src/PaStiXSupport/PaStiXSupport.h
ADDED
|
@@ -0,0 +1,678 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// This file is part of Eigen, a lightweight C++ template library
|
| 2 |
+
// for linear algebra.
|
| 3 |
+
//
|
| 4 |
+
// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
|
| 5 |
+
//
|
| 6 |
+
// This Source Code Form is subject to the terms of the Mozilla
|
| 7 |
+
// Public License v. 2.0. If a copy of the MPL was not distributed
|
| 8 |
+
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
| 9 |
+
|
| 10 |
+
#ifndef EIGEN_PASTIXSUPPORT_H
|
| 11 |
+
#define EIGEN_PASTIXSUPPORT_H
|
| 12 |
+
|
| 13 |
+
namespace Eigen {
|
| 14 |
+
|
| 15 |
+
#if defined(DCOMPLEX)
|
| 16 |
+
#define PASTIX_COMPLEX COMPLEX
|
| 17 |
+
#define PASTIX_DCOMPLEX DCOMPLEX
|
| 18 |
+
#else
|
| 19 |
+
#define PASTIX_COMPLEX std::complex<float>
|
| 20 |
+
#define PASTIX_DCOMPLEX std::complex<double>
|
| 21 |
+
#endif
|
| 22 |
+
|
| 23 |
+
/** \ingroup PaStiXSupport_Module
|
| 24 |
+
* \brief Interface to the PaStix solver
|
| 25 |
+
*
|
| 26 |
+
* This class is used to solve the linear systems A.X = B via the PaStix library.
|
| 27 |
+
* The matrix can be either real or complex, symmetric or not.
|
| 28 |
+
*
|
| 29 |
+
* \sa TutorialSparseDirectSolvers
|
| 30 |
+
*/
|
| 31 |
+
template<typename _MatrixType, bool IsStrSym = false> class PastixLU;
|
| 32 |
+
template<typename _MatrixType, int Options> class PastixLLT;
|
| 33 |
+
template<typename _MatrixType, int Options> class PastixLDLT;
|
| 34 |
+
|
| 35 |
+
namespace internal
|
| 36 |
+
{
|
| 37 |
+
|
| 38 |
+
// Traits so that PastixBase (CRTP) can recover the matrix and scalar types
// of its derived solver. One specialization per PaStiX solver flavor.
template<class Pastix> struct pastix_traits;

template<typename _MatrixType>
struct pastix_traits< PastixLU<_MatrixType> >
{
  typedef _MatrixType MatrixType;
  typedef typename _MatrixType::Scalar Scalar;
  typedef typename _MatrixType::RealScalar RealScalar;
  typedef typename _MatrixType::StorageIndex StorageIndex;
};

template<typename _MatrixType, int Options>
struct pastix_traits< PastixLLT<_MatrixType,Options> >
{
  typedef _MatrixType MatrixType;
  typedef typename _MatrixType::Scalar Scalar;
  typedef typename _MatrixType::RealScalar RealScalar;
  typedef typename _MatrixType::StorageIndex StorageIndex;
};

template<typename _MatrixType, int Options>
struct pastix_traits< PastixLDLT<_MatrixType,Options> >
{
  typedef _MatrixType MatrixType;
  typedef typename _MatrixType::Scalar Scalar;
  typedef typename _MatrixType::RealScalar RealScalar;
  typedef typename _MatrixType::StorageIndex StorageIndex;
};
|
| 66 |
+
|
| 67 |
+
// Single-precision real overload: normalizes degenerate arguments and
// dispatches to s_pastix.
inline void eigen_pastix(pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, float *vals, int *perm, int * invp, float *x, int nbrhs, int *iparm, double *dparm)
{
  // An empty matrix is passed to PaStiX as null CSC arrays.
  if (n == 0)
  {
    ptr  = NULL;
    idx  = NULL;
    vals = NULL;
  }
  // No right-hand side: null buffer, but the count must still be 1.
  if (nbrhs == 0)
  {
    x = NULL;
    nbrhs = 1;
  }
  s_pastix(pastix_data, pastix_comm, n, ptr, idx, vals, perm, invp, x, nbrhs, iparm, dparm);
}
|
| 73 |
+
|
| 74 |
+
// Double-precision real overload: normalizes degenerate arguments and
// dispatches to d_pastix.
inline void eigen_pastix(pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, double *vals, int *perm, int * invp, double *x, int nbrhs, int *iparm, double *dparm)
{
  // An empty matrix is passed to PaStiX as null CSC arrays.
  if (n == 0)
  {
    ptr  = NULL;
    idx  = NULL;
    vals = NULL;
  }
  // No right-hand side: null buffer, but the count must still be 1.
  if (nbrhs == 0)
  {
    x = NULL;
    nbrhs = 1;
  }
  d_pastix(pastix_data, pastix_comm, n, ptr, idx, vals, perm, invp, x, nbrhs, iparm, dparm);
}
|
| 80 |
+
|
| 81 |
+
// Single-precision complex overload: normalizes degenerate arguments, then
// dispatches to c_pastix, reinterpreting std::complex<float> buffers as
// PaStiX's own complex type.
inline void eigen_pastix(pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, std::complex<float> *vals, int *perm, int * invp, std::complex<float> *x, int nbrhs, int *iparm, double *dparm)
{
  // An empty matrix is passed to PaStiX as null CSC arrays.
  if (n == 0)
  {
    ptr  = NULL;
    idx  = NULL;
    vals = NULL;
  }
  // No right-hand side: null buffer, but the count must still be 1.
  if (nbrhs == 0)
  {
    x = NULL;
    nbrhs = 1;
  }
  c_pastix(pastix_data, pastix_comm, n, ptr, idx, reinterpret_cast<PASTIX_COMPLEX*>(vals), perm, invp, reinterpret_cast<PASTIX_COMPLEX*>(x), nbrhs, iparm, dparm);
}
|
| 87 |
+
|
| 88 |
+
// Double-precision complex overload: normalizes degenerate arguments, then
// dispatches to z_pastix, reinterpreting std::complex<double> buffers as
// PaStiX's own complex type.
inline void eigen_pastix(pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, std::complex<double> *vals, int *perm, int * invp, std::complex<double> *x, int nbrhs, int *iparm, double *dparm)
{
  // An empty matrix is passed to PaStiX as null CSC arrays.
  if (n == 0)
  {
    ptr  = NULL;
    idx  = NULL;
    vals = NULL;
  }
  // No right-hand side: null buffer, but the count must still be 1.
  if (nbrhs == 0)
  {
    x = NULL;
    nbrhs = 1;
  }
  z_pastix(pastix_data, pastix_comm, n, ptr, idx, reinterpret_cast<PASTIX_DCOMPLEX*>(vals), perm, invp, reinterpret_cast<PASTIX_DCOMPLEX*>(x), nbrhs, iparm, dparm);
}
|
| 94 |
+
|
| 95 |
+
// Shift the matrix indices to Fortran-style (1-based) numbering, as
// expected by PaStiX. No-op if the matrix is already 1-based.
template <typename MatrixType>
void c_to_fortran_numbering (MatrixType& mat)
{
  // A 0-based matrix always has outerIndexPtr()[0] == 0.
  if ( !(mat.outerIndexPtr()[0]) )
  {
    for(int k = 0; k <= mat.rows(); ++k)
      mat.outerIndexPtr()[k] += 1;
    for(int k = 0; k < mat.nonZeros(); ++k)
      mat.innerIndexPtr()[k] += 1;
  }
}
|
| 108 |
+
|
| 109 |
+
// Shift the matrix indices back to C-style (0-based) numbering.
// No-op if the matrix is already 0-based.
template <typename MatrixType>
void fortran_to_c_numbering (MatrixType& mat)
{
  // A 1-based matrix always has outerIndexPtr()[0] == 1.
  if ( mat.outerIndexPtr()[0] == 1 )
  {
    for(int k = 0; k <= mat.rows(); ++k)
      mat.outerIndexPtr()[k] -= 1;
    for(int k = 0; k < mat.nonZeros(); ++k)
      mat.innerIndexPtr()[k] -= 1;
  }
}
|
| 123 |
+
}
|
| 124 |
+
|
| 125 |
+
// This is the base class to interface with PaStiX functions.
|
| 126 |
+
// Users should not used this class directly.
|
| 127 |
+
template <class Derived>
class PastixBase : public SparseSolverBase<Derived>
{
  protected:
    typedef SparseSolverBase<Derived> Base;
    using Base::derived;
    using Base::m_isInitialized;
  public:
    using Base::_solve_impl;

    typedef typename internal::pastix_traits<Derived>::MatrixType _MatrixType;
    typedef _MatrixType MatrixType;
    typedef typename MatrixType::Scalar Scalar;
    typedef typename MatrixType::RealScalar RealScalar;
    typedef typename MatrixType::StorageIndex StorageIndex;
    typedef Matrix<Scalar,Dynamic,1> Vector;
    // PaStiX consumes column-major compressed sparse matrices.
    typedef SparseMatrix<Scalar, ColMajor> ColSpMatrix;
    enum {
      ColsAtCompileTime = MatrixType::ColsAtCompileTime,
      MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
    };

  public:

    // Allocates the PaStiX internal structures and loads default parameters.
    PastixBase() : m_initisOk(false), m_analysisIsOk(false), m_factorizationIsOk(false), m_pastixdata(0), m_size(0)
    {
      init();
    }

    ~PastixBase()
    {
      clean();
    }

    template<typename Rhs,typename Dest>
    bool _solve_impl(const MatrixBase<Rhs> &b, MatrixBase<Dest> &x) const;

    /** Returns a reference to the integer vector IPARM of PaStiX parameters
      * to modify the default parameters.
      * The statistics related to the different phases of factorization and solve are saved here as well
      * \sa analyzePattern() factorize()
      */
    // NOTE(review): returns Array<StorageIndex,...> while m_iparm is declared
    // Array<int,...> — this only compiles when StorageIndex is int; confirm.
    Array<StorageIndex,IPARM_SIZE,1>& iparm()
    {
      return m_iparm;
    }

    /** Return a reference to a particular index parameter of the IPARM vector
     * \sa iparm()
     */

    int& iparm(int idxparam)
    {
      return m_iparm(idxparam);
    }

    /** Returns a reference to the double vector DPARM of PaStiX parameters
      * The statistics related to the different phases of factorization and solve are saved here as well
      * \sa analyzePattern() factorize()
      */
    Array<double,DPARM_SIZE,1>& dparm()
    {
      return m_dparm;
    }


    /** Return a reference to a particular index parameter of the DPARM vector
     * \sa dparm()
     */
    double& dparm(int idxparam)
    {
      return m_dparm(idxparam);
    }

    // The factored matrix is square of size m_size.
    inline Index cols() const { return m_size; }
    inline Index rows() const { return m_size; }

    /** \brief Reports whether previous computation was successful.
      *
      * \returns \c Success if computation was successful,
      *          \c NumericalIssue if the PaStiX reports a problem
      *          \c InvalidInput if the input matrix is invalid
      *
      * \sa iparm()
      */
    ComputationInfo info() const
    {
      eigen_assert(m_isInitialized && "Decomposition is not initialized.");
      return m_info;
    }

  protected:

    // Initialize the Pastix data structure, check the matrix
    void init();

    // Compute the ordering and the symbolic factorization
    void analyzePattern(ColSpMatrix& mat);

    // Compute the numerical factorization
    void factorize(ColSpMatrix& mat);

    // Free all the data allocated by Pastix
    void clean()
    {
      eigen_assert(m_initisOk && "The Pastix structure should be allocated first");
      // Run the CLEAN task only (START == END == CLEAN).
      m_iparm(IPARM_START_TASK) = API_TASK_CLEAN;
      m_iparm(IPARM_END_TASK) = API_TASK_CLEAN;
      internal::eigen_pastix(&m_pastixdata, MPI_COMM_WORLD, 0, 0, 0, (Scalar*)0,
                             m_perm.data(), m_invp.data(), 0, 0, m_iparm.data(), m_dparm.data());
    }

    // Full pipeline: analyzePattern followed by factorize (defined elsewhere).
    void compute(ColSpMatrix& mat);

    int m_initisOk;
    int m_analysisIsOk;
    int m_factorizationIsOk;
    mutable ComputationInfo m_info;
    mutable pastix_data_t *m_pastixdata; // Data structure for pastix
    mutable int m_comm; // The MPI communicator identifier
    mutable Array<int,IPARM_SIZE,1> m_iparm; // integer vector for the input parameters
    mutable Array<double,DPARM_SIZE,1> m_dparm; // Scalar vector for the input parameters
    mutable Matrix<StorageIndex,Dynamic,1> m_perm;  // Permutation vector
    mutable Matrix<StorageIndex,Dynamic,1> m_invp;  // Inverse permutation vector
    mutable int m_size; // Size of the matrix
};
|
| 253 |
+
|
| 254 |
+
/** Initialize the PaStiX data structure.
|
| 255 |
+
*A first call to this function fills iparm and dparm with the default PaStiX parameters
|
| 256 |
+
* \sa iparm() dparm()
|
| 257 |
+
*/
|
| 258 |
+
template <class Derived>
|
| 259 |
+
void PastixBase<Derived>::init()
|
| 260 |
+
{
|
| 261 |
+
m_size = 0;
|
| 262 |
+
m_iparm.setZero(IPARM_SIZE);
|
| 263 |
+
m_dparm.setZero(DPARM_SIZE);
|
| 264 |
+
|
| 265 |
+
m_iparm(IPARM_MODIFY_PARAMETER) = API_NO;
|
| 266 |
+
pastix(&m_pastixdata, MPI_COMM_WORLD,
|
| 267 |
+
0, 0, 0, 0,
|
| 268 |
+
0, 0, 0, 1, m_iparm.data(), m_dparm.data());
|
| 269 |
+
|
| 270 |
+
m_iparm[IPARM_MATRIX_VERIFICATION] = API_NO;
|
| 271 |
+
m_iparm[IPARM_VERBOSE] = API_VERBOSE_NOT;
|
| 272 |
+
m_iparm[IPARM_ORDERING] = API_ORDER_SCOTCH;
|
| 273 |
+
m_iparm[IPARM_INCOMPLETE] = API_NO;
|
| 274 |
+
m_iparm[IPARM_OOC_LIMIT] = 2000;
|
| 275 |
+
m_iparm[IPARM_RHS_MAKING] = API_RHS_B;
|
| 276 |
+
m_iparm(IPARM_MATRIX_VERIFICATION) = API_NO;
|
| 277 |
+
|
| 278 |
+
m_iparm(IPARM_START_TASK) = API_TASK_INIT;
|
| 279 |
+
m_iparm(IPARM_END_TASK) = API_TASK_INIT;
|
| 280 |
+
internal::eigen_pastix(&m_pastixdata, MPI_COMM_WORLD, 0, 0, 0, (Scalar*)0,
|
| 281 |
+
0, 0, 0, 0, m_iparm.data(), m_dparm.data());
|
| 282 |
+
|
| 283 |
+
// Check the returned error
|
| 284 |
+
if(m_iparm(IPARM_ERROR_NUMBER)) {
|
| 285 |
+
m_info = InvalidInput;
|
| 286 |
+
m_initisOk = false;
|
| 287 |
+
}
|
| 288 |
+
else {
|
| 289 |
+
m_info = Success;
|
| 290 |
+
m_initisOk = true;
|
| 291 |
+
}
|
| 292 |
+
}
|
| 293 |
+
|
| 294 |
+
// Full decomposition: symbolic analysis followed by numerical factorization.
// The input matrix must be square (PaStiX solves square systems only).
template <class Derived>
void PastixBase<Derived>::compute(ColSpMatrix& mat)
{
  eigen_assert(mat.rows() == mat.cols() && "The input matrix should be squared");

  analyzePattern(mat);
  factorize(mat);

  // Matrix checking stays disabled for subsequent PaStiX calls
  m_iparm(IPARM_MATRIX_VERIFICATION) = API_NO;
}
|
| 304 |
+
|
| 305 |
+
|
| 306 |
+
// Compute the ordering and the symbolic factorization of \p mat.
// Runs the PaStiX tasks from API_TASK_ORDERING through API_TASK_ANALYSE,
// filling the permutation (m_perm) and inverse permutation (m_invp) vectors.
// On failure info() is set to NumericalIssue.
template <class Derived>
void PastixBase<Derived>::analyzePattern(ColSpMatrix& mat)
{
  eigen_assert(m_initisOk && "The initialization of PaSTiX failed");

  // clean previous calls
  if(m_size>0)
    clean();

  m_size = internal::convert_index<int>(mat.rows());
  m_perm.resize(m_size);
  m_invp.resize(m_size);

  // Run ordering + symbolic analysis on the sparsity pattern of mat
  m_iparm(IPARM_START_TASK) = API_TASK_ORDERING;
  m_iparm(IPARM_END_TASK) = API_TASK_ANALYSE;
  internal::eigen_pastix(&m_pastixdata, MPI_COMM_WORLD, m_size, mat.outerIndexPtr(), mat.innerIndexPtr(),
                         mat.valuePtr(), m_perm.data(), m_invp.data(), 0, 0, m_iparm.data(), m_dparm.data());

  // Check the returned error
  if(m_iparm(IPARM_ERROR_NUMBER))
  {
    m_info = NumericalIssue;
    m_analysisIsOk = false;
  }
  else
  {
    m_info = Success;
    m_analysisIsOk = true;
  }
}
|
| 336 |
+
|
| 337 |
+
// Compute the numerical factorization of \p mat (PaStiX task API_TASK_NUMFACT).
// analyzePattern() must have been called first; \p mat must have the same
// sparsity pattern as the one used during the analysis phase.
// On success m_isInitialized becomes true, enabling _solve_impl().
template <class Derived>
void PastixBase<Derived>::factorize(ColSpMatrix& mat)
{
  eigen_assert(m_analysisIsOk && "The analysis phase should be called before the factorization phase");
  m_iparm(IPARM_START_TASK) = API_TASK_NUMFACT;
  m_iparm(IPARM_END_TASK) = API_TASK_NUMFACT;
  m_size = internal::convert_index<int>(mat.rows());

  internal::eigen_pastix(&m_pastixdata, MPI_COMM_WORLD, m_size, mat.outerIndexPtr(), mat.innerIndexPtr(),
                         mat.valuePtr(), m_perm.data(), m_invp.data(), 0, 0, m_iparm.data(), m_dparm.data());

  // Check the returned error
  if(m_iparm(IPARM_ERROR_NUMBER))
  {
    m_info = NumericalIssue;
    m_factorizationIsOk = false;
    m_isInitialized = false;
  }
  else
  {
    m_info = Success;
    m_factorizationIsOk = true;
    m_isInitialized = true;
  }
}
|
| 363 |
+
|
| 364 |
+
/* Solve the system A.x = b using the previously computed factorization.
 * The solution is computed in place, one right-hand-side column at a time,
 * running the PaStiX SOLVE task followed by iterative REFINE.
 * Returns true on success; info() reflects the PaStiX error status. */
template<typename Base>
template<typename Rhs,typename Dest>
bool PastixBase<Base>::_solve_impl(const MatrixBase<Rhs> &b, MatrixBase<Dest> &x) const
{
  eigen_assert(m_isInitialized && "The matrix should be factorized first");
  EIGEN_STATIC_ASSERT((Dest::Flags&RowMajorBit)==0,
                      THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
  int rhs = 1;  // PaStiX is invoked with a single right-hand side per call

  x = b; /* on return, x is overwritten by the computed solution */

  for (int i = 0; i < b.cols(); i++){
    m_iparm[IPARM_START_TASK]          = API_TASK_SOLVE;
    m_iparm[IPARM_END_TASK]            = API_TASK_REFINE;

    // Solve and refine column i of x in place
    internal::eigen_pastix(&m_pastixdata, MPI_COMM_WORLD, internal::convert_index<int>(x.rows()), 0, 0, 0,
                           m_perm.data(), m_invp.data(), &x(0, i), rhs, m_iparm.data(), m_dparm.data());
  }

  // Check the returned error
  m_info = m_iparm(IPARM_ERROR_NUMBER)==0 ? Success : NumericalIssue;

  return m_iparm(IPARM_ERROR_NUMBER)==0;
}
|
| 389 |
+
|
| 390 |
+
/** \ingroup PaStiXSupport_Module
  * \class PastixLU
  * \brief Sparse direct LU solver based on PaStiX library
  *
  * This class is used to solve the linear systems A.X = B with a supernodal LU
  * factorization in the PaStiX library. The matrix A should be squared and nonsingular
  * PaStiX requires that the matrix A has a symmetric structural pattern.
  * This interface can symmetrize the input matrix otherwise.
  * The vectors or matrices X and B can be either dense or sparse.
  *
  * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
  * \tparam IsStrSym Indicates if the input matrix has a symmetric pattern, default is false
  * NOTE : Note that if the analysis and factorization phase are called separately,
  * the input matrix will be symmetrized at each call, hence it is advised to
  * symmetrize the matrix in a end-user program and set \p IsStrSym to true
  *
  * \implsparsesolverconcept
  *
  * \sa \ref TutorialSparseSolverConcept, class SparseLU
  *
  */
template<typename _MatrixType, bool IsStrSym>
class PastixLU : public PastixBase< PastixLU<_MatrixType> >
{
  public:
    typedef _MatrixType MatrixType;
    typedef PastixBase<PastixLU<MatrixType> > Base;
    typedef typename Base::ColSpMatrix ColSpMatrix;
    typedef typename MatrixType::StorageIndex StorageIndex;

  public:
    // Default constructor: only initializes the PaStiX parameters.
    PastixLU() : Base()
    {
      init();
    }

    // Constructs and immediately factorizes \p matrix.
    explicit PastixLU(const MatrixType& matrix):Base()
    {
      init();
      compute(matrix);
    }
    /** Compute the LU supernodal factorization of \p matrix.
      * iparm and dparm can be used to tune the PaStiX parameters.
      * see the PaStiX user's manual
      * \sa analyzePattern() factorize()
      */
    void compute (const MatrixType& matrix)
    {
      m_structureIsUptodate = false;
      ColSpMatrix temp;
      grabMatrix(matrix, temp);
      Base::compute(temp);
    }
    /** Compute the LU symbolic factorization of \p matrix using its sparsity pattern.
      * Several ordering methods can be used at this step. See the PaStiX user's manual.
      * The result of this operation can be used with successive matrices having the same pattern as \p matrix
      * \sa factorize()
      */
    void analyzePattern(const MatrixType& matrix)
    {
      m_structureIsUptodate = false;
      ColSpMatrix temp;
      grabMatrix(matrix, temp);
      Base::analyzePattern(temp);
    }

    /** Compute the LU supernodal factorization of \p matrix
      * WARNING The matrix \p matrix should have the same structural pattern
      * as the same used in the analysis phase.
      * \sa analyzePattern()
      */
    void factorize(const MatrixType& matrix)
    {
      ColSpMatrix temp;
      grabMatrix(matrix, temp);
      Base::factorize(temp);
    }
  protected:

    // Set the PaStiX parameters for a general (unsymmetric) LU factorization.
    void init()
    {
      m_structureIsUptodate = false;
      m_iparm(IPARM_SYM) = API_SYM_NO;
      m_iparm(IPARM_FACTORIZATION) = API_FACT_LU;
    }

    // Copy \p matrix into \p out in the form expected by PaStiX:
    // if the pattern is not structurally symmetric (IsStrSym == false), the
    // pattern is symmetrized by adding a zero-valued transposed structure
    // (cached in m_transposedStructure); finally indices are converted to
    // Fortran (1-based) numbering.
    void grabMatrix(const MatrixType& matrix, ColSpMatrix& out)
    {
      if(IsStrSym)
        out = matrix;
      else
      {
        if(!m_structureIsUptodate)
        {
          // update the transposed structure
          m_transposedStructure = matrix.transpose();

          // Set the elements of the matrix to zero
          for (Index j=0; j<m_transposedStructure.outerSize(); ++j)
            for(typename ColSpMatrix::InnerIterator it(m_transposedStructure, j); it; ++it)
              it.valueRef() = 0.0;

          m_structureIsUptodate = true;
        }

        out = m_transposedStructure + matrix;
      }
      internal::c_to_fortran_numbering(out);
    }

    using Base::m_iparm;
    using Base::m_dparm;

    ColSpMatrix m_transposedStructure;  // zero-valued transposed pattern cache
    bool m_structureIsUptodate;         // true while the cached pattern is valid
};
|
| 506 |
+
|
| 507 |
+
/** \ingroup PaStiXSupport_Module
  * \class PastixLLT
  * \brief A sparse direct supernodal Cholesky (LLT) factorization and solver based on the PaStiX library
  *
  * This class is used to solve the linear systems A.X = B via a LL^T supernodal Cholesky factorization
  * available in the PaStiX library. The matrix A should be symmetric and positive definite
  * WARNING Selfadjoint complex matrices are not supported in the current version of PaStiX
  * The vectors or matrices X and B can be either dense or sparse
  *
  * \tparam MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
  * \tparam UpLo The part of the matrix to use : Lower or Upper. The default is Lower as required by PaStiX
  *
  * \implsparsesolverconcept
  *
  * \sa \ref TutorialSparseSolverConcept, class SimplicialLLT
  */
template<typename _MatrixType, int _UpLo>
class PastixLLT : public PastixBase< PastixLLT<_MatrixType, _UpLo> >
{
  public:
    typedef _MatrixType MatrixType;
    typedef PastixBase<PastixLLT<MatrixType, _UpLo> > Base;
    typedef typename Base::ColSpMatrix ColSpMatrix;

  public:
    enum { UpLo = _UpLo };
    // Default constructor: only initializes the PaStiX parameters.
    PastixLLT() : Base()
    {
      init();
    }

    // Constructs and immediately factorizes \p matrix.
    explicit PastixLLT(const MatrixType& matrix):Base()
    {
      init();
      compute(matrix);
    }

    /** Compute the L factor of the LL^T supernodal factorization of \p matrix
      * \sa analyzePattern() factorize()
      */
    void compute (const MatrixType& matrix)
    {
      ColSpMatrix temp;
      grabMatrix(matrix, temp);
      Base::compute(temp);
    }

    /** Compute the LL^T symbolic factorization of \p matrix using its sparsity pattern
      * The result of this operation can be used with successive matrices having the same pattern as \p matrix
      * \sa factorize()
      */
    void analyzePattern(const MatrixType& matrix)
    {
      ColSpMatrix temp;
      grabMatrix(matrix, temp);
      Base::analyzePattern(temp);
    }
    /** Compute the LL^T supernodal numerical factorization of \p matrix
      * \sa analyzePattern()
      */
    void factorize(const MatrixType& matrix)
    {
      ColSpMatrix temp;
      grabMatrix(matrix, temp);
      Base::factorize(temp);
    }
  protected:
    using Base::m_iparm;

    // Set the PaStiX parameters for a symmetric LL^T factorization.
    void init()
    {
      m_iparm(IPARM_SYM) = API_SYM_YES;
      m_iparm(IPARM_FACTORIZATION) = API_FACT_LLT;
    }

    // Expand the selected triangular part of \p matrix into the lower,
    // column-major, 1-based-index form PaStiX expects.
    void grabMatrix(const MatrixType& matrix, ColSpMatrix& out)
    {
      out.resize(matrix.rows(), matrix.cols());
      // Pastix supports only lower, column-major matrices
      out.template selfadjointView<Lower>() = matrix.template selfadjointView<UpLo>();
      internal::c_to_fortran_numbering(out);
    }
};
|
| 590 |
+
|
| 591 |
+
/** \ingroup PaStiXSupport_Module
  * \class PastixLDLT
  * \brief A sparse direct supernodal Cholesky (LDLT) factorization and solver based on the PaStiX library
  *
  * This class is used to solve the linear systems A.X = B via a LDL^T supernodal Cholesky factorization
  * available in the PaStiX library. The matrix A should be symmetric and positive definite
  * WARNING Selfadjoint complex matrices are not supported in the current version of PaStiX
  * The vectors or matrices X and B can be either dense or sparse
  *
  * \tparam MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
  * \tparam UpLo The part of the matrix to use : Lower or Upper. The default is Lower as required by PaStiX
  *
  * \implsparsesolverconcept
  *
  * \sa \ref TutorialSparseSolverConcept, class SimplicialLDLT
  */
template<typename _MatrixType, int _UpLo>
class PastixLDLT : public PastixBase< PastixLDLT<_MatrixType, _UpLo> >
{
  public:
    typedef _MatrixType MatrixType;
    typedef PastixBase<PastixLDLT<MatrixType, _UpLo> > Base;
    typedef typename Base::ColSpMatrix ColSpMatrix;

  public:
    enum { UpLo = _UpLo };
    // Default constructor: only initializes the PaStiX parameters.
    PastixLDLT():Base()
    {
      init();
    }

    // Constructs and immediately factorizes \p matrix.
    explicit PastixLDLT(const MatrixType& matrix):Base()
    {
      init();
      compute(matrix);
    }

    /** Compute the L and D factors of the LDL^T factorization of \p matrix
      * \sa analyzePattern() factorize()
      */
    void compute (const MatrixType& matrix)
    {
      ColSpMatrix temp;
      grabMatrix(matrix, temp);
      Base::compute(temp);
    }

    /** Compute the LDL^T symbolic factorization of \p matrix using its sparsity pattern
      * The result of this operation can be used with successive matrices having the same pattern as \p matrix
      * \sa factorize()
      */
    void analyzePattern(const MatrixType& matrix)
    {
      ColSpMatrix temp;
      grabMatrix(matrix, temp);
      Base::analyzePattern(temp);
    }
    /** Compute the LDL^T supernodal numerical factorization of \p matrix
      * \sa analyzePattern()
      */
    void factorize(const MatrixType& matrix)
    {
      ColSpMatrix temp;
      grabMatrix(matrix, temp);
      Base::factorize(temp);
    }

  protected:
    using Base::m_iparm;

    // Set the PaStiX parameters for a symmetric LDL^T factorization.
    void init()
    {
      m_iparm(IPARM_SYM) = API_SYM_YES;
      m_iparm(IPARM_FACTORIZATION) = API_FACT_LDLT;
    }

    // Expand the selected triangular part of \p matrix into the lower,
    // column-major, 1-based-index form PaStiX expects.
    void grabMatrix(const MatrixType& matrix, ColSpMatrix& out)
    {
      // Pastix supports only lower, column-major matrices
      out.resize(matrix.rows(), matrix.cols());
      out.template selfadjointView<Lower>() = matrix.template selfadjointView<UpLo>();
      internal::c_to_fortran_numbering(out);
    }
};
|
| 675 |
+
|
| 676 |
+
} // end namespace Eigen
|
| 677 |
+
|
| 678 |
+
#endif
|
include/eigen/Eigen/src/SVD/BDCSVD.h
ADDED
|
@@ -0,0 +1,1377 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// This file is part of Eigen, a lightweight C++ template library
|
| 2 |
+
// for linear algebra.
|
| 3 |
+
//
|
| 4 |
+
// We used the "A Divide-And-Conquer Algorithm for the Bidiagonal SVD"
|
| 5 |
+
// research report written by Ming Gu and Stanley C.Eisenstat
|
| 6 |
+
// The code variable names correspond to the names they used in their
|
| 7 |
+
// report
|
| 8 |
+
//
|
| 9 |
+
// Copyright (C) 2013 Gauthier Brun <brun.gauthier@gmail.com>
|
| 10 |
+
// Copyright (C) 2013 Nicolas Carre <nicolas.carre@ensimag.fr>
|
| 11 |
+
// Copyright (C) 2013 Jean Ceccato <jean.ceccato@ensimag.fr>
|
| 12 |
+
// Copyright (C) 2013 Pierre Zoppitelli <pierre.zoppitelli@ensimag.fr>
|
| 13 |
+
// Copyright (C) 2013 Jitse Niesen <jitse@maths.leeds.ac.uk>
|
| 14 |
+
// Copyright (C) 2014-2017 Gael Guennebaud <gael.guennebaud@inria.fr>
|
| 15 |
+
//
|
| 16 |
+
// Source Code Form is subject to the terms of the Mozilla
|
| 17 |
+
// Public License v. 2.0. If a copy of the MPL was not distributed
|
| 18 |
+
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
| 19 |
+
|
| 20 |
+
#ifndef EIGEN_BDCSVD_H
|
| 21 |
+
#define EIGEN_BDCSVD_H
|
| 22 |
+
// #define EIGEN_BDCSVD_DEBUG_VERBOSE
|
| 23 |
+
// #define EIGEN_BDCSVD_SANITY_CHECKS
|
| 24 |
+
|
| 25 |
+
#ifdef EIGEN_BDCSVD_SANITY_CHECKS
|
| 26 |
+
#undef eigen_internal_assert
|
| 27 |
+
#define eigen_internal_assert(X) assert(X);
|
| 28 |
+
#endif
|
| 29 |
+
|
| 30 |
+
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
|
| 31 |
+
#include <iostream>
|
| 32 |
+
#endif
|
| 33 |
+
|
| 34 |
+
namespace Eigen {
|
| 35 |
+
|
| 36 |
+
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
|
| 37 |
+
IOFormat bdcsvdfmt(8, 0, ", ", "\n", " [", "]");
|
| 38 |
+
#endif
|
| 39 |
+
|
| 40 |
+
template<typename _MatrixType> class BDCSVD;
|
| 41 |
+
|
| 42 |
+
namespace internal {
|
| 43 |
+
|
| 44 |
+
// Traits specialization for BDCSVD: inherits the matrix's traits and
// exposes the decomposed matrix type.
template<typename _MatrixType>
struct traits<BDCSVD<_MatrixType> >
 : traits<_MatrixType>
{
  typedef _MatrixType MatrixType;
};
|
| 50 |
+
|
| 51 |
+
} // end namespace internal
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
/** \ingroup SVD_Module
|
| 55 |
+
*
|
| 56 |
+
*
|
| 57 |
+
* \class BDCSVD
|
| 58 |
+
*
|
| 59 |
+
* \brief class Bidiagonal Divide and Conquer SVD
|
| 60 |
+
*
|
| 61 |
+
* \tparam _MatrixType the type of the matrix of which we are computing the SVD decomposition
|
| 62 |
+
*
|
| 63 |
+
* This class first reduces the input matrix to bi-diagonal form using class UpperBidiagonalization,
|
| 64 |
+
* and then performs a divide-and-conquer diagonalization. Small blocks are diagonalized using class JacobiSVD.
|
| 65 |
+
* You can control the switching size with the setSwitchSize() method, default is 16.
|
| 66 |
+
* For small matrice (<16), it is thus preferable to directly use JacobiSVD. For larger ones, BDCSVD is highly
|
| 67 |
+
* recommended and can several order of magnitude faster.
|
| 68 |
+
*
|
| 69 |
+
* \warning this algorithm is unlikely to provide accurate result when compiled with unsafe math optimizations.
|
| 70 |
+
* For instance, this concerns Intel's compiler (ICC), which performs such optimization by default unless
|
| 71 |
+
* you compile with the \c -fp-model \c precise option. Likewise, the \c -ffast-math option of GCC or clang will
|
| 72 |
+
* significantly degrade the accuracy.
|
| 73 |
+
*
|
| 74 |
+
* \sa class JacobiSVD
|
| 75 |
+
*/
|
| 76 |
+
template<typename _MatrixType>
|
| 77 |
+
class BDCSVD : public SVDBase<BDCSVD<_MatrixType> >
|
| 78 |
+
{
|
| 79 |
+
typedef SVDBase<BDCSVD> Base;
|
| 80 |
+
|
| 81 |
+
public:
|
| 82 |
+
using Base::rows;
|
| 83 |
+
using Base::cols;
|
| 84 |
+
using Base::computeU;
|
| 85 |
+
using Base::computeV;
|
| 86 |
+
|
| 87 |
+
typedef _MatrixType MatrixType;
|
| 88 |
+
typedef typename MatrixType::Scalar Scalar;
|
| 89 |
+
typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
|
| 90 |
+
typedef typename NumTraits<RealScalar>::Literal Literal;
|
| 91 |
+
enum {
|
| 92 |
+
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
|
| 93 |
+
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
|
| 94 |
+
DiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime, ColsAtCompileTime),
|
| 95 |
+
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
|
| 96 |
+
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
|
| 97 |
+
MaxDiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(MaxRowsAtCompileTime, MaxColsAtCompileTime),
|
| 98 |
+
MatrixOptions = MatrixType::Options
|
| 99 |
+
};
|
| 100 |
+
|
| 101 |
+
typedef typename Base::MatrixUType MatrixUType;
|
| 102 |
+
typedef typename Base::MatrixVType MatrixVType;
|
| 103 |
+
typedef typename Base::SingularValuesType SingularValuesType;
|
| 104 |
+
|
| 105 |
+
typedef Matrix<Scalar, Dynamic, Dynamic, ColMajor> MatrixX;
|
| 106 |
+
typedef Matrix<RealScalar, Dynamic, Dynamic, ColMajor> MatrixXr;
|
| 107 |
+
typedef Matrix<RealScalar, Dynamic, 1> VectorType;
|
| 108 |
+
typedef Array<RealScalar, Dynamic, 1> ArrayXr;
|
| 109 |
+
typedef Array<Index,1,Dynamic> ArrayXi;
|
| 110 |
+
typedef Ref<ArrayXr> ArrayRef;
|
| 111 |
+
typedef Ref<ArrayXi> IndicesRef;
|
| 112 |
+
|
| 113 |
+
/** \brief Default Constructor.
|
| 114 |
+
*
|
| 115 |
+
* The default constructor is useful in cases in which the user intends to
|
| 116 |
+
* perform decompositions via BDCSVD::compute(const MatrixType&).
|
| 117 |
+
*/
|
| 118 |
+
BDCSVD() : m_algoswap(16), m_isTranspose(false), m_compU(false), m_compV(false), m_numIters(0)
|
| 119 |
+
{}
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
/** \brief Default Constructor with memory preallocation
|
| 123 |
+
*
|
| 124 |
+
* Like the default constructor but with preallocation of the internal data
|
| 125 |
+
* according to the specified problem size.
|
| 126 |
+
* \sa BDCSVD()
|
| 127 |
+
*/
|
| 128 |
+
BDCSVD(Index rows, Index cols, unsigned int computationOptions = 0)
|
| 129 |
+
: m_algoswap(16), m_numIters(0)
|
| 130 |
+
{
|
| 131 |
+
allocate(rows, cols, computationOptions);
|
| 132 |
+
}
|
| 133 |
+
|
| 134 |
+
/** \brief Constructor performing the decomposition of given matrix.
|
| 135 |
+
*
|
| 136 |
+
* \param matrix the matrix to decompose
|
| 137 |
+
* \param computationOptions optional parameter allowing to specify if you want full or thin U or V unitaries to be computed.
|
| 138 |
+
* By default, none is computed. This is a bit - field, the possible bits are #ComputeFullU, #ComputeThinU,
|
| 139 |
+
* #ComputeFullV, #ComputeThinV.
|
| 140 |
+
*
|
| 141 |
+
* Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not
|
| 142 |
+
* available with the (non - default) FullPivHouseholderQR preconditioner.
|
| 143 |
+
*/
|
| 144 |
+
BDCSVD(const MatrixType& matrix, unsigned int computationOptions = 0)
|
| 145 |
+
: m_algoswap(16), m_numIters(0)
|
| 146 |
+
{
|
| 147 |
+
compute(matrix, computationOptions);
|
| 148 |
+
}
|
| 149 |
+
|
| 150 |
+
~BDCSVD()
|
| 151 |
+
{
|
| 152 |
+
}
|
| 153 |
+
|
| 154 |
+
/** \brief Method performing the decomposition of given matrix using custom options.
|
| 155 |
+
*
|
| 156 |
+
* \param matrix the matrix to decompose
|
| 157 |
+
* \param computationOptions optional parameter allowing to specify if you want full or thin U or V unitaries to be computed.
|
| 158 |
+
* By default, none is computed. This is a bit - field, the possible bits are #ComputeFullU, #ComputeThinU,
|
| 159 |
+
* #ComputeFullV, #ComputeThinV.
|
| 160 |
+
*
|
| 161 |
+
* Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not
|
| 162 |
+
* available with the (non - default) FullPivHouseholderQR preconditioner.
|
| 163 |
+
*/
|
| 164 |
+
BDCSVD& compute(const MatrixType& matrix, unsigned int computationOptions);
|
| 165 |
+
|
| 166 |
+
/** \brief Method performing the decomposition of given matrix using current options.
|
| 167 |
+
*
|
| 168 |
+
* \param matrix the matrix to decompose
|
| 169 |
+
*
|
| 170 |
+
* This method uses the current \a computationOptions, as already passed to the constructor or to compute(const MatrixType&, unsigned int).
|
| 171 |
+
*/
|
| 172 |
+
BDCSVD& compute(const MatrixType& matrix)
|
| 173 |
+
{
|
| 174 |
+
return compute(matrix, this->m_computationOptions);
|
| 175 |
+
}
|
| 176 |
+
|
| 177 |
+
  // Sets the crossover size below which the recursion falls back to JacobiSVD.
  // The divide step requires at least 3 columns to be well defined, hence the assert.
  void setSwitchSize(int s)
  {
    eigen_assert(s>=3 && "BDCSVD the size of the algo switch has to be at least 3.");
    m_algoswap = s;
  }
|
| 182 |
+
|
| 183 |
+
private:
  // Internal divide-and-conquer machinery; see the implementations below for details.
  void allocate(Index rows, Index cols, unsigned int computationOptions);
  void divide(Index firstCol, Index lastCol, Index firstRowW, Index firstColW, Index shift);
  void computeSVDofM(Index firstCol, Index n, MatrixXr& U, VectorType& singVals, MatrixXr& V);
  void computeSingVals(const ArrayRef& col0, const ArrayRef& diag, const IndicesRef& perm, VectorType& singVals, ArrayRef shifts, ArrayRef mus);
  void perturbCol0(const ArrayRef& col0, const ArrayRef& diag, const IndicesRef& perm, const VectorType& singVals, const ArrayRef& shifts, const ArrayRef& mus, ArrayRef zhat);
  void computeSingVecs(const ArrayRef& zhat, const ArrayRef& diag, const IndicesRef& perm, const VectorType& singVals, const ArrayRef& shifts, const ArrayRef& mus, MatrixXr& U, MatrixXr& V);
  void deflation43(Index firstCol, Index shift, Index i, Index size);
  void deflation44(Index firstColu , Index firstColm, Index firstRowW, Index firstColW, Index i, Index j, Index size);
  void deflation(Index firstCol, Index lastCol, Index k, Index firstRowW, Index firstColW, Index shift);
  template<typename HouseholderU, typename HouseholderV, typename NaiveU, typename NaiveV>
  void copyUV(const HouseholderU &householderU, const HouseholderV &householderV, const NaiveU &naiveU, const NaiveV &naivev);
  void structured_update(Block<MatrixXr,Dynamic,Dynamic> A, const MatrixXr &B, Index n1);
  static RealScalar secularEq(RealScalar x, const ArrayRef& col0, const ArrayRef& diag, const IndicesRef &perm, const ArrayRef& diagShifted, RealScalar shift);

protected:
  MatrixXr m_naiveU, m_naiveV;          // accumulated bases of the real bidiagonal problem
  MatrixXr m_computed;                  // working (m_diagSize+1) x m_diagSize matrix holding the bidiagonal data
  Index m_nRec;
  ArrayXr m_workspace;                  // real scratch buffer shared by the internal routines
  ArrayXi m_workspaceI;                 // integer scratch buffer (permutations, indices)
  int m_algoswap;                       // problem size below which JacobiSVD is used instead of recursing
  bool m_isTranspose, m_compU, m_compV; // m_isTranspose: cols > rows; m_compU/m_compV refer to the internal (possibly transposed) problem

  using Base::m_singularValues;
  using Base::m_diagSize;
  using Base::m_computeFullU;
  using Base::m_computeFullV;
  using Base::m_computeThinU;
  using Base::m_computeThinV;
  using Base::m_matrixU;
  using Base::m_matrixV;
  using Base::m_info;
  using Base::m_isInitialized;
  using Base::m_nonzeroSingularValues;

public:
  int m_numIters;                       // number of secular-equation iterations performed (exposed for diagnostics)
}; //end class BDCSVD
|
| 222 |
+
|
| 223 |
+
|
| 224 |
+
// Method to allocate and initialize matrix and attributes.
// Sizes all working buffers for a (rows x cols) problem; returns early if the base
// class reports that the previous allocation already matches.
template<typename MatrixType>
void BDCSVD<MatrixType>::allocate(Eigen::Index rows, Eigen::Index cols, unsigned int computationOptions)
{
  // Internally we always work on a tall problem; wide inputs are handled via their adjoint.
  m_isTranspose = (cols > rows);

  // Base::allocate returns true when rows/cols/options are unchanged, i.e. buffers are already sized.
  if (Base::allocate(rows, cols, computationOptions))
    return;

  m_computed = MatrixXr::Zero(m_diagSize + 1, m_diagSize );
  // NOTE: U and V are deliberately crossed here — the naive bases refer to the internal
  // (possibly transposed) problem, and copyUV() performs the matching exchange at the end.
  m_compU = computeV();
  m_compV = computeU();
  if (m_isTranspose)
    std::swap(m_compU, m_compV);

  // When U is not needed we only track the first and last rows of the accumulated rotations.
  if (m_compU) m_naiveU = MatrixXr::Zero(m_diagSize + 1, m_diagSize + 1 );
  else         m_naiveU = MatrixXr::Zero(2, m_diagSize + 1 );

  if (m_compV) m_naiveV = MatrixXr::Zero(m_diagSize, m_diagSize);

  // Scratch sizes match the maximal needs of structured_update() and computeSVDofM().
  m_workspace.resize((m_diagSize+1)*(m_diagSize+1)*3);
  m_workspaceI.resize(3*m_diagSize);
}// end allocate
|
| 247 |
+
|
| 248 |
+
// Main entry point: computes the SVD of \a matrix according to \a computationOptions.
// Pipeline: (-1) fall back to JacobiSVD for small problems; (0) scale the input to avoid
// over/underflow; (1) bidiagonalize; (2) divide & conquer on the bidiagonal matrix;
// (3) extract singular values and assemble U/V.
template<typename MatrixType>
BDCSVD<MatrixType>& BDCSVD<MatrixType>::compute(const MatrixType& matrix, unsigned int computationOptions)
{
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
  std::cout << "\n\n\n======================================================================================================================\n\n\n";
#endif
  allocate(matrix.rows(), matrix.cols(), computationOptions);
  using std::abs;

  // Smallest positive normalized value: singular values below this are treated as zero.
  const RealScalar considerZero = (std::numeric_limits<RealScalar>::min)();

  //**** step -1 - If the problem is too small, directly falls back to JacobiSVD and return
  if(matrix.cols() < m_algoswap)
  {
    // FIXME this line involves temporaries
    JacobiSVD<MatrixType> jsvd(matrix,computationOptions);
    m_isInitialized = true;
    m_info = jsvd.info();
    if (m_info == Success || m_info == NoConvergence) {
      if(computeU()) m_matrixU = jsvd.matrixU();
      if(computeV()) m_matrixV = jsvd.matrixV();
      m_singularValues = jsvd.singularValues();
      m_nonzeroSingularValues = jsvd.nonzeroSingularValues();
    }
    return *this;
  }

  //**** step 0 - Copy the input matrix and apply scaling to reduce over/under-flows
  RealScalar scale = matrix.cwiseAbs().template maxCoeff<PropagateNaN>();
  // A NaN/Inf coefficient makes the whole decomposition meaningless: report InvalidInput.
  if (!(numext::isfinite)(scale)) {
    m_isInitialized = true;
    m_info = InvalidInput;
    return *this;
  }

  if(scale==Literal(0)) scale = Literal(1);
  MatrixX copy;
  // Work on the adjoint for wide matrices so the bidiagonalization always sees rows >= cols.
  if (m_isTranspose) copy = matrix.adjoint()/scale;
  else               copy = matrix/scale;

  //**** step 1 - Bidiagonalization
  // FIXME this line involves temporaries
  internal::UpperBidiagonalization<MatrixX> bid(copy);

  //**** step 2 - Divide & Conquer
  m_naiveU.setZero();
  m_naiveV.setZero();
  // FIXME this line involves a temporary matrix
  m_computed.topRows(m_diagSize) = bid.bidiagonal().toDenseMatrix().transpose();
  m_computed.template bottomRows<1>().setZero();
  divide(0, m_diagSize - 1, 0, 0, 0);
  if (m_info != Success && m_info != NoConvergence) {
    m_isInitialized = true;
    return *this;
  }

  //**** step 3 - Copy singular values and vectors
  // The diagonal of m_computed now holds the singular values of the scaled problem;
  // undo the scaling and count the non-zero ones (they are sorted, so stop at the first zero).
  for (int i=0; i<m_diagSize; i++)
  {
    RealScalar a = abs(m_computed.coeff(i, i));
    m_singularValues.coeffRef(i) = a * scale;
    if (a<considerZero)
    {
      m_nonzeroSingularValues = i;
      m_singularValues.tail(m_diagSize - i - 1).setZero();
      break;
    }
    else if (i == m_diagSize - 1)
    {
      m_nonzeroSingularValues = i + 1;
      break;
    }
  }

#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
//   std::cout << "m_naiveU\n" << m_naiveU << "\n\n";
//   std::cout << "m_naiveV\n" << m_naiveV << "\n\n";
#endif
  // Assemble the final unitaries; arguments are crossed when the input was transposed
  // (see the matching note in allocate() and copyUV()).
  if(m_isTranspose) copyUV(bid.householderV(), bid.householderU(), m_naiveV, m_naiveU);
  else              copyUV(bid.householderU(), bid.householderV(), m_naiveU, m_naiveV);

  m_isInitialized = true;
  return *this;
}// end compute
|
| 332 |
+
|
| 333 |
+
|
| 334 |
+
// Assembles m_matrixU / m_matrixV from the accumulated naive bases and the Householder
// reflectors of the bidiagonalization step.
template<typename MatrixType>
template<typename HouseholderU, typename HouseholderV, typename NaiveU, typename NaiveV>
void BDCSVD<MatrixType>::copyUV(const HouseholderU &householderU, const HouseholderV &householderV, const NaiveU &naiveU, const NaiveV &naiveV)
{
  // Note exchange of U and V: m_matrixU is set from m_naiveV and vice versa
  if (computeU())
  {
    // Thin U keeps only m_diagSize columns; full U keeps as many as the reflectors span.
    Index Ucols = m_computeThinU ? m_diagSize : householderU.cols();
    m_matrixU = MatrixX::Identity(householderU.cols(), Ucols);
    m_matrixU.topLeftCorner(m_diagSize, m_diagSize) = naiveV.template cast<Scalar>().topLeftCorner(m_diagSize, m_diagSize);
    householderU.applyThisOnTheLeft(m_matrixU); // FIXME this line involves a temporary buffer
  }
  if (computeV())
  {
    Index Vcols = m_computeThinV ? m_diagSize : householderV.cols();
    m_matrixV = MatrixX::Identity(householderV.cols(), Vcols);
    m_matrixV.topLeftCorner(m_diagSize, m_diagSize) = naiveU.template cast<Scalar>().topLeftCorner(m_diagSize, m_diagSize);
    householderV.applyThisOnTheLeft(m_matrixV); // FIXME this line involves a temporary buffer
  }
}
|
| 354 |
+
|
| 355 |
+
/** \internal
  * Performs A = A * B exploiting the special structure of the matrix A. Splitting A as:
  *  A = [A1]
  *      [A2]
  * such that A1.rows()==n1, then we assume that at least half of the columns of A1 and A2 are zeros.
  * We can thus pack them prior to the matrix product. However, this is only worth the effort if the matrix is large
  * enough.
  */
template<typename MatrixType>
void BDCSVD<MatrixType>::structured_update(Block<MatrixXr,Dynamic,Dynamic> A, const MatrixXr &B, Index n1)
{
  Index n = A.rows();
  // Threshold at which packing the sparse columns pays off; below it a plain product is cheaper.
  if(n>100)
  {
    // If the matrices are large enough, let's exploit the sparse structure of A by
    // splitting it in half (wrt n1), and packing the non-zero columns.
    // The four maps carve disjoint regions out of m_workspace (sized in allocate()).
    Index n2 = n - n1;
    Map<MatrixXr> A1(m_workspace.data()      , n1, n);
    Map<MatrixXr> A2(m_workspace.data()+ n1*n, n2, n);
    Map<MatrixXr> B1(m_workspace.data()+  n*n, n, n);
    Map<MatrixXr> B2(m_workspace.data()+2*n*n, n, n);
    Index k1=0, k2=0;
    for(Index j=0; j<n; ++j)
    {
      // Keep column j of the top (resp. bottom) part only if it is not identically zero,
      // together with the matching row of B.
      if( (A.col(j).head(n1).array()!=Literal(0)).any() )
      {
        A1.col(k1) = A.col(j).head(n1);
        B1.row(k1) = B.row(j);
        ++k1;
      }
      if( (A.col(j).tail(n2).array()!=Literal(0)).any() )
      {
        A2.col(k2) = A.col(j).tail(n2);
        B2.row(k2) = B.row(j);
        ++k2;
      }
    }

    // Multiply only the packed (non-zero) columns/rows; results land back in A in place.
    A.topRows(n1).noalias()    = A1.leftCols(k1) * B1.topRows(k1);
    A.bottomRows(n2).noalias() = A2.leftCols(k2) * B2.topRows(k2);
  }
  else
  {
    // Small case: plain product through a workspace temporary (A aliases the destination).
    Map<MatrixXr,Aligned> tmp(m_workspace.data(),n,n);
    tmp.noalias() = A*B;
    A = tmp;
  }
}
|
| 403 |
+
|
| 404 |
+
// The divide algorithm is done "in place", we are always working on subsets of the same matrix. The divide methods takes as argument the
// place of the submatrix we are currently working on.

//@param firstCol : The Index of the first column of the submatrix of m_computed and for m_naiveU;
//@param lastCol : The Index of the last column of the submatrix of m_computed and for m_naiveU;
//  lastCol + 1 - firstCol is the size of the submatrix.
//@param firstRowW : The Index of the first row of the matrix W that we are to change. (see the reference paper section 1 for more information on W)
//@param firstColW : Same as firstRowW with the column.
//@param shift : Each time one takes the left submatrix, one must add 1 to the shift. Why? Because! We actually want the last column of the U submatrix
// to become the first column (*coeff) and to shift all the other columns to the right. There are more details on the reference paper.
template<typename MatrixType>
void BDCSVD<MatrixType>::divide(Eigen::Index firstCol, Eigen::Index lastCol, Eigen::Index firstRowW, Eigen::Index firstColW, Eigen::Index shift)
{
  // requires rows = cols + 1;
  using std::pow;
  using std::sqrt;
  using std::abs;
  const Index n = lastCol - firstCol + 1;
  const Index k = n/2;
  const RealScalar considerZero = (std::numeric_limits<RealScalar>::min)();
  RealScalar alphaK;
  RealScalar betaK;
  RealScalar r0;
  RealScalar lambda, phi, c0, s0;
  VectorType l, f;
  // We use the other algorithm which is more efficient for small
  // matrices.
  if (n < m_algoswap)
  {
    // FIXME this line involves temporaries
    JacobiSVD<MatrixXr> b(m_computed.block(firstCol, firstCol, n + 1, n), ComputeFullU | (m_compV ? ComputeFullV : 0));
    m_info = b.info();
    if (m_info != Success && m_info != NoConvergence) return;
    if (m_compU)
      m_naiveU.block(firstCol, firstCol, n + 1, n + 1).real() = b.matrixU();
    else
    {
      // Only the first and last rows of U are tracked when m_compU is false.
      m_naiveU.row(0).segment(firstCol, n + 1).real() = b.matrixU().row(0);
      m_naiveU.row(1).segment(firstCol, n + 1).real() = b.matrixU().row(n);
    }
    if (m_compV) m_naiveV.block(firstRowW, firstColW, n, n).real() = b.matrixV();
    m_computed.block(firstCol + shift, firstCol + shift, n + 1, n).setZero();
    m_computed.diagonal().segment(firstCol + shift, n) = b.singularValues().head(n);
    return;
  }
  // We use the divide and conquer algorithm
  alphaK = m_computed(firstCol + k, firstCol + k);
  betaK = m_computed(firstCol + k + 1, firstCol + k);
  // The divide must be done in that order in order to have good results. Divide change the data inside the submatrices
  // and the divide of the right submatrice reads one column of the left submatrice. That's why we need to treat the
  // right submatrix before the left one.
  divide(k + 1 + firstCol, lastCol, k + 1 + firstRowW, k + 1 + firstColW, shift);
  if (m_info != Success && m_info != NoConvergence) return;
  divide(firstCol, k - 1 + firstCol, firstRowW, firstColW + 1, shift + 1);
  if (m_info != Success && m_info != NoConvergence) return;

  // Gather the coefficients needed to merge the two halves (see the reference paper).
  if (m_compU)
  {
    lambda = m_naiveU(firstCol + k, firstCol + k);
    phi = m_naiveU(firstCol + k + 1, lastCol + 1);
  }
  else
  {
    lambda = m_naiveU(1, firstCol + k);
    phi = m_naiveU(0, lastCol + 1);
  }
  r0 = sqrt((abs(alphaK * lambda) * abs(alphaK * lambda)) + abs(betaK * phi) * abs(betaK * phi));
  if (m_compU)
  {
    l = m_naiveU.row(firstCol + k).segment(firstCol, k);
    f = m_naiveU.row(firstCol + k + 1).segment(firstCol + k + 1, n - k - 1);
  }
  else
  {
    l = m_naiveU.row(1).segment(firstCol, k);
    f = m_naiveU.row(0).segment(firstCol + k + 1, n - k - 1);
  }
  if (m_compV) m_naiveV(firstRowW+k, firstColW) = Literal(1);
  // Degenerate merge: both coupling terms vanish, use the identity rotation.
  if (r0<considerZero)
  {
    c0 = Literal(1);
    s0 = Literal(0);
  }
  else
  {
    c0 = alphaK * lambda / r0;
    s0 = betaK * phi / r0;
  }

#ifdef EIGEN_BDCSVD_SANITY_CHECKS
  assert(m_naiveU.allFinite());
  assert(m_naiveV.allFinite());
  assert(m_computed.allFinite());
#endif

  if (m_compU)
  {
    MatrixXr q1 (m_naiveU.col(firstCol + k).segment(firstCol, k + 1));
    // we shiftW Q1 to the right
    for (Index i = firstCol + k - 1; i >= firstCol; i--)
      m_naiveU.col(i + 1).segment(firstCol, k + 1) = m_naiveU.col(i).segment(firstCol, k + 1);
    // we shift q1 at the left with a factor c0
    m_naiveU.col(firstCol).segment( firstCol, k + 1) = (q1 * c0);
    // last column = q1 * - s0
    m_naiveU.col(lastCol + 1).segment(firstCol, k + 1) = (q1 * ( - s0));
    // first column = q2 * s0
    m_naiveU.col(firstCol).segment(firstCol + k + 1, n - k) = m_naiveU.col(lastCol + 1).segment(firstCol + k + 1, n - k) * s0;
    // q2 *= c0
    m_naiveU.col(lastCol + 1).segment(firstCol + k + 1, n - k) *= c0;
  }
  else
  {
    // Same rotation applied to the reduced 2-row representation of U.
    RealScalar q1 = m_naiveU(0, firstCol + k);
    // we shift Q1 to the right
    for (Index i = firstCol + k - 1; i >= firstCol; i--)
      m_naiveU(0, i + 1) = m_naiveU(0, i);
    // we shift q1 at the left with a factor c0
    m_naiveU(0, firstCol) = (q1 * c0);
    // last column = q1 * - s0
    m_naiveU(0, lastCol + 1) = (q1 * ( - s0));
    // first column = q2 * s0
    m_naiveU(1, firstCol) = m_naiveU(1, lastCol + 1) *s0;
    // q2 *= c0
    m_naiveU(1, lastCol + 1) *= c0;
    m_naiveU.row(1).segment(firstCol + 1, k).setZero();
    m_naiveU.row(0).segment(firstCol + k + 1, n - k - 1).setZero();
  }

#ifdef EIGEN_BDCSVD_SANITY_CHECKS
  assert(m_naiveU.allFinite());
  assert(m_naiveV.allFinite());
  assert(m_computed.allFinite());
#endif

  // Write the combined problem: r0 on the diagonal head, scaled l and f in the first column.
  m_computed(firstCol + shift, firstCol + shift) = r0;
  m_computed.col(firstCol + shift).segment(firstCol + shift + 1, k) = alphaK * l.transpose().real();
  m_computed.col(firstCol + shift).segment(firstCol + shift + k + 1, n - k - 1) = betaK * f.transpose().real();

#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
  ArrayXr tmp1 = (m_computed.block(firstCol+shift, firstCol+shift, n, n)).jacobiSvd().singularValues();
#endif
  // Second part: try to deflate singular values in combined matrix
  deflation(firstCol, lastCol, k, firstRowW, firstColW, shift);
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
  ArrayXr tmp2 = (m_computed.block(firstCol+shift, firstCol+shift, n, n)).jacobiSvd().singularValues();
  std::cout << "\n\nj1 = " << tmp1.transpose().format(bdcsvdfmt) << "\n";
  std::cout << "j2 = " << tmp2.transpose().format(bdcsvdfmt) << "\n\n";
  std::cout << "err: " << ((tmp1-tmp2).abs()>1e-12*tmp2.abs()).transpose() << "\n";
  static int count = 0;
  std::cout << "# " << ++count << "\n\n";
  assert((tmp1-tmp2).matrix().norm() < 1e-14*tmp2.matrix().norm());
//   assert(count<681);
//   assert(((tmp1-tmp2).abs()<1e-13*tmp2.abs()).all());
#endif

  // Third part: compute SVD of combined matrix
  MatrixXr UofSVD, VofSVD;
  VectorType singVals;
  computeSVDofM(firstCol + shift, n, UofSVD, singVals, VofSVD);

#ifdef EIGEN_BDCSVD_SANITY_CHECKS
  assert(UofSVD.allFinite());
  assert(VofSVD.allFinite());
#endif

  // Fold the SVD of the merged problem back into the accumulated bases; structured_update
  // exploits the block-sparsity of the naive bases for large n.
  if (m_compU)
    structured_update(m_naiveU.block(firstCol, firstCol, n + 1, n + 1), UofSVD, (n+2)/2);
  else
  {
    Map<Matrix<RealScalar,2,Dynamic>,Aligned> tmp(m_workspace.data(),2,n+1);
    tmp.noalias() = m_naiveU.middleCols(firstCol, n+1) * UofSVD;
    m_naiveU.middleCols(firstCol, n + 1) = tmp;
  }

  if (m_compV) structured_update(m_naiveV.block(firstRowW, firstColW, n, n), VofSVD, (n+1)/2);

#ifdef EIGEN_BDCSVD_SANITY_CHECKS
  assert(m_naiveU.allFinite());
  assert(m_naiveV.allFinite());
  assert(m_computed.allFinite());
#endif

  // The submatrix is now diagonal: store the merged singular values back in m_computed.
  m_computed.block(firstCol + shift, firstCol + shift, n, n).setZero();
  m_computed.block(firstCol + shift, firstCol + shift, n, n).diagonal() = singVals;
}// end divide
|
| 589 |
+
|
| 590 |
+
// Compute SVD of m_computed.block(firstCol, firstCol, n + 1, n); this block only has non-zeros in
// the first column and on the diagonal and has undergone deflation, so diagonal is in increasing
// order except for possibly the (0,0) entry. The computed SVD is stored U, singVals and V, except
// that if m_compV is false, then V is not computed. Singular values are sorted in decreasing order.
//
// TODO Opportunities for optimization: better root finding algo, better stopping criterion, better
// handling of round-off errors, be consistent in ordering
// For instance, to solve the secular equation using FMM, see http://www.stat.uchicago.edu/~lekheng/courses/302/classics/greengard-rokhlin.pdf
template <typename MatrixType>
void BDCSVD<MatrixType>::computeSVDofM(Eigen::Index firstCol, Eigen::Index n, MatrixXr& U, VectorType& singVals, MatrixXr& V)
{
  const RealScalar considerZero = (std::numeric_limits<RealScalar>::min)();
  using std::abs;
  // col0 aliases the first column of the block; diag aliases its diagonal copied to workspace.
  ArrayRef col0 = m_computed.col(firstCol).segment(firstCol, n);
  m_workspace.head(n) = m_computed.block(firstCol, firstCol, n, n).diagonal();
  ArrayRef diag = m_workspace.head(n);
  diag(0) = Literal(0);

  // Allocate space for singular values and vectors
  singVals.resize(n);
  U.resize(n+1, n+1);
  if (m_compV) V.resize(n, n);

#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
  if (col0.hasNaN() || diag.hasNaN())
    std::cout << "\n\nHAS NAN\n\n";
#endif

  // Many singular values might have been deflated, the zero ones have been moved to the end,
  // but others are interleaved and we must ignore them at this stage.
  // To this end, let's compute a permutation skipping them:
  Index actual_n = n;
  while(actual_n>1 && diag(actual_n-1)==Literal(0)) {--actual_n; eigen_internal_assert(col0(actual_n)==Literal(0)); }
  Index m = 0; // size of the deflated problem
  for(Index k=0;k<actual_n;++k)
    if(abs(col0(k))>considerZero)
      m_workspaceI(m++) = k;
  Map<ArrayXi> perm(m_workspaceI.data(),m);

  // Disjoint workspace slices for the secular-equation solves.
  Map<ArrayXr> shifts(m_workspace.data()+1*n, n);
  Map<ArrayXr> mus(m_workspace.data()+2*n, n);
  Map<ArrayXr> zhat(m_workspace.data()+3*n, n);

#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
  std::cout << "computeSVDofM using:\n";
  std::cout << " z: " << col0.transpose() << "\n";
  std::cout << " d: " << diag.transpose() << "\n";
#endif

  // Compute singVals, shifts, and mus
  computeSingVals(col0, diag, perm, singVals, shifts, mus);

#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
  std::cout << " j: " << (m_computed.block(firstCol, firstCol, n, n)).jacobiSvd().singularValues().transpose().reverse() << "\n\n";
  std::cout << " sing-val: " << singVals.transpose() << "\n";
  std::cout << " mu: " << mus.transpose() << "\n";
  std::cout << " shift: " << shifts.transpose() << "\n";

  {
    std::cout << "\n\n mus: " << mus.head(actual_n).transpose() << "\n\n";
    std::cout << " check1 (expect0) : " << ((singVals.array()-(shifts+mus)) / singVals.array()).head(actual_n).transpose() << "\n\n";
    assert((((singVals.array()-(shifts+mus)) / singVals.array()).head(actual_n) >= 0).all());
    std::cout << " check2 (>0) : " << ((singVals.array()-diag) / singVals.array()).head(actual_n).transpose() << "\n\n";
    assert((((singVals.array()-diag) / singVals.array()).head(actual_n) >= 0).all());
  }
#endif

#ifdef EIGEN_BDCSVD_SANITY_CHECKS
  assert(singVals.allFinite());
  assert(mus.allFinite());
  assert(shifts.allFinite());
#endif

  // Compute zhat
  perturbCol0(col0, diag, perm, singVals, shifts, mus, zhat);
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
  std::cout << " zhat: " << zhat.transpose() << "\n";
#endif

#ifdef EIGEN_BDCSVD_SANITY_CHECKS
  assert(zhat.allFinite());
#endif

  computeSingVecs(zhat, diag, perm, singVals, shifts, mus, U, V);

#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
  std::cout << "U^T U: " << (U.transpose() * U - MatrixXr(MatrixXr::Identity(U.cols(),U.cols()))).norm() << "\n";
  std::cout << "V^T V: " << (V.transpose() * V - MatrixXr(MatrixXr::Identity(V.cols(),V.cols()))).norm() << "\n";
#endif

#ifdef EIGEN_BDCSVD_SANITY_CHECKS
  assert(m_naiveU.allFinite());
  assert(m_naiveV.allFinite());
  assert(m_computed.allFinite());
  assert(U.allFinite());
  assert(V.allFinite());
//  assert((U.transpose() * U - MatrixXr(MatrixXr::Identity(U.cols(),U.cols()))).norm() < 100*NumTraits<RealScalar>::epsilon() * n);
//  assert((V.transpose() * V - MatrixXr(MatrixXr::Identity(V.cols(),V.cols()))).norm() < 100*NumTraits<RealScalar>::epsilon() * n);
#endif

  // Because of deflation, the singular values might not be completely sorted.
  // Fortunately, reordering them is a O(n) problem
  for(Index i=0; i<actual_n-1; ++i)
  {
    if(singVals(i)>singVals(i+1))
    {
      using std::swap;
      swap(singVals(i),singVals(i+1));
      U.col(i).swap(U.col(i+1));
      if(m_compV) V.col(i).swap(V.col(i+1));
    }
  }

#ifdef EIGEN_BDCSVD_SANITY_CHECKS
  {
    bool singular_values_sorted = (((singVals.segment(1,actual_n-1)-singVals.head(actual_n-1))).array() >= 0).all();
    if(!singular_values_sorted)
      std::cout << "Singular values are not sorted: " << singVals.segment(1,actual_n).transpose() << "\n";
    assert(singular_values_sorted);
  }
#endif

  // Reverse order so that singular values in increased order
  // Because of deflation, the zeros singular-values are already at the end
  singVals.head(actual_n).reverseInPlace();
  U.leftCols(actual_n).rowwise().reverseInPlace();
  if (m_compV) V.leftCols(actual_n).rowwise().reverseInPlace();

#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
  JacobiSVD<MatrixXr> jsvd(m_computed.block(firstCol, firstCol, n, n) );
  std::cout << " * j: " << jsvd.singularValues().transpose() << "\n\n";
  std::cout << " * sing-val: " << singVals.transpose() << "\n";
//  std::cout << " * err: " << ((jsvd.singularValues()-singVals)>1e-13*singVals.norm()).transpose() << "\n";
#endif
}
|
| 725 |
+
|
| 726 |
+
template <typename MatrixType>
|
| 727 |
+
typename BDCSVD<MatrixType>::RealScalar BDCSVD<MatrixType>::secularEq(RealScalar mu, const ArrayRef& col0, const ArrayRef& diag, const IndicesRef &perm, const ArrayRef& diagShifted, RealScalar shift)
|
| 728 |
+
{
|
| 729 |
+
Index m = perm.size();
|
| 730 |
+
RealScalar res = Literal(1);
|
| 731 |
+
for(Index i=0; i<m; ++i)
|
| 732 |
+
{
|
| 733 |
+
Index j = perm(i);
|
| 734 |
+
// The following expression could be rewritten to involve only a single division,
|
| 735 |
+
// but this would make the expression more sensitive to overflow.
|
| 736 |
+
res += (col0(j) / (diagShifted(j) - mu)) * (col0(j) / (diag(j) + shift + mu));
|
| 737 |
+
}
|
| 738 |
+
return res;
|
| 739 |
+
|
| 740 |
+
}
|
| 741 |
+
|
| 742 |
+
template <typename MatrixType>
|
| 743 |
+
void BDCSVD<MatrixType>::computeSingVals(const ArrayRef& col0, const ArrayRef& diag, const IndicesRef &perm,
|
| 744 |
+
VectorType& singVals, ArrayRef shifts, ArrayRef mus)
|
| 745 |
+
{
|
| 746 |
+
using std::abs;
|
| 747 |
+
using std::swap;
|
| 748 |
+
using std::sqrt;
|
| 749 |
+
|
| 750 |
+
Index n = col0.size();
|
| 751 |
+
Index actual_n = n;
|
| 752 |
+
// Note that here actual_n is computed based on col0(i)==0 instead of diag(i)==0 as above
|
| 753 |
+
// because 1) we have diag(i)==0 => col0(i)==0 and 2) if col0(i)==0, then diag(i) is already a singular value.
|
| 754 |
+
while(actual_n>1 && col0(actual_n-1)==Literal(0)) --actual_n;
|
| 755 |
+
|
| 756 |
+
for (Index k = 0; k < n; ++k)
|
| 757 |
+
{
|
| 758 |
+
if (col0(k) == Literal(0) || actual_n==1)
|
| 759 |
+
{
|
| 760 |
+
// if col0(k) == 0, then entry is deflated, so singular value is on diagonal
|
| 761 |
+
// if actual_n==1, then the deflated problem is already diagonalized
|
| 762 |
+
singVals(k) = k==0 ? col0(0) : diag(k);
|
| 763 |
+
mus(k) = Literal(0);
|
| 764 |
+
shifts(k) = k==0 ? col0(0) : diag(k);
|
| 765 |
+
continue;
|
| 766 |
+
}
|
| 767 |
+
|
| 768 |
+
// otherwise, use secular equation to find singular value
|
| 769 |
+
RealScalar left = diag(k);
|
| 770 |
+
RealScalar right; // was: = (k != actual_n-1) ? diag(k+1) : (diag(actual_n-1) + col0.matrix().norm());
|
| 771 |
+
if(k==actual_n-1)
|
| 772 |
+
right = (diag(actual_n-1) + col0.matrix().norm());
|
| 773 |
+
else
|
| 774 |
+
{
|
| 775 |
+
// Skip deflated singular values,
|
| 776 |
+
// recall that at this stage we assume that z[j]!=0 and all entries for which z[j]==0 have been put aside.
|
| 777 |
+
// This should be equivalent to using perm[]
|
| 778 |
+
Index l = k+1;
|
| 779 |
+
while(col0(l)==Literal(0)) { ++l; eigen_internal_assert(l<actual_n); }
|
| 780 |
+
right = diag(l);
|
| 781 |
+
}
|
| 782 |
+
|
| 783 |
+
// first decide whether it's closer to the left end or the right end
|
| 784 |
+
RealScalar mid = left + (right-left) / Literal(2);
|
| 785 |
+
RealScalar fMid = secularEq(mid, col0, diag, perm, diag, Literal(0));
|
| 786 |
+
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
|
| 787 |
+
std::cout << "right-left = " << right-left << "\n";
|
| 788 |
+
// std::cout << "fMid = " << fMid << " " << secularEq(mid-left, col0, diag, perm, ArrayXr(diag-left), left)
|
| 789 |
+
// << " " << secularEq(mid-right, col0, diag, perm, ArrayXr(diag-right), right) << "\n";
|
| 790 |
+
std::cout << " = " << secularEq(left+RealScalar(0.000001)*(right-left), col0, diag, perm, diag, 0)
|
| 791 |
+
<< " " << secularEq(left+RealScalar(0.1) *(right-left), col0, diag, perm, diag, 0)
|
| 792 |
+
<< " " << secularEq(left+RealScalar(0.2) *(right-left), col0, diag, perm, diag, 0)
|
| 793 |
+
<< " " << secularEq(left+RealScalar(0.3) *(right-left), col0, diag, perm, diag, 0)
|
| 794 |
+
<< " " << secularEq(left+RealScalar(0.4) *(right-left), col0, diag, perm, diag, 0)
|
| 795 |
+
<< " " << secularEq(left+RealScalar(0.49) *(right-left), col0, diag, perm, diag, 0)
|
| 796 |
+
<< " " << secularEq(left+RealScalar(0.5) *(right-left), col0, diag, perm, diag, 0)
|
| 797 |
+
<< " " << secularEq(left+RealScalar(0.51) *(right-left), col0, diag, perm, diag, 0)
|
| 798 |
+
<< " " << secularEq(left+RealScalar(0.6) *(right-left), col0, diag, perm, diag, 0)
|
| 799 |
+
<< " " << secularEq(left+RealScalar(0.7) *(right-left), col0, diag, perm, diag, 0)
|
| 800 |
+
<< " " << secularEq(left+RealScalar(0.8) *(right-left), col0, diag, perm, diag, 0)
|
| 801 |
+
<< " " << secularEq(left+RealScalar(0.9) *(right-left), col0, diag, perm, diag, 0)
|
| 802 |
+
<< " " << secularEq(left+RealScalar(0.999999)*(right-left), col0, diag, perm, diag, 0) << "\n";
|
| 803 |
+
#endif
|
| 804 |
+
RealScalar shift = (k == actual_n-1 || fMid > Literal(0)) ? left : right;
|
| 805 |
+
|
| 806 |
+
// measure everything relative to shift
|
| 807 |
+
Map<ArrayXr> diagShifted(m_workspace.data()+4*n, n);
|
| 808 |
+
diagShifted = diag - shift;
|
| 809 |
+
|
| 810 |
+
if(k!=actual_n-1)
|
| 811 |
+
{
|
| 812 |
+
// check that after the shift, f(mid) is still negative:
|
| 813 |
+
RealScalar midShifted = (right - left) / RealScalar(2);
|
| 814 |
+
if(shift==right)
|
| 815 |
+
midShifted = -midShifted;
|
| 816 |
+
RealScalar fMidShifted = secularEq(midShifted, col0, diag, perm, diagShifted, shift);
|
| 817 |
+
if(fMidShifted>0)
|
| 818 |
+
{
|
| 819 |
+
// fMid was erroneous, fix it:
|
| 820 |
+
shift = fMidShifted > Literal(0) ? left : right;
|
| 821 |
+
diagShifted = diag - shift;
|
| 822 |
+
}
|
| 823 |
+
}
|
| 824 |
+
|
| 825 |
+
// initial guess
|
| 826 |
+
RealScalar muPrev, muCur;
|
| 827 |
+
if (shift == left)
|
| 828 |
+
{
|
| 829 |
+
muPrev = (right - left) * RealScalar(0.1);
|
| 830 |
+
if (k == actual_n-1) muCur = right - left;
|
| 831 |
+
else muCur = (right - left) * RealScalar(0.5);
|
| 832 |
+
}
|
| 833 |
+
else
|
| 834 |
+
{
|
| 835 |
+
muPrev = -(right - left) * RealScalar(0.1);
|
| 836 |
+
muCur = -(right - left) * RealScalar(0.5);
|
| 837 |
+
}
|
| 838 |
+
|
| 839 |
+
RealScalar fPrev = secularEq(muPrev, col0, diag, perm, diagShifted, shift);
|
| 840 |
+
RealScalar fCur = secularEq(muCur, col0, diag, perm, diagShifted, shift);
|
| 841 |
+
if (abs(fPrev) < abs(fCur))
|
| 842 |
+
{
|
| 843 |
+
swap(fPrev, fCur);
|
| 844 |
+
swap(muPrev, muCur);
|
| 845 |
+
}
|
| 846 |
+
|
| 847 |
+
// rational interpolation: fit a function of the form a / mu + b through the two previous
|
| 848 |
+
// iterates and use its zero to compute the next iterate
|
| 849 |
+
bool useBisection = fPrev*fCur>Literal(0);
|
| 850 |
+
while (fCur!=Literal(0) && abs(muCur - muPrev) > Literal(8) * NumTraits<RealScalar>::epsilon() * numext::maxi<RealScalar>(abs(muCur), abs(muPrev)) && abs(fCur - fPrev)>NumTraits<RealScalar>::epsilon() && !useBisection)
|
| 851 |
+
{
|
| 852 |
+
++m_numIters;
|
| 853 |
+
|
| 854 |
+
// Find a and b such that the function f(mu) = a / mu + b matches the current and previous samples.
|
| 855 |
+
RealScalar a = (fCur - fPrev) / (Literal(1)/muCur - Literal(1)/muPrev);
|
| 856 |
+
RealScalar b = fCur - a / muCur;
|
| 857 |
+
// And find mu such that f(mu)==0:
|
| 858 |
+
RealScalar muZero = -a/b;
|
| 859 |
+
RealScalar fZero = secularEq(muZero, col0, diag, perm, diagShifted, shift);
|
| 860 |
+
|
| 861 |
+
#ifdef EIGEN_BDCSVD_SANITY_CHECKS
|
| 862 |
+
assert((numext::isfinite)(fZero));
|
| 863 |
+
#endif
|
| 864 |
+
|
| 865 |
+
muPrev = muCur;
|
| 866 |
+
fPrev = fCur;
|
| 867 |
+
muCur = muZero;
|
| 868 |
+
fCur = fZero;
|
| 869 |
+
|
| 870 |
+
if (shift == left && (muCur < Literal(0) || muCur > right - left)) useBisection = true;
|
| 871 |
+
if (shift == right && (muCur < -(right - left) || muCur > Literal(0))) useBisection = true;
|
| 872 |
+
if (abs(fCur)>abs(fPrev)) useBisection = true;
|
| 873 |
+
}
|
| 874 |
+
|
| 875 |
+
// fall back on bisection method if rational interpolation did not work
|
| 876 |
+
if (useBisection)
|
| 877 |
+
{
|
| 878 |
+
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
|
| 879 |
+
std::cout << "useBisection for k = " << k << ", actual_n = " << actual_n << "\n";
|
| 880 |
+
#endif
|
| 881 |
+
RealScalar leftShifted, rightShifted;
|
| 882 |
+
if (shift == left)
|
| 883 |
+
{
|
| 884 |
+
// to avoid overflow, we must have mu > max(real_min, |z(k)|/sqrt(real_max)),
|
| 885 |
+
// the factor 2 is to be more conservative
|
| 886 |
+
leftShifted = numext::maxi<RealScalar>( (std::numeric_limits<RealScalar>::min)(), Literal(2) * abs(col0(k)) / sqrt((std::numeric_limits<RealScalar>::max)()) );
|
| 887 |
+
|
| 888 |
+
// check that we did it right:
|
| 889 |
+
eigen_internal_assert( (numext::isfinite)( (col0(k)/leftShifted)*(col0(k)/(diag(k)+shift+leftShifted)) ) );
|
| 890 |
+
// I don't understand why the case k==0 would be special there:
|
| 891 |
+
// if (k == 0) rightShifted = right - left; else
|
| 892 |
+
rightShifted = (k==actual_n-1) ? right : ((right - left) * RealScalar(0.51)); // theoretically we can take 0.5, but let's be safe
|
| 893 |
+
}
|
| 894 |
+
else
|
| 895 |
+
{
|
| 896 |
+
leftShifted = -(right - left) * RealScalar(0.51);
|
| 897 |
+
if(k+1<n)
|
| 898 |
+
rightShifted = -numext::maxi<RealScalar>( (std::numeric_limits<RealScalar>::min)(), abs(col0(k+1)) / sqrt((std::numeric_limits<RealScalar>::max)()) );
|
| 899 |
+
else
|
| 900 |
+
rightShifted = -(std::numeric_limits<RealScalar>::min)();
|
| 901 |
+
}
|
| 902 |
+
|
| 903 |
+
RealScalar fLeft = secularEq(leftShifted, col0, diag, perm, diagShifted, shift);
|
| 904 |
+
eigen_internal_assert(fLeft<Literal(0));
|
| 905 |
+
|
| 906 |
+
#if defined EIGEN_BDCSVD_DEBUG_VERBOSE || defined EIGEN_BDCSVD_SANITY_CHECKS || defined EIGEN_INTERNAL_DEBUGGING
|
| 907 |
+
RealScalar fRight = secularEq(rightShifted, col0, diag, perm, diagShifted, shift);
|
| 908 |
+
#endif
|
| 909 |
+
|
| 910 |
+
#ifdef EIGEN_BDCSVD_SANITY_CHECKS
|
| 911 |
+
if(!(numext::isfinite)(fLeft))
|
| 912 |
+
std::cout << "f(" << leftShifted << ") =" << fLeft << " ; " << left << " " << shift << " " << right << "\n";
|
| 913 |
+
assert((numext::isfinite)(fLeft));
|
| 914 |
+
|
| 915 |
+
if(!(numext::isfinite)(fRight))
|
| 916 |
+
std::cout << "f(" << rightShifted << ") =" << fRight << " ; " << left << " " << shift << " " << right << "\n";
|
| 917 |
+
// assert((numext::isfinite)(fRight));
|
| 918 |
+
#endif
|
| 919 |
+
|
| 920 |
+
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
|
| 921 |
+
if(!(fLeft * fRight<0))
|
| 922 |
+
{
|
| 923 |
+
std::cout << "f(leftShifted) using leftShifted=" << leftShifted << " ; diagShifted(1:10):" << diagShifted.head(10).transpose() << "\n ; "
|
| 924 |
+
<< "left==shift=" << bool(left==shift) << " ; left-shift = " << (left-shift) << "\n";
|
| 925 |
+
std::cout << "k=" << k << ", " << fLeft << " * " << fRight << " == " << fLeft * fRight << " ; "
|
| 926 |
+
<< "[" << left << " .. " << right << "] -> [" << leftShifted << " " << rightShifted << "], shift=" << shift
|
| 927 |
+
<< " , f(right)=" << secularEq(0, col0, diag, perm, diagShifted, shift)
|
| 928 |
+
<< " == " << secularEq(right, col0, diag, perm, diag, 0) << " == " << fRight << "\n";
|
| 929 |
+
}
|
| 930 |
+
#endif
|
| 931 |
+
eigen_internal_assert(fLeft * fRight < Literal(0));
|
| 932 |
+
|
| 933 |
+
if(fLeft<Literal(0))
|
| 934 |
+
{
|
| 935 |
+
while (rightShifted - leftShifted > Literal(2) * NumTraits<RealScalar>::epsilon() * numext::maxi<RealScalar>(abs(leftShifted), abs(rightShifted)))
|
| 936 |
+
{
|
| 937 |
+
RealScalar midShifted = (leftShifted + rightShifted) / Literal(2);
|
| 938 |
+
fMid = secularEq(midShifted, col0, diag, perm, diagShifted, shift);
|
| 939 |
+
eigen_internal_assert((numext::isfinite)(fMid));
|
| 940 |
+
|
| 941 |
+
if (fLeft * fMid < Literal(0))
|
| 942 |
+
{
|
| 943 |
+
rightShifted = midShifted;
|
| 944 |
+
}
|
| 945 |
+
else
|
| 946 |
+
{
|
| 947 |
+
leftShifted = midShifted;
|
| 948 |
+
fLeft = fMid;
|
| 949 |
+
}
|
| 950 |
+
}
|
| 951 |
+
muCur = (leftShifted + rightShifted) / Literal(2);
|
| 952 |
+
}
|
| 953 |
+
else
|
| 954 |
+
{
|
| 955 |
+
// We have a problem as shifting on the left or right give either a positive or negative value
|
| 956 |
+
// at the middle of [left,right]...
|
| 957 |
+
        // Instead of aborting or entering an infinite loop,
|
| 958 |
+
// let's just use the middle as the estimated zero-crossing:
|
| 959 |
+
muCur = (right - left) * RealScalar(0.5);
|
| 960 |
+
if(shift == right)
|
| 961 |
+
muCur = -muCur;
|
| 962 |
+
}
|
| 963 |
+
}
|
| 964 |
+
|
| 965 |
+
singVals[k] = shift + muCur;
|
| 966 |
+
shifts[k] = shift;
|
| 967 |
+
mus[k] = muCur;
|
| 968 |
+
|
| 969 |
+
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
|
| 970 |
+
if(k+1<n)
|
| 971 |
+
std::cout << "found " << singVals[k] << " == " << shift << " + " << muCur << " from " << diag(k) << " .. " << diag(k+1) << "\n";
|
| 972 |
+
#endif
|
| 973 |
+
#ifdef EIGEN_BDCSVD_SANITY_CHECKS
|
| 974 |
+
assert(k==0 || singVals[k]>=singVals[k-1]);
|
| 975 |
+
assert(singVals[k]>=diag(k));
|
| 976 |
+
#endif
|
| 977 |
+
|
| 978 |
+
// perturb singular value slightly if it equals diagonal entry to avoid division by zero later
|
| 979 |
+
// (deflation is supposed to avoid this from happening)
|
| 980 |
+
    // - this does not seem to be necessary anymore -
|
| 981 |
+
// if (singVals[k] == left) singVals[k] *= 1 + NumTraits<RealScalar>::epsilon();
|
| 982 |
+
// if (singVals[k] == right) singVals[k] *= 1 - NumTraits<RealScalar>::epsilon();
|
| 983 |
+
}
|
| 984 |
+
}
|
| 985 |
+
|
| 986 |
+
|
| 987 |
+
// zhat is perturbation of col0 for which singular vectors can be computed stably (see Section 3.1)
|
| 988 |
+
// Compute zhat, a numerically safe perturbation of col0 from which the singular
// vectors can be computed stably (see Section 3.1 of Gu & Eisenstat's
// divide-and-conquer paper): |zhat(k)| is rebuilt as a product of ratios that
// is provably non-negative, then given the sign of col0(k).
//
// col0    : first column of the rank-1-update representation (z vector)
// diag    : diagonal entries d(i), sorted ascending
// perm    : indices of the non-deflated entries (size m <= n)
// singVals: singular values previously computed by computeSingVals
// shifts  : per-singular-value shifts used during the secular-equation solve
// mus     : per-singular-value offsets relative to the shifts, i.e.
//           singVals(k) == shifts(k) + mus(k)
// zhat    : output, same layout as col0; deflated entries are set to 0
template <typename MatrixType>
void BDCSVD<MatrixType>::perturbCol0
   (const ArrayRef& col0, const ArrayRef& diag, const IndicesRef &perm, const VectorType& singVals,
    const ArrayRef& shifts, const ArrayRef& mus, ArrayRef zhat)
{
  using std::sqrt;
  Index n = col0.size();
  Index m = perm.size();
  // Everything deflated: the perturbed vector is identically zero.
  if(m==0)
  {
    zhat.setZero();
    return;
  }
  Index lastIdx = perm(m-1);
  // perm lets the loops below skip deflated entries while computing zhat.
  for (Index k = 0; k < n; ++k)
  {
    if (col0(k) == Literal(0)) // deflated
      zhat(k) = Literal(0);
    else
    {
      // see equation (3.6): |zhat(k)|^2 is accumulated in 'prod' as
      // (sigma_last + d_k) * (sigma_last - d_k) times a product of ratios,
      // with sigma_j - d_k always evaluated as mus(j) + (shifts(j) - d_k)
      // to avoid catastrophic cancellation.
      RealScalar dk = diag(k);
      RealScalar prod = (singVals(lastIdx) + dk) * (mus(lastIdx) + (shifts(lastIdx) - dk));
#ifdef EIGEN_BDCSVD_SANITY_CHECKS
      if(prod<0) {
        std::cout << "k = " << k << " ; z(k)=" << col0(k) << ", diag(k)=" << dk << "\n";
        std::cout << "prod = " << "(" << singVals(lastIdx) << " + " << dk << ") * (" << mus(lastIdx) << " + (" << shifts(lastIdx) << " - " << dk << "))" << "\n";
        std::cout << "     = " << singVals(lastIdx) + dk << " * " << mus(lastIdx) + (shifts(lastIdx) - dk) << "\n";
      }
      assert(prod>=0);
#endif

      for(Index l = 0; l<m; ++l)
      {
        Index i = perm(l);
        if(i!=k)
        {
#ifdef EIGEN_BDCSVD_SANITY_CHECKS
          if(i>=k && (l==0 || l-1>=m))
          {
            std::cout << "Error in perturbCol0\n";
            std::cout << "  " << k << "/" << n << " "  << l << "/" << m << " " << i << "/" << n << " ; " << col0(k) << " " << diag(k) << " "  <<  "\n";
            std::cout << "  " <<diag(i) << "\n";
            Index j = (i<k /*|| l==0*/) ? i : perm(l-1);
            std::cout << "  " << "j=" << j << "\n";
          }
#endif
          // Avoid index out of bounds (perm(l-1) below would read perm(-1)).
          // Will end up setting zhat(k) = 0.
          if (i >= k && l == 0) {
            m_info = NumericalIssue;
            prod = 0;
            break;
          }
          // j picks the singular value paired with d(i) in the ratio below:
          // sigma_i itself when i<k, otherwise the previous (smaller) one.
          Index j = i<k ? i : l > 0 ? perm(l-1) : i;
#ifdef EIGEN_BDCSVD_SANITY_CHECKS
          if(!(dk!=Literal(0) || diag(i)!=Literal(0)))
          {
            std::cout << "k=" << k << ", i=" << i << ", l=" << l << ", perm.size()=" << perm.size() << "\n";
          }
          assert(dk!=Literal(0) || diag(i)!=Literal(0));
#endif
          // Each factor (sigma_j^2 - d_k^2) / (d_i^2 - d_k^2), expanded so
          // each difference is computed without cancellation.
          prod *= ((singVals(j)+dk) / ((diag(i)+dk))) * ((mus(j)+(shifts(j)-dk)) / ((diag(i)-dk)));
#ifdef EIGEN_BDCSVD_SANITY_CHECKS
          assert(prod>=0);
#endif
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
          if(i!=k && numext::abs(((singVals(j)+dk)*(mus(j)+(shifts(j)-dk)))/((diag(i)+dk)*(diag(i)-dk)) - 1) > 0.9 )
            std::cout << "     " << ((singVals(j)+dk)*(mus(j)+(shifts(j)-dk)))/((diag(i)+dk)*(diag(i)-dk)) << " == (" << (singVals(j)+dk) << " * " << (mus(j)+(shifts(j)-dk))
                       << ") / (" << (diag(i)+dk) << " * " << (diag(i)-dk) << ")\n";
#endif
        }
      }
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
      std::cout << "zhat(" << k << ") =  sqrt( " << prod << ")  ;  " << (singVals(lastIdx) + dk) << " * " << mus(lastIdx) + shifts(lastIdx) << " - " << dk << "\n";
#endif
      RealScalar tmp = sqrt(prod);
#ifdef EIGEN_BDCSVD_SANITY_CHECKS
      assert((numext::isfinite)(tmp));
#endif
      // Restore the original sign of the entry.
      zhat(k) = col0(k) > Literal(0) ? RealScalar(tmp) : RealScalar(-tmp);
    }
  }
}
|
| 1073 |
+
|
| 1074 |
+
// compute singular vectors
|
| 1075 |
+
// Compute the singular vectors U (n+1 columns) and, if m_compV, V (n columns)
// of the rank-1-perturbed bidiagonal block, given the perturbed z vector zhat
// and the singular values found by computeSingVals.
//
// For each non-deflated singular value sigma_k, the (unnormalized) vector
// components are zhat(i) / ((d_i - sigma_k)(d_i + sigma_k)), where the first
// factor is evaluated as (d_i - shifts(k)) - mus(k) to avoid cancellation.
// Deflated columns (zhat(k)==0) get canonical unit vectors.
template <typename MatrixType>
void BDCSVD<MatrixType>::computeSingVecs
   (const ArrayRef& zhat, const ArrayRef& diag, const IndicesRef &perm, const VectorType& singVals,
    const ArrayRef& shifts, const ArrayRef& mus, MatrixXr& U, MatrixXr& V)
{
  Index n = zhat.size();
  Index m = perm.size();

  for (Index k = 0; k < n; ++k)
  {
    if (zhat(k) == Literal(0))
    {
      // Deflated singular value: its vectors are trivially unit vectors.
      U.col(k) = VectorType::Unit(n+1, k);
      if (m_compV) V.col(k) = VectorType::Unit(n, k);
    }
    else
    {
      U.col(k).setZero();
      for(Index l=0;l<m;++l)
      {
        Index i = perm(l);
        // (d_i - shifts(k)) - mus(k) == d_i - sigma_k, computed stably.
        U(i,k) = zhat(i)/(((diag(i) - shifts(k)) - mus(k)) )/( (diag(i) + singVals[k]));
      }
      // Last row of U does not participate in this block's left vectors.
      U(n,k) = Literal(0);
      U.col(k).normalize();

      if (m_compV)
      {
        V.col(k).setZero();
        // NOTE(review): loop starts at l=1 — the first permuted entry is
        // skipped and V(0,k) is pinned to -1 below; presumably perm(0)==0
        // corresponds to the d_0==0 entry whose component is fixed — confirm.
        for(Index l=1;l<m;++l)
        {
          Index i = perm(l);
          V(i,k) = diag(i) * zhat(i) / (((diag(i) - shifts(k)) - mus(k)) )/( (diag(i) + singVals[k]));
        }
        V(0,k) = Literal(-1);
        V.col(k).normalize();
      }
    }
  }
  // The extra (n+1)-th column of U is always the last canonical unit vector.
  U.col(n) = VectorType::Unit(n+1, n);
}
|
| 1116 |
+
|
| 1117 |
+
|
| 1118 |
+
// page 12_13
|
| 1119 |
+
// i >= 1, di almost null and zi non null.
|
| 1120 |
+
// We use a rotation to zero out zi applied to the left of M
|
| 1121 |
+
template <typename MatrixType>
|
| 1122 |
+
void BDCSVD<MatrixType>::deflation43(Eigen::Index firstCol, Eigen::Index shift, Eigen::Index i, Eigen::Index size)
|
| 1123 |
+
{
|
| 1124 |
+
using std::abs;
|
| 1125 |
+
using std::sqrt;
|
| 1126 |
+
using std::pow;
|
| 1127 |
+
Index start = firstCol + shift;
|
| 1128 |
+
RealScalar c = m_computed(start, start);
|
| 1129 |
+
RealScalar s = m_computed(start+i, start);
|
| 1130 |
+
RealScalar r = numext::hypot(c,s);
|
| 1131 |
+
if (r == Literal(0))
|
| 1132 |
+
{
|
| 1133 |
+
m_computed(start+i, start+i) = Literal(0);
|
| 1134 |
+
return;
|
| 1135 |
+
}
|
| 1136 |
+
m_computed(start,start) = r;
|
| 1137 |
+
m_computed(start+i, start) = Literal(0);
|
| 1138 |
+
m_computed(start+i, start+i) = Literal(0);
|
| 1139 |
+
|
| 1140 |
+
JacobiRotation<RealScalar> J(c/r,-s/r);
|
| 1141 |
+
if (m_compU) m_naiveU.middleRows(firstCol, size+1).applyOnTheRight(firstCol, firstCol+i, J);
|
| 1142 |
+
else m_naiveU.applyOnTheRight(firstCol, firstCol+i, J);
|
| 1143 |
+
}// end deflation 43
|
| 1144 |
+
|
| 1145 |
+
|
| 1146 |
+
// page 13
// i,j >= 1, i!=j and |di - dj| < epsilon * norm2(M)
// We apply two rotations to have zj = 0;
// TODO deflation44 is still broken and not properly tested
//
// Merges two nearly-equal diagonal entries: the combined Givens rotation
// moves all the "z" weight onto entry i and zeroes z(j), so entry j can be
// treated as deflated. The rotation is accumulated into U (and V if
// requested) to keep the factorization consistent.
template <typename MatrixType>
void BDCSVD<MatrixType>::deflation44(Eigen::Index firstColu , Eigen::Index firstColm, Eigen::Index firstRowW, Eigen::Index firstColW, Eigen::Index i, Eigen::Index j, Eigen::Index size)
{
  using std::abs;
  using std::sqrt;
  using std::conj;
  using std::pow;
  RealScalar c = m_computed(firstColm+i, firstColm);
  RealScalar s = m_computed(firstColm+j, firstColm);
  // r = hypot(z(i), z(j)); written out explicitly here (unlike deflation43,
  // which uses numext::hypot).
  RealScalar r = sqrt(numext::abs2(c) + numext::abs2(s));
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
  std::cout << "deflation 4.4: " << i << "," << j << " -> " << c << " " << s << " " << r << " ; "
    << m_computed(firstColm + i-1, firstColm)  << " "
    << m_computed(firstColm + i, firstColm)  << " "
    << m_computed(firstColm + i+1, firstColm) << " "
    << m_computed(firstColm + i+2, firstColm) << "\n";
  std::cout << m_computed(firstColm + i-1, firstColm + i-1)  << " "
    << m_computed(firstColm + i, firstColm+i)  << " "
    << m_computed(firstColm + i+1, firstColm+i+1) << " "
    << m_computed(firstColm + i+2, firstColm+i+2) << "\n";
#endif
  if (r==Literal(0))
  {
    // Both z entries vanish: just propagate the diagonal value and bail out.
    m_computed(firstColm + i, firstColm + i) = m_computed(firstColm + j, firstColm + j);
    return;
  }
  // Normalize (c,s) into the rotation's cosine/sine.
  c/=r;
  s/=r;
  // Order matters below: diag(j) must be overwritten from diag(i) before any
  // later use, and z(i) absorbs the full weight r while z(j) is cleared.
  m_computed(firstColm + i, firstColm) = r;
  m_computed(firstColm + j, firstColm + j) = m_computed(firstColm + i, firstColm + i);
  m_computed(firstColm + j, firstColm) = Literal(0);

  JacobiRotation<RealScalar> J(c,-s);
  if (m_compU)  m_naiveU.middleRows(firstColu, size+1).applyOnTheRight(firstColu + i, firstColu + j, J);
  else          m_naiveU.applyOnTheRight(firstColu+i, firstColu+j, J);
  if (m_compV)  m_naiveV.middleRows(firstRowW, size).applyOnTheRight(firstColW + i, firstColW + j, J);
}// end deflation 44
|
| 1187 |
+
|
| 1188 |
+
|
| 1189 |
+
// acts on block from (firstCol+shift, firstCol+shift) to (lastCol+shift, lastCol+shift) [inclusive]
//
// Deflation pass preceding the secular-equation solve: it (4.1) lifts a
// too-small leading diagonal entry, (4.2) zeroes negligible z entries,
// (4.3) rotates away z entries whose diagonal is negligible, sorts the
// merged diagonal, and (4.4) merges pairs of nearly-equal diagonal entries.
// k is the merge point: diag(1:k) and diag(k+1:length-1) are each sorted
// on entry.
template <typename MatrixType>
void BDCSVD<MatrixType>::deflation(Eigen::Index firstCol, Eigen::Index lastCol, Eigen::Index k, Eigen::Index firstRowW, Eigen::Index firstColW, Eigen::Index shift)
{
  using std::sqrt;
  using std::abs;
  const Index length = lastCol + 1 - firstCol;

  // col0: the z vector (first column of the block); diag: its diagonal,
  // both viewed in-place inside m_computed.
  Block<MatrixXr,Dynamic,1> col0(m_computed, firstCol+shift, firstCol+shift, length, 1);
  Diagonal<MatrixXr> fulldiag(m_computed);
  VectorBlock<Diagonal<MatrixXr>,Dynamic> diag(fulldiag, firstCol+shift, length);

  // Thresholds: considerZero is the smallest normal value; epsilon_strict
  // gates z-entry deflation (4.2); epsilon_coarse gates diagonal deflation
  // (4.1/4.3).
  const RealScalar considerZero = (std::numeric_limits<RealScalar>::min)();
  RealScalar maxDiag = diag.tail((std::max)(Index(1),length-1)).cwiseAbs().maxCoeff();
  RealScalar epsilon_strict = numext::maxi<RealScalar>(considerZero,NumTraits<RealScalar>::epsilon() * maxDiag);
  RealScalar epsilon_coarse = Literal(8) * NumTraits<RealScalar>::epsilon() * numext::maxi<RealScalar>(col0.cwiseAbs().maxCoeff(), maxDiag);

#ifdef EIGEN_BDCSVD_SANITY_CHECKS
  assert(m_naiveU.allFinite());
  assert(m_naiveV.allFinite());
  assert(m_computed.allFinite());
#endif

#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
  std::cout << "\ndeflate:" << diag.head(k+1).transpose() << "  |  " << diag.segment(k+1,length-k-1).transpose() << "\n";
#endif

  //condition 4.1: clamp a negligible leading diagonal entry up to the threshold
  if (diag(0) < epsilon_coarse)
  {
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
    std::cout << "deflation 4.1, because " << diag(0) << " < " << epsilon_coarse << "\n";
#endif
    diag(0) = epsilon_coarse;
  }

  //condition 4.2: negligible z entries are simply zeroed (entry deflates)
  for (Index i=1;i<length;++i)
    if (abs(col0(i)) < epsilon_strict)
    {
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
      std::cout << "deflation 4.2, set z(" << i << ") to zero because " << abs(col0(i)) << " < " << epsilon_strict << "  (diag(" << i << ")=" << diag(i) << ")\n";
#endif
      col0(i) = Literal(0);
    }

  //condition 4.3: a negligible diagonal entry is rotated away via deflation43
  for (Index i=1;i<length; i++)
    if (diag(i) < epsilon_coarse)
    {
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
      std::cout << "deflation 4.3, cancel z(" << i << ")=" << col0(i) << " because diag(" << i << ")=" << diag(i) << " < " << epsilon_coarse << "\n";
#endif
      deflation43(firstCol, shift, i, length);
    }

#ifdef EIGEN_BDCSVD_SANITY_CHECKS
  assert(m_naiveU.allFinite());
  assert(m_naiveV.allFinite());
  assert(m_computed.allFinite());
#endif
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
  std::cout << "to be sorted: " << diag.transpose() << "\n\n";
  std::cout << "            : " << col0.transpose() << "\n\n";
#endif
  {
    // Check for total deflation
    // If we have a total deflation, then we have to consider col0(0)==diag(0) as a singular value during sorting.
    const bool total_deflation = (col0.tail(length-1).array().abs()<considerZero).all();

    // Sort the diagonal entries, since diag(1:k-1) and diag(k:length) are already sorted, let's do a sorted merge.
    // First, compute the respective permutation.
    Index *permutation = m_workspaceI.data();
    {
      permutation[0] = 0;
      Index p = 1;

      // Move deflated diagonal entries at the end.
      for(Index i=1; i<length; ++i)
        if(abs(diag(i))<considerZero)
          permutation[p++] = i;

      // Two-pointer merge of the two sorted halves, descending-by-position
      // (larger diagonal entries get the later permutation slots).
      Index i=1, j=k+1;
      for( ; p < length; ++p)
      {
        if (i > k)              permutation[p] = j++;
        else if (j >= length)   permutation[p] = i++;
        else if (diag(i) < diag(j)) permutation[p] = j++;
        else                        permutation[p] = i++;
      }
    }

    // If we have a total deflation, then we have to insert diag(0) at the right place
    if(total_deflation)
    {
      for(Index i=1; i<length; ++i)
      {
        Index pi = permutation[i];
        if(abs(diag(pi))<considerZero || diag(0)<diag(pi))
          permutation[i-1] = permutation[i];
        else
        {
          permutation[i-1] = 0;
          break;
        }
      }
    }

    // Current index of each col, and current column of each index
    Index *realInd = m_workspaceI.data()+length;
    Index *realCol = m_workspaceI.data()+2*length;

    for(int pos = 0; pos< length; pos++)
    {
      realCol[pos] = pos;
      realInd[pos] = pos;
    }

    // Apply the permutation in-place via column swaps, maintaining the
    // index<->column maps so earlier swaps are accounted for.
    for(Index i = total_deflation?0:1; i < length; i++)
    {
      const Index pi = permutation[length - (total_deflation ? i+1 : i)];
      const Index J = realCol[pi];

      using std::swap;
      // swap diagonal and first column entries:
      swap(diag(i), diag(J));
      if(i!=0 && J!=0) swap(col0(i), col0(J));

      // change columns
      if (m_compU) m_naiveU.col(firstCol+i).segment(firstCol, length + 1).swap(m_naiveU.col(firstCol+J).segment(firstCol, length + 1));
      else         m_naiveU.col(firstCol+i).segment(0, 2)                .swap(m_naiveU.col(firstCol+J).segment(0, 2));
      if (m_compV) m_naiveV.col(firstColW + i).segment(firstRowW, length).swap(m_naiveV.col(firstColW + J).segment(firstRowW, length));

      //update real pos
      const Index realI = realInd[i];
      realCol[realI] = J;
      realCol[pi] = i;
      realInd[J] = realI;
      realInd[i] = pi;
    }
  }
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
  std::cout << "sorted: " << diag.transpose().format(bdcsvdfmt) << "\n";
  std::cout << "      : " << col0.transpose() << "\n\n";
#endif

  //condition 4.4: merge pairs of nearly-equal diagonal entries
  {
    // Skip trailing entries that are already deflated.
    Index i = length-1;
    while(i>0 && (abs(diag(i))<considerZero || abs(col0(i))<considerZero)) --i;
    for(; i>1;--i)
       if( (diag(i) - diag(i-1)) < NumTraits<RealScalar>::epsilon()*maxDiag )
      {
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
        std::cout << "deflation 4.4 with i = " << i << " because " << diag(i) << " - " << diag(i-1) << " == " << (diag(i) - diag(i-1)) << " < " << NumTraits<RealScalar>::epsilon()*/*diag(i)*/maxDiag << "\n";
#endif
        eigen_internal_assert(abs(diag(i) - diag(i-1))<epsilon_coarse && " diagonal entries are not properly sorted");
        deflation44(firstCol, firstCol + shift, firstRowW, firstColW, i-1, i, length);
      }
  }

#ifdef EIGEN_BDCSVD_SANITY_CHECKS
  // Post-condition: diagonal is sorted except for deflated (near-zero) entries.
  for(Index j=2;j<length;++j)
    assert(diag(j-1)<=diag(j) || abs(diag(j))<considerZero);
#endif

#ifdef EIGEN_BDCSVD_SANITY_CHECKS
  assert(m_naiveU.allFinite());
  assert(m_naiveV.allFinite());
  assert(m_computed.allFinite());
#endif
}//end deflation
|
| 1361 |
+
|
| 1362 |
+
/** \svd_module
|
| 1363 |
+
*
|
| 1364 |
+
* \return the singular value decomposition of \c *this computed by Divide & Conquer algorithm
|
| 1365 |
+
*
|
| 1366 |
+
* \sa class BDCSVD
|
| 1367 |
+
*/
|
| 1368 |
+
template<typename Derived>
|
| 1369 |
+
BDCSVD<typename MatrixBase<Derived>::PlainObject>
|
| 1370 |
+
MatrixBase<Derived>::bdcSvd(unsigned int computationOptions) const
|
| 1371 |
+
{
|
| 1372 |
+
return BDCSVD<PlainObject>(*this, computationOptions);
|
| 1373 |
+
}
|
| 1374 |
+
|
| 1375 |
+
} // end namespace Eigen
|
| 1376 |
+
|
| 1377 |
+
#endif
|
include/eigen/Eigen/src/SVD/JacobiSVD.h
ADDED
|
@@ -0,0 +1,813 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// This file is part of Eigen, a lightweight C++ template library
|
| 2 |
+
// for linear algebra.
|
| 3 |
+
//
|
| 4 |
+
// Copyright (C) 2009-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
|
| 5 |
+
// Copyright (C) 2013-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
|
| 6 |
+
//
|
| 7 |
+
// This Source Code Form is subject to the terms of the Mozilla
|
| 8 |
+
// Public License v. 2.0. If a copy of the MPL was not distributed
|
| 9 |
+
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
| 10 |
+
|
| 11 |
+
#ifndef EIGEN_JACOBISVD_H
|
| 12 |
+
#define EIGEN_JACOBISVD_H
|
| 13 |
+
|
| 14 |
+
namespace Eigen {
|
| 15 |
+
|
| 16 |
+
namespace internal {
|
| 17 |
+
// forward declaration (needed by ICC)
// the empty body is required by MSVC
// Helper specialized elsewhere: for complex scalars it rotates a 2x2 block
// so its entries become real before the Jacobi sweep; the primary template
// (real scalars) is a no-op, hence the empty body.
template<typename MatrixType, int QRPreconditioner,
         bool IsComplex = NumTraits<typename MatrixType::Scalar>::IsComplex>
struct svd_precondition_2x2_block_to_be_real {};
|
| 22 |
+
|
| 23 |
+
/*** QR preconditioners (R-SVD)
|
| 24 |
+
***
|
| 25 |
+
*** Their role is to reduce the problem of computing the SVD to the case of a square matrix.
|
| 26 |
+
*** This approach, known as R-SVD, is an optimization for rectangular-enough matrices, and is a requirement for
|
| 27 |
+
*** JacobiSVD which by itself is only able to work on square matrices.
|
| 28 |
+
***/
|
| 29 |
+
|
| 30 |
+
enum { PreconditionIfMoreColsThanRows, PreconditionIfMoreRowsThanCols };
|
| 31 |
+
|
| 32 |
+
template<typename MatrixType, int QRPreconditioner, int Case>
|
| 33 |
+
struct qr_preconditioner_should_do_anything
|
| 34 |
+
{
|
| 35 |
+
enum { a = MatrixType::RowsAtCompileTime != Dynamic &&
|
| 36 |
+
MatrixType::ColsAtCompileTime != Dynamic &&
|
| 37 |
+
MatrixType::ColsAtCompileTime <= MatrixType::RowsAtCompileTime,
|
| 38 |
+
b = MatrixType::RowsAtCompileTime != Dynamic &&
|
| 39 |
+
MatrixType::ColsAtCompileTime != Dynamic &&
|
| 40 |
+
MatrixType::RowsAtCompileTime <= MatrixType::ColsAtCompileTime,
|
| 41 |
+
ret = !( (QRPreconditioner == NoQRPreconditioner) ||
|
| 42 |
+
(Case == PreconditionIfMoreColsThanRows && bool(a)) ||
|
| 43 |
+
(Case == PreconditionIfMoreRowsThanCols && bool(b)) )
|
| 44 |
+
};
|
| 45 |
+
};
|
| 46 |
+
|
| 47 |
+
template<typename MatrixType, int QRPreconditioner, int Case,
|
| 48 |
+
bool DoAnything = qr_preconditioner_should_do_anything<MatrixType, QRPreconditioner, Case>::ret
|
| 49 |
+
> struct qr_preconditioner_impl {};
|
| 50 |
+
|
| 51 |
+
template<typename MatrixType, int QRPreconditioner, int Case>
|
| 52 |
+
class qr_preconditioner_impl<MatrixType, QRPreconditioner, Case, false>
|
| 53 |
+
{
|
| 54 |
+
public:
|
| 55 |
+
void allocate(const JacobiSVD<MatrixType, QRPreconditioner>&) {}
|
| 56 |
+
bool run(JacobiSVD<MatrixType, QRPreconditioner>&, const MatrixType&)
|
| 57 |
+
{
|
| 58 |
+
return false;
|
| 59 |
+
}
|
| 60 |
+
};
|
| 61 |
+
|
| 62 |
+
/*** preconditioner using FullPivHouseholderQR ***/
|
| 63 |
+
|
| 64 |
+
template<typename MatrixType>
|
| 65 |
+
class qr_preconditioner_impl<MatrixType, FullPivHouseholderQRPreconditioner, PreconditionIfMoreRowsThanCols, true>
|
| 66 |
+
{
|
| 67 |
+
public:
|
| 68 |
+
typedef typename MatrixType::Scalar Scalar;
|
| 69 |
+
enum
|
| 70 |
+
{
|
| 71 |
+
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
|
| 72 |
+
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime
|
| 73 |
+
};
|
| 74 |
+
typedef Matrix<Scalar, 1, RowsAtCompileTime, RowMajor, 1, MaxRowsAtCompileTime> WorkspaceType;
|
| 75 |
+
|
| 76 |
+
void allocate(const JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner>& svd)
|
| 77 |
+
{
|
| 78 |
+
if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols())
|
| 79 |
+
{
|
| 80 |
+
m_qr.~QRType();
|
| 81 |
+
::new (&m_qr) QRType(svd.rows(), svd.cols());
|
| 82 |
+
}
|
| 83 |
+
if (svd.m_computeFullU) m_workspace.resize(svd.rows());
|
| 84 |
+
}
|
| 85 |
+
|
| 86 |
+
bool run(JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner>& svd, const MatrixType& matrix)
|
| 87 |
+
{
|
| 88 |
+
if(matrix.rows() > matrix.cols())
|
| 89 |
+
{
|
| 90 |
+
m_qr.compute(matrix);
|
| 91 |
+
svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView<Upper>();
|
| 92 |
+
if(svd.m_computeFullU) m_qr.matrixQ().evalTo(svd.m_matrixU, m_workspace);
|
| 93 |
+
if(svd.computeV()) svd.m_matrixV = m_qr.colsPermutation();
|
| 94 |
+
return true;
|
| 95 |
+
}
|
| 96 |
+
return false;
|
| 97 |
+
}
|
| 98 |
+
private:
|
| 99 |
+
typedef FullPivHouseholderQR<MatrixType> QRType;
|
| 100 |
+
QRType m_qr;
|
| 101 |
+
WorkspaceType m_workspace;
|
| 102 |
+
};
|
| 103 |
+
|
| 104 |
+
template<typename MatrixType>
|
| 105 |
+
class qr_preconditioner_impl<MatrixType, FullPivHouseholderQRPreconditioner, PreconditionIfMoreColsThanRows, true>
|
| 106 |
+
{
|
| 107 |
+
public:
|
| 108 |
+
typedef typename MatrixType::Scalar Scalar;
|
| 109 |
+
enum
|
| 110 |
+
{
|
| 111 |
+
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
|
| 112 |
+
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
|
| 113 |
+
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
|
| 114 |
+
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
|
| 115 |
+
Options = MatrixType::Options
|
| 116 |
+
};
|
| 117 |
+
|
| 118 |
+
typedef typename internal::make_proper_matrix_type<
|
| 119 |
+
Scalar, ColsAtCompileTime, RowsAtCompileTime, Options, MaxColsAtCompileTime, MaxRowsAtCompileTime
|
| 120 |
+
>::type TransposeTypeWithSameStorageOrder;
|
| 121 |
+
|
| 122 |
+
void allocate(const JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner>& svd)
|
| 123 |
+
{
|
| 124 |
+
if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols())
|
| 125 |
+
{
|
| 126 |
+
m_qr.~QRType();
|
| 127 |
+
::new (&m_qr) QRType(svd.cols(), svd.rows());
|
| 128 |
+
}
|
| 129 |
+
m_adjoint.resize(svd.cols(), svd.rows());
|
| 130 |
+
if (svd.m_computeFullV) m_workspace.resize(svd.cols());
|
| 131 |
+
}
|
| 132 |
+
|
| 133 |
+
bool run(JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner>& svd, const MatrixType& matrix)
|
| 134 |
+
{
|
| 135 |
+
if(matrix.cols() > matrix.rows())
|
| 136 |
+
{
|
| 137 |
+
m_adjoint = matrix.adjoint();
|
| 138 |
+
m_qr.compute(m_adjoint);
|
| 139 |
+
svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView<Upper>().adjoint();
|
| 140 |
+
if(svd.m_computeFullV) m_qr.matrixQ().evalTo(svd.m_matrixV, m_workspace);
|
| 141 |
+
if(svd.computeU()) svd.m_matrixU = m_qr.colsPermutation();
|
| 142 |
+
return true;
|
| 143 |
+
}
|
| 144 |
+
else return false;
|
| 145 |
+
}
|
| 146 |
+
private:
|
| 147 |
+
typedef FullPivHouseholderQR<TransposeTypeWithSameStorageOrder> QRType;
|
| 148 |
+
QRType m_qr;
|
| 149 |
+
TransposeTypeWithSameStorageOrder m_adjoint;
|
| 150 |
+
typename internal::plain_row_type<MatrixType>::type m_workspace;
|
| 151 |
+
};
|
| 152 |
+
|
| 153 |
+
/*** preconditioner using ColPivHouseholderQR ***/
|
| 154 |
+
|
| 155 |
+
template<typename MatrixType>
|
| 156 |
+
class qr_preconditioner_impl<MatrixType, ColPivHouseholderQRPreconditioner, PreconditionIfMoreRowsThanCols, true>
|
| 157 |
+
{
|
| 158 |
+
public:
|
| 159 |
+
void allocate(const JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner>& svd)
|
| 160 |
+
{
|
| 161 |
+
if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols())
|
| 162 |
+
{
|
| 163 |
+
m_qr.~QRType();
|
| 164 |
+
::new (&m_qr) QRType(svd.rows(), svd.cols());
|
| 165 |
+
}
|
| 166 |
+
if (svd.m_computeFullU) m_workspace.resize(svd.rows());
|
| 167 |
+
else if (svd.m_computeThinU) m_workspace.resize(svd.cols());
|
| 168 |
+
}
|
| 169 |
+
|
| 170 |
+
bool run(JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner>& svd, const MatrixType& matrix)
|
| 171 |
+
{
|
| 172 |
+
if(matrix.rows() > matrix.cols())
|
| 173 |
+
{
|
| 174 |
+
m_qr.compute(matrix);
|
| 175 |
+
svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView<Upper>();
|
| 176 |
+
if(svd.m_computeFullU) m_qr.householderQ().evalTo(svd.m_matrixU, m_workspace);
|
| 177 |
+
else if(svd.m_computeThinU)
|
| 178 |
+
{
|
| 179 |
+
svd.m_matrixU.setIdentity(matrix.rows(), matrix.cols());
|
| 180 |
+
m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixU, m_workspace);
|
| 181 |
+
}
|
| 182 |
+
if(svd.computeV()) svd.m_matrixV = m_qr.colsPermutation();
|
| 183 |
+
return true;
|
| 184 |
+
}
|
| 185 |
+
return false;
|
| 186 |
+
}
|
| 187 |
+
|
| 188 |
+
private:
|
| 189 |
+
typedef ColPivHouseholderQR<MatrixType> QRType;
|
| 190 |
+
QRType m_qr;
|
| 191 |
+
typename internal::plain_col_type<MatrixType>::type m_workspace;
|
| 192 |
+
};
|
| 193 |
+
|
| 194 |
+
template<typename MatrixType>
|
| 195 |
+
class qr_preconditioner_impl<MatrixType, ColPivHouseholderQRPreconditioner, PreconditionIfMoreColsThanRows, true>
|
| 196 |
+
{
|
| 197 |
+
public:
|
| 198 |
+
typedef typename MatrixType::Scalar Scalar;
|
| 199 |
+
enum
|
| 200 |
+
{
|
| 201 |
+
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
|
| 202 |
+
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
|
| 203 |
+
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
|
| 204 |
+
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
|
| 205 |
+
Options = MatrixType::Options
|
| 206 |
+
};
|
| 207 |
+
|
| 208 |
+
typedef typename internal::make_proper_matrix_type<
|
| 209 |
+
Scalar, ColsAtCompileTime, RowsAtCompileTime, Options, MaxColsAtCompileTime, MaxRowsAtCompileTime
|
| 210 |
+
>::type TransposeTypeWithSameStorageOrder;
|
| 211 |
+
|
| 212 |
+
void allocate(const JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner>& svd)
|
| 213 |
+
{
|
| 214 |
+
if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols())
|
| 215 |
+
{
|
| 216 |
+
m_qr.~QRType();
|
| 217 |
+
::new (&m_qr) QRType(svd.cols(), svd.rows());
|
| 218 |
+
}
|
| 219 |
+
if (svd.m_computeFullV) m_workspace.resize(svd.cols());
|
| 220 |
+
else if (svd.m_computeThinV) m_workspace.resize(svd.rows());
|
| 221 |
+
m_adjoint.resize(svd.cols(), svd.rows());
|
| 222 |
+
}
|
| 223 |
+
|
| 224 |
+
bool run(JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner>& svd, const MatrixType& matrix)
|
| 225 |
+
{
|
| 226 |
+
if(matrix.cols() > matrix.rows())
|
| 227 |
+
{
|
| 228 |
+
m_adjoint = matrix.adjoint();
|
| 229 |
+
m_qr.compute(m_adjoint);
|
| 230 |
+
|
| 231 |
+
svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView<Upper>().adjoint();
|
| 232 |
+
if(svd.m_computeFullV) m_qr.householderQ().evalTo(svd.m_matrixV, m_workspace);
|
| 233 |
+
else if(svd.m_computeThinV)
|
| 234 |
+
{
|
| 235 |
+
svd.m_matrixV.setIdentity(matrix.cols(), matrix.rows());
|
| 236 |
+
m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixV, m_workspace);
|
| 237 |
+
}
|
| 238 |
+
if(svd.computeU()) svd.m_matrixU = m_qr.colsPermutation();
|
| 239 |
+
return true;
|
| 240 |
+
}
|
| 241 |
+
else return false;
|
| 242 |
+
}
|
| 243 |
+
|
| 244 |
+
private:
|
| 245 |
+
typedef ColPivHouseholderQR<TransposeTypeWithSameStorageOrder> QRType;
|
| 246 |
+
QRType m_qr;
|
| 247 |
+
TransposeTypeWithSameStorageOrder m_adjoint;
|
| 248 |
+
typename internal::plain_row_type<MatrixType>::type m_workspace;
|
| 249 |
+
};
|
| 250 |
+
|
| 251 |
+
/*** preconditioner using HouseholderQR ***/
|
| 252 |
+
|
| 253 |
+
template<typename MatrixType>
|
| 254 |
+
class qr_preconditioner_impl<MatrixType, HouseholderQRPreconditioner, PreconditionIfMoreRowsThanCols, true>
|
| 255 |
+
{
|
| 256 |
+
public:
|
| 257 |
+
void allocate(const JacobiSVD<MatrixType, HouseholderQRPreconditioner>& svd)
|
| 258 |
+
{
|
| 259 |
+
if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols())
|
| 260 |
+
{
|
| 261 |
+
m_qr.~QRType();
|
| 262 |
+
::new (&m_qr) QRType(svd.rows(), svd.cols());
|
| 263 |
+
}
|
| 264 |
+
if (svd.m_computeFullU) m_workspace.resize(svd.rows());
|
| 265 |
+
else if (svd.m_computeThinU) m_workspace.resize(svd.cols());
|
| 266 |
+
}
|
| 267 |
+
|
| 268 |
+
bool run(JacobiSVD<MatrixType, HouseholderQRPreconditioner>& svd, const MatrixType& matrix)
|
| 269 |
+
{
|
| 270 |
+
if(matrix.rows() > matrix.cols())
|
| 271 |
+
{
|
| 272 |
+
m_qr.compute(matrix);
|
| 273 |
+
svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView<Upper>();
|
| 274 |
+
if(svd.m_computeFullU) m_qr.householderQ().evalTo(svd.m_matrixU, m_workspace);
|
| 275 |
+
else if(svd.m_computeThinU)
|
| 276 |
+
{
|
| 277 |
+
svd.m_matrixU.setIdentity(matrix.rows(), matrix.cols());
|
| 278 |
+
m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixU, m_workspace);
|
| 279 |
+
}
|
| 280 |
+
if(svd.computeV()) svd.m_matrixV.setIdentity(matrix.cols(), matrix.cols());
|
| 281 |
+
return true;
|
| 282 |
+
}
|
| 283 |
+
return false;
|
| 284 |
+
}
|
| 285 |
+
private:
|
| 286 |
+
typedef HouseholderQR<MatrixType> QRType;
|
| 287 |
+
QRType m_qr;
|
| 288 |
+
typename internal::plain_col_type<MatrixType>::type m_workspace;
|
| 289 |
+
};
|
| 290 |
+
|
| 291 |
+
template<typename MatrixType>
|
| 292 |
+
class qr_preconditioner_impl<MatrixType, HouseholderQRPreconditioner, PreconditionIfMoreColsThanRows, true>
|
| 293 |
+
{
|
| 294 |
+
public:
|
| 295 |
+
typedef typename MatrixType::Scalar Scalar;
|
| 296 |
+
enum
|
| 297 |
+
{
|
| 298 |
+
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
|
| 299 |
+
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
|
| 300 |
+
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
|
| 301 |
+
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
|
| 302 |
+
Options = MatrixType::Options
|
| 303 |
+
};
|
| 304 |
+
|
| 305 |
+
typedef typename internal::make_proper_matrix_type<
|
| 306 |
+
Scalar, ColsAtCompileTime, RowsAtCompileTime, Options, MaxColsAtCompileTime, MaxRowsAtCompileTime
|
| 307 |
+
>::type TransposeTypeWithSameStorageOrder;
|
| 308 |
+
|
| 309 |
+
void allocate(const JacobiSVD<MatrixType, HouseholderQRPreconditioner>& svd)
|
| 310 |
+
{
|
| 311 |
+
if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols())
|
| 312 |
+
{
|
| 313 |
+
m_qr.~QRType();
|
| 314 |
+
::new (&m_qr) QRType(svd.cols(), svd.rows());
|
| 315 |
+
}
|
| 316 |
+
if (svd.m_computeFullV) m_workspace.resize(svd.cols());
|
| 317 |
+
else if (svd.m_computeThinV) m_workspace.resize(svd.rows());
|
| 318 |
+
m_adjoint.resize(svd.cols(), svd.rows());
|
| 319 |
+
}
|
| 320 |
+
|
| 321 |
+
bool run(JacobiSVD<MatrixType, HouseholderQRPreconditioner>& svd, const MatrixType& matrix)
|
| 322 |
+
{
|
| 323 |
+
if(matrix.cols() > matrix.rows())
|
| 324 |
+
{
|
| 325 |
+
m_adjoint = matrix.adjoint();
|
| 326 |
+
m_qr.compute(m_adjoint);
|
| 327 |
+
|
| 328 |
+
svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView<Upper>().adjoint();
|
| 329 |
+
if(svd.m_computeFullV) m_qr.householderQ().evalTo(svd.m_matrixV, m_workspace);
|
| 330 |
+
else if(svd.m_computeThinV)
|
| 331 |
+
{
|
| 332 |
+
svd.m_matrixV.setIdentity(matrix.cols(), matrix.rows());
|
| 333 |
+
m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixV, m_workspace);
|
| 334 |
+
}
|
| 335 |
+
if(svd.computeU()) svd.m_matrixU.setIdentity(matrix.rows(), matrix.rows());
|
| 336 |
+
return true;
|
| 337 |
+
}
|
| 338 |
+
else return false;
|
| 339 |
+
}
|
| 340 |
+
|
| 341 |
+
private:
|
| 342 |
+
typedef HouseholderQR<TransposeTypeWithSameStorageOrder> QRType;
|
| 343 |
+
QRType m_qr;
|
| 344 |
+
TransposeTypeWithSameStorageOrder m_adjoint;
|
| 345 |
+
typename internal::plain_row_type<MatrixType>::type m_workspace;
|
| 346 |
+
};
|
| 347 |
+
|
| 348 |
+
/*** 2x2 SVD implementation
|
| 349 |
+
***
|
| 350 |
+
*** JacobiSVD consists in performing a series of 2x2 SVD subproblems
|
| 351 |
+
***/
|
| 352 |
+
|
| 353 |
+
template<typename MatrixType, int QRPreconditioner>
|
| 354 |
+
struct svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner, false>
|
| 355 |
+
{
|
| 356 |
+
typedef JacobiSVD<MatrixType, QRPreconditioner> SVD;
|
| 357 |
+
typedef typename MatrixType::RealScalar RealScalar;
|
| 358 |
+
static bool run(typename SVD::WorkMatrixType&, SVD&, Index, Index, RealScalar&) { return true; }
|
| 359 |
+
};
|
| 360 |
+
|
| 361 |
+
template<typename MatrixType, int QRPreconditioner>
|
| 362 |
+
struct svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner, true>
|
| 363 |
+
{
|
| 364 |
+
typedef JacobiSVD<MatrixType, QRPreconditioner> SVD;
|
| 365 |
+
typedef typename MatrixType::Scalar Scalar;
|
| 366 |
+
typedef typename MatrixType::RealScalar RealScalar;
|
| 367 |
+
static bool run(typename SVD::WorkMatrixType& work_matrix, SVD& svd, Index p, Index q, RealScalar& maxDiagEntry)
|
| 368 |
+
{
|
| 369 |
+
using std::sqrt;
|
| 370 |
+
using std::abs;
|
| 371 |
+
Scalar z;
|
| 372 |
+
JacobiRotation<Scalar> rot;
|
| 373 |
+
RealScalar n = sqrt(numext::abs2(work_matrix.coeff(p,p)) + numext::abs2(work_matrix.coeff(q,p)));
|
| 374 |
+
|
| 375 |
+
const RealScalar considerAsZero = (std::numeric_limits<RealScalar>::min)();
|
| 376 |
+
const RealScalar precision = NumTraits<Scalar>::epsilon();
|
| 377 |
+
|
| 378 |
+
if(n==0)
|
| 379 |
+
{
|
| 380 |
+
// make sure first column is zero
|
| 381 |
+
work_matrix.coeffRef(p,p) = work_matrix.coeffRef(q,p) = Scalar(0);
|
| 382 |
+
|
| 383 |
+
if(abs(numext::imag(work_matrix.coeff(p,q)))>considerAsZero)
|
| 384 |
+
{
|
| 385 |
+
// work_matrix.coeff(p,q) can be zero if work_matrix.coeff(q,p) is not zero but small enough to underflow when computing n
|
| 386 |
+
z = abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q);
|
| 387 |
+
work_matrix.row(p) *= z;
|
| 388 |
+
if(svd.computeU()) svd.m_matrixU.col(p) *= conj(z);
|
| 389 |
+
}
|
| 390 |
+
if(abs(numext::imag(work_matrix.coeff(q,q)))>considerAsZero)
|
| 391 |
+
{
|
| 392 |
+
z = abs(work_matrix.coeff(q,q)) / work_matrix.coeff(q,q);
|
| 393 |
+
work_matrix.row(q) *= z;
|
| 394 |
+
if(svd.computeU()) svd.m_matrixU.col(q) *= conj(z);
|
| 395 |
+
}
|
| 396 |
+
// otherwise the second row is already zero, so we have nothing to do.
|
| 397 |
+
}
|
| 398 |
+
else
|
| 399 |
+
{
|
| 400 |
+
rot.c() = conj(work_matrix.coeff(p,p)) / n;
|
| 401 |
+
rot.s() = work_matrix.coeff(q,p) / n;
|
| 402 |
+
work_matrix.applyOnTheLeft(p,q,rot);
|
| 403 |
+
if(svd.computeU()) svd.m_matrixU.applyOnTheRight(p,q,rot.adjoint());
|
| 404 |
+
if(abs(numext::imag(work_matrix.coeff(p,q)))>considerAsZero)
|
| 405 |
+
{
|
| 406 |
+
z = abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q);
|
| 407 |
+
work_matrix.col(q) *= z;
|
| 408 |
+
if(svd.computeV()) svd.m_matrixV.col(q) *= z;
|
| 409 |
+
}
|
| 410 |
+
if(abs(numext::imag(work_matrix.coeff(q,q)))>considerAsZero)
|
| 411 |
+
{
|
| 412 |
+
z = abs(work_matrix.coeff(q,q)) / work_matrix.coeff(q,q);
|
| 413 |
+
work_matrix.row(q) *= z;
|
| 414 |
+
if(svd.computeU()) svd.m_matrixU.col(q) *= conj(z);
|
| 415 |
+
}
|
| 416 |
+
}
|
| 417 |
+
|
| 418 |
+
// update largest diagonal entry
|
| 419 |
+
maxDiagEntry = numext::maxi<RealScalar>(maxDiagEntry,numext::maxi<RealScalar>(abs(work_matrix.coeff(p,p)), abs(work_matrix.coeff(q,q))));
|
| 420 |
+
// and check whether the 2x2 block is already diagonal
|
| 421 |
+
RealScalar threshold = numext::maxi<RealScalar>(considerAsZero, precision * maxDiagEntry);
|
| 422 |
+
return abs(work_matrix.coeff(p,q))>threshold || abs(work_matrix.coeff(q,p)) > threshold;
|
| 423 |
+
}
|
| 424 |
+
};
|
| 425 |
+
|
| 426 |
+
template<typename _MatrixType, int QRPreconditioner>
|
| 427 |
+
struct traits<JacobiSVD<_MatrixType,QRPreconditioner> >
|
| 428 |
+
: traits<_MatrixType>
|
| 429 |
+
{
|
| 430 |
+
typedef _MatrixType MatrixType;
|
| 431 |
+
};
|
| 432 |
+
|
| 433 |
+
} // end namespace internal
|
| 434 |
+
|
| 435 |
+
/** \ingroup SVD_Module
|
| 436 |
+
*
|
| 437 |
+
*
|
| 438 |
+
* \class JacobiSVD
|
| 439 |
+
*
|
| 440 |
+
* \brief Two-sided Jacobi SVD decomposition of a rectangular matrix
|
| 441 |
+
*
|
| 442 |
+
* \tparam _MatrixType the type of the matrix of which we are computing the SVD decomposition
|
| 443 |
+
* \tparam QRPreconditioner this optional parameter allows to specify the type of QR decomposition that will be used internally
|
| 444 |
+
* for the R-SVD step for non-square matrices. See discussion of possible values below.
|
| 445 |
+
*
|
| 446 |
+
* SVD decomposition consists in decomposing any n-by-p matrix \a A as a product
|
| 447 |
+
* \f[ A = U S V^* \f]
|
| 448 |
+
* where \a U is a n-by-n unitary, \a V is a p-by-p unitary, and \a S is a n-by-p real positive matrix which is zero outside of its main diagonal;
|
| 449 |
+
* the diagonal entries of S are known as the \em singular \em values of \a A and the columns of \a U and \a V are known as the left
|
| 450 |
+
* and right \em singular \em vectors of \a A respectively.
|
| 451 |
+
*
|
| 452 |
+
* Singular values are always sorted in decreasing order.
|
| 453 |
+
*
|
| 454 |
+
* This JacobiSVD decomposition computes only the singular values by default. If you want \a U or \a V, you need to ask for them explicitly.
|
| 455 |
+
*
|
| 456 |
+
* You can ask for only \em thin \a U or \a V to be computed, meaning the following. In case of a rectangular n-by-p matrix, letting \a m be the
|
| 457 |
+
* smaller value among \a n and \a p, there are only \a m singular vectors; the remaining columns of \a U and \a V do not correspond to actual
|
| 458 |
+
* singular vectors. Asking for \em thin \a U or \a V means asking for only their \a m first columns to be formed. So \a U is then a n-by-m matrix,
|
| 459 |
+
* and \a V is then a p-by-m matrix. Notice that thin \a U and \a V are all you need for (least squares) solving.
|
| 460 |
+
*
|
| 461 |
+
* Here's an example demonstrating basic usage:
|
| 462 |
+
* \include JacobiSVD_basic.cpp
|
| 463 |
+
* Output: \verbinclude JacobiSVD_basic.out
|
| 464 |
+
*
|
| 465 |
+
* This JacobiSVD class is a two-sided Jacobi R-SVD decomposition, ensuring optimal reliability and accuracy. The downside is that it's slower than
|
| 466 |
+
* bidiagonalizing SVD algorithms for large square matrices; however its complexity is still \f$ O(n^2p) \f$ where \a n is the smaller dimension and
|
| 467 |
+
* \a p is the greater dimension, meaning that it is still of the same order of complexity as the faster bidiagonalizing R-SVD algorithms.
|
| 468 |
+
* In particular, like any R-SVD, it takes advantage of non-squareness in that its complexity is only linear in the greater dimension.
|
| 469 |
+
*
|
| 470 |
+
* If the input matrix has inf or nan coefficients, the result of the computation is undefined, but the computation is guaranteed to
|
| 471 |
+
* terminate in finite (and reasonable) time.
|
| 472 |
+
*
|
| 473 |
+
* The possible values for QRPreconditioner are:
|
| 474 |
+
* \li ColPivHouseholderQRPreconditioner is the default. In practice it's very safe. It uses column-pivoting QR.
|
| 475 |
+
* \li FullPivHouseholderQRPreconditioner, is the safest and slowest. It uses full-pivoting QR.
|
| 476 |
+
* Contrary to other QRs, it doesn't allow computing thin unitaries.
|
| 477 |
+
* \li HouseholderQRPreconditioner is the fastest, and less safe and accurate than the pivoting variants. It uses non-pivoting QR.
|
| 478 |
+
* This is very similar in safety and accuracy to the bidiagonalization process used by bidiagonalizing SVD algorithms (since bidiagonalization
|
| 479 |
+
* is inherently non-pivoting). However the resulting SVD is still more reliable than bidiagonalizing SVDs because the Jacobi-based iterarive
|
| 480 |
+
* process is more reliable than the optimized bidiagonal SVD iterations.
|
| 481 |
+
* \li NoQRPreconditioner allows not to use a QR preconditioner at all. This is useful if you know that you will only be computing
|
| 482 |
+
* JacobiSVD decompositions of square matrices. Non-square matrices require a QR preconditioner. Using this option will result in
|
| 483 |
+
* faster compilation and smaller executable code. It won't significantly speed up computation, since JacobiSVD is always checking
|
| 484 |
+
* if QR preconditioning is needed before applying it anyway.
|
| 485 |
+
*
|
| 486 |
+
* \sa MatrixBase::jacobiSvd()
|
| 487 |
+
*/
|
| 488 |
+
template<typename _MatrixType, int QRPreconditioner> class JacobiSVD
|
| 489 |
+
: public SVDBase<JacobiSVD<_MatrixType,QRPreconditioner> >
|
| 490 |
+
{
|
| 491 |
+
typedef SVDBase<JacobiSVD> Base;
|
| 492 |
+
public:
|
| 493 |
+
|
| 494 |
+
typedef _MatrixType MatrixType;
|
| 495 |
+
typedef typename MatrixType::Scalar Scalar;
|
| 496 |
+
typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
|
| 497 |
+
enum {
|
| 498 |
+
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
|
| 499 |
+
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
|
| 500 |
+
DiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime,ColsAtCompileTime),
|
| 501 |
+
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
|
| 502 |
+
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
|
| 503 |
+
MaxDiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(MaxRowsAtCompileTime,MaxColsAtCompileTime),
|
| 504 |
+
MatrixOptions = MatrixType::Options
|
| 505 |
+
};
|
| 506 |
+
|
| 507 |
+
typedef typename Base::MatrixUType MatrixUType;
|
| 508 |
+
typedef typename Base::MatrixVType MatrixVType;
|
| 509 |
+
typedef typename Base::SingularValuesType SingularValuesType;
|
| 510 |
+
|
| 511 |
+
typedef typename internal::plain_row_type<MatrixType>::type RowType;
|
| 512 |
+
typedef typename internal::plain_col_type<MatrixType>::type ColType;
|
| 513 |
+
typedef Matrix<Scalar, DiagSizeAtCompileTime, DiagSizeAtCompileTime,
|
| 514 |
+
MatrixOptions, MaxDiagSizeAtCompileTime, MaxDiagSizeAtCompileTime>
|
| 515 |
+
WorkMatrixType;
|
| 516 |
+
|
| 517 |
+
/** \brief Default Constructor.
|
| 518 |
+
*
|
| 519 |
+
* The default constructor is useful in cases in which the user intends to
|
| 520 |
+
* perform decompositions via JacobiSVD::compute(const MatrixType&).
|
| 521 |
+
*/
|
| 522 |
+
JacobiSVD()
|
| 523 |
+
{}
|
| 524 |
+
|
| 525 |
+
|
| 526 |
+
/** \brief Default Constructor with memory preallocation
|
| 527 |
+
*
|
| 528 |
+
* Like the default constructor but with preallocation of the internal data
|
| 529 |
+
* according to the specified problem size.
|
| 530 |
+
* \sa JacobiSVD()
|
| 531 |
+
*/
|
| 532 |
+
JacobiSVD(Index rows, Index cols, unsigned int computationOptions = 0)
|
| 533 |
+
{
|
| 534 |
+
allocate(rows, cols, computationOptions);
|
| 535 |
+
}
|
| 536 |
+
|
| 537 |
+
/** \brief Constructor performing the decomposition of given matrix.
|
| 538 |
+
*
|
| 539 |
+
* \param matrix the matrix to decompose
|
| 540 |
+
* \param computationOptions optional parameter allowing to specify if you want full or thin U or V unitaries to be computed.
|
| 541 |
+
* By default, none is computed. This is a bit-field, the possible bits are #ComputeFullU, #ComputeThinU,
|
| 542 |
+
* #ComputeFullV, #ComputeThinV.
|
| 543 |
+
*
|
| 544 |
+
* Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not
|
| 545 |
+
* available with the (non-default) FullPivHouseholderQR preconditioner.
|
| 546 |
+
*/
|
| 547 |
+
explicit JacobiSVD(const MatrixType& matrix, unsigned int computationOptions = 0)
|
| 548 |
+
{
|
| 549 |
+
compute(matrix, computationOptions);
|
| 550 |
+
}
|
| 551 |
+
|
| 552 |
+
/** \brief Method performing the decomposition of given matrix using custom options.
|
| 553 |
+
*
|
| 554 |
+
* \param matrix the matrix to decompose
|
| 555 |
+
* \param computationOptions optional parameter allowing to specify if you want full or thin U or V unitaries to be computed.
|
| 556 |
+
* By default, none is computed. This is a bit-field, the possible bits are #ComputeFullU, #ComputeThinU,
|
| 557 |
+
* #ComputeFullV, #ComputeThinV.
|
| 558 |
+
*
|
| 559 |
+
* Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not
|
| 560 |
+
* available with the (non-default) FullPivHouseholderQR preconditioner.
|
| 561 |
+
*/
|
| 562 |
+
JacobiSVD& compute(const MatrixType& matrix, unsigned int computationOptions);
|
| 563 |
+
|
| 564 |
+
/** \brief Method performing the decomposition of given matrix using current options.
|
| 565 |
+
*
|
| 566 |
+
* \param matrix the matrix to decompose
|
| 567 |
+
*
|
| 568 |
+
* This method uses the current \a computationOptions, as already passed to the constructor or to compute(const MatrixType&, unsigned int).
|
| 569 |
+
*/
|
| 570 |
+
JacobiSVD& compute(const MatrixType& matrix)
|
| 571 |
+
{
|
| 572 |
+
return compute(matrix, m_computationOptions);
|
| 573 |
+
}
|
| 574 |
+
|
| 575 |
+
using Base::computeU;
|
| 576 |
+
using Base::computeV;
|
| 577 |
+
using Base::rows;
|
| 578 |
+
using Base::cols;
|
| 579 |
+
using Base::rank;
|
| 580 |
+
|
| 581 |
+
private:
|
| 582 |
+
void allocate(Index rows, Index cols, unsigned int computationOptions);
|
| 583 |
+
|
| 584 |
+
protected:
|
| 585 |
+
using Base::m_matrixU;
|
| 586 |
+
using Base::m_matrixV;
|
| 587 |
+
using Base::m_singularValues;
|
| 588 |
+
using Base::m_info;
|
| 589 |
+
using Base::m_isInitialized;
|
| 590 |
+
using Base::m_isAllocated;
|
| 591 |
+
using Base::m_usePrescribedThreshold;
|
| 592 |
+
using Base::m_computeFullU;
|
| 593 |
+
using Base::m_computeThinU;
|
| 594 |
+
using Base::m_computeFullV;
|
| 595 |
+
using Base::m_computeThinV;
|
| 596 |
+
using Base::m_computationOptions;
|
| 597 |
+
using Base::m_nonzeroSingularValues;
|
| 598 |
+
using Base::m_rows;
|
| 599 |
+
using Base::m_cols;
|
| 600 |
+
using Base::m_diagSize;
|
| 601 |
+
using Base::m_prescribedThreshold;
|
| 602 |
+
WorkMatrixType m_workMatrix;
|
| 603 |
+
|
| 604 |
+
template<typename __MatrixType, int _QRPreconditioner, bool _IsComplex>
|
| 605 |
+
friend struct internal::svd_precondition_2x2_block_to_be_real;
|
| 606 |
+
template<typename __MatrixType, int _QRPreconditioner, int _Case, bool _DoAnything>
|
| 607 |
+
friend struct internal::qr_preconditioner_impl;
|
| 608 |
+
|
| 609 |
+
internal::qr_preconditioner_impl<MatrixType, QRPreconditioner, internal::PreconditionIfMoreColsThanRows> m_qr_precond_morecols;
|
| 610 |
+
internal::qr_preconditioner_impl<MatrixType, QRPreconditioner, internal::PreconditionIfMoreRowsThanCols> m_qr_precond_morerows;
|
| 611 |
+
MatrixType m_scaledMatrix;
|
| 612 |
+
};
|
| 613 |
+
|
| 614 |
+
template<typename MatrixType, int QRPreconditioner>
|
| 615 |
+
void JacobiSVD<MatrixType, QRPreconditioner>::allocate(Eigen::Index rows, Eigen::Index cols, unsigned int computationOptions)
|
| 616 |
+
{
|
| 617 |
+
eigen_assert(rows >= 0 && cols >= 0);
|
| 618 |
+
|
| 619 |
+
if (m_isAllocated &&
|
| 620 |
+
rows == m_rows &&
|
| 621 |
+
cols == m_cols &&
|
| 622 |
+
computationOptions == m_computationOptions)
|
| 623 |
+
{
|
| 624 |
+
return;
|
| 625 |
+
}
|
| 626 |
+
|
| 627 |
+
m_rows = rows;
|
| 628 |
+
m_cols = cols;
|
| 629 |
+
m_info = Success;
|
| 630 |
+
m_isInitialized = false;
|
| 631 |
+
m_isAllocated = true;
|
| 632 |
+
m_computationOptions = computationOptions;
|
| 633 |
+
m_computeFullU = (computationOptions & ComputeFullU) != 0;
|
| 634 |
+
m_computeThinU = (computationOptions & ComputeThinU) != 0;
|
| 635 |
+
m_computeFullV = (computationOptions & ComputeFullV) != 0;
|
| 636 |
+
m_computeThinV = (computationOptions & ComputeThinV) != 0;
|
| 637 |
+
eigen_assert(!(m_computeFullU && m_computeThinU) && "JacobiSVD: you can't ask for both full and thin U");
|
| 638 |
+
eigen_assert(!(m_computeFullV && m_computeThinV) && "JacobiSVD: you can't ask for both full and thin V");
|
| 639 |
+
eigen_assert(EIGEN_IMPLIES(m_computeThinU || m_computeThinV, MatrixType::ColsAtCompileTime==Dynamic) &&
|
| 640 |
+
"JacobiSVD: thin U and V are only available when your matrix has a dynamic number of columns.");
|
| 641 |
+
if (QRPreconditioner == FullPivHouseholderQRPreconditioner)
|
| 642 |
+
{
|
| 643 |
+
eigen_assert(!(m_computeThinU || m_computeThinV) &&
|
| 644 |
+
"JacobiSVD: can't compute thin U or thin V with the FullPivHouseholderQR preconditioner. "
|
| 645 |
+
"Use the ColPivHouseholderQR preconditioner instead.");
|
| 646 |
+
}
|
| 647 |
+
m_diagSize = (std::min)(m_rows, m_cols);
|
| 648 |
+
m_singularValues.resize(m_diagSize);
|
| 649 |
+
if(RowsAtCompileTime==Dynamic)
|
| 650 |
+
m_matrixU.resize(m_rows, m_computeFullU ? m_rows
|
| 651 |
+
: m_computeThinU ? m_diagSize
|
| 652 |
+
: 0);
|
| 653 |
+
if(ColsAtCompileTime==Dynamic)
|
| 654 |
+
m_matrixV.resize(m_cols, m_computeFullV ? m_cols
|
| 655 |
+
: m_computeThinV ? m_diagSize
|
| 656 |
+
: 0);
|
| 657 |
+
m_workMatrix.resize(m_diagSize, m_diagSize);
|
| 658 |
+
|
| 659 |
+
if(m_cols>m_rows) m_qr_precond_morecols.allocate(*this);
|
| 660 |
+
if(m_rows>m_cols) m_qr_precond_morerows.allocate(*this);
|
| 661 |
+
if(m_rows!=m_cols) m_scaledMatrix.resize(rows,cols);
|
| 662 |
+
}
|
| 663 |
+
|
| 664 |
+
/** \internal Computes the SVD of \a matrix using the two-sided Jacobi algorithm.
  *
  * \param matrix             the matrix to decompose.
  * \param computationOptions bit-flags (ComputeFullU/ThinU/FullV/ThinV) selecting
  *                           which unitaries to form.
  * \returns a reference to \c *this so calls can be chained.
  *
  * Outline:
  *  1. scale the input to avoid over/underflow, bailing out early with
  *     InvalidInput if the matrix contains non-finite entries;
  *  2. if the matrix is rectangular, apply the QR preconditioner to reduce to a
  *     square working matrix;
  *  3. run one-sided-pair Jacobi sweeps until all off-diagonal 2x2 blocks are
  *     below threshold;
  *  4. make the diagonal real and non-negative, rescale, and sort the singular
  *     values in decreasing order.
  */
template<typename MatrixType, int QRPreconditioner>
JacobiSVD<MatrixType, QRPreconditioner>&
JacobiSVD<MatrixType, QRPreconditioner>::compute(const MatrixType& matrix, unsigned int computationOptions)
{
  using std::abs;
  allocate(matrix.rows(), matrix.cols(), computationOptions);

  // currently we stop when we reach precision 2*epsilon as the last bit of precision can require an unreasonable number of iterations,
  // only worsening the precision of U and V as we accumulate more rotations
  const RealScalar precision = RealScalar(2) * NumTraits<Scalar>::epsilon();

  // limit for denormal numbers to be considered zero in order to avoid infinite loops (see bug 286)
  const RealScalar considerAsZero = (std::numeric_limits<RealScalar>::min)();

  // Scaling factor to reduce over/under-flows
  RealScalar scale = matrix.cwiseAbs().template maxCoeff<PropagateNaN>();
  // A non-finite max-abs coefficient means the input contains NaN/Inf:
  // report InvalidInput instead of iterating on garbage.
  if (!(numext::isfinite)(scale)) {
    m_isInitialized = true;
    m_info = InvalidInput;
    m_nonzeroSingularValues = 0;
    return *this;
  }
  if(scale==RealScalar(0)) scale = RealScalar(1);

  /*** step 1. The R-SVD step: we use a QR decomposition to reduce to the case of a square matrix */

  if(m_rows!=m_cols)
  {
    m_scaledMatrix = matrix / scale;
    // Only one of the two preconditioners actually runs, depending on the
    // matrix shape; each one also initializes m_workMatrix/m_matrixU/m_matrixV.
    m_qr_precond_morecols.run(*this, m_scaledMatrix);
    m_qr_precond_morerows.run(*this, m_scaledMatrix);
  }
  else
  {
    m_workMatrix = matrix.block(0,0,m_diagSize,m_diagSize) / scale;
    if(m_computeFullU) m_matrixU.setIdentity(m_rows,m_rows);
    if(m_computeThinU) m_matrixU.setIdentity(m_rows,m_diagSize);
    if(m_computeFullV) m_matrixV.setIdentity(m_cols,m_cols);
    if(m_computeThinV) m_matrixV.setIdentity(m_cols, m_diagSize);
  }

  /*** step 2. The main Jacobi SVD iteration. ***/
  RealScalar maxDiagEntry = m_workMatrix.cwiseAbs().diagonal().maxCoeff();

  bool finished = false;
  while(!finished)
  {
    finished = true;

    // do a sweep: for all index pairs (p,q), perform SVD of the corresponding 2x2 sub-matrix

    for(Index p = 1; p < m_diagSize; ++p)
    {
      for(Index q = 0; q < p; ++q)
      {
        // if this 2x2 sub-matrix is not diagonal already...
        // notice that this comparison will evaluate to false if any NaN is involved, ensuring that NaN's don't
        // keep us iterating forever. Similarly, small denormal numbers are considered zero.
        RealScalar threshold = numext::maxi<RealScalar>(considerAsZero, precision * maxDiagEntry);
        if(abs(m_workMatrix.coeff(p,q))>threshold || abs(m_workMatrix.coeff(q,p)) > threshold)
        {
          finished = false;
          // perform SVD decomposition of 2x2 sub-matrix corresponding to indices p,q to make it diagonal
          // the complex to real operation returns true if the updated 2x2 block is not already diagonal
          if(internal::svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner>::run(m_workMatrix, *this, p, q, maxDiagEntry))
          {
            JacobiRotation<RealScalar> j_left, j_right;
            internal::real_2x2_jacobi_svd(m_workMatrix, p, q, &j_left, &j_right);

            // accumulate resulting Jacobi rotations
            m_workMatrix.applyOnTheLeft(p,q,j_left);
            if(computeU()) m_matrixU.applyOnTheRight(p,q,j_left.transpose());

            m_workMatrix.applyOnTheRight(p,q,j_right);
            if(computeV()) m_matrixV.applyOnTheRight(p,q,j_right);

            // keep track of the largest diagonal coefficient
            maxDiagEntry = numext::maxi<RealScalar>(maxDiagEntry,numext::maxi<RealScalar>(abs(m_workMatrix.coeff(p,p)), abs(m_workMatrix.coeff(q,q))));
          }
        }
      }
    }
  }

  /*** step 3. The work matrix is now diagonal, so ensure it's positive so its diagonal entries are the singular values ***/

  for(Index i = 0; i < m_diagSize; ++i)
  {
    // For a complex matrix, some diagonal coefficients might not have been
    // treated by svd_precondition_2x2_block_to_be_real, and the imaginary part
    // of some diagonal entry might not be null.
    if(NumTraits<Scalar>::IsComplex && abs(numext::imag(m_workMatrix.coeff(i,i)))>considerAsZero)
    {
      RealScalar a = abs(m_workMatrix.coeff(i,i));
      m_singularValues.coeffRef(i) = abs(a);
      // fold the complex phase of the diagonal entry into the corresponding column of U
      if(computeU()) m_matrixU.col(i) *= m_workMatrix.coeff(i,i)/a;
    }
    else
    {
      // m_workMatrix.coeff(i,i) is already real, no difficulty:
      RealScalar a = numext::real(m_workMatrix.coeff(i,i));
      m_singularValues.coeffRef(i) = abs(a);
      // a negative diagonal entry is absorbed by flipping the sign of U's column
      if(computeU() && (a<RealScalar(0))) m_matrixU.col(i) = -m_matrixU.col(i);
    }
  }

  // undo the initial scaling so the singular values match the original matrix
  m_singularValues *= scale;

  /*** step 4. Sort singular values in descending order and compute the number of nonzero singular values ***/

  m_nonzeroSingularValues = m_diagSize;
  for(Index i = 0; i < m_diagSize; i++)
  {
    // selection-sort pass: bring the largest remaining singular value to position i
    Index pos;
    RealScalar maxRemainingSingularValue = m_singularValues.tail(m_diagSize-i).maxCoeff(&pos);
    if(maxRemainingSingularValue == RealScalar(0))
    {
      m_nonzeroSingularValues = i;
      break;
    }
    if(pos)
    {
      pos += i;
      std::swap(m_singularValues.coeffRef(i), m_singularValues.coeffRef(pos));
      // keep the singular vectors paired with their singular values
      if(computeU()) m_matrixU.col(pos).swap(m_matrixU.col(i));
      if(computeV()) m_matrixV.col(pos).swap(m_matrixV.col(i));
    }
  }

  m_isInitialized = true;
  return *this;
}
|
| 796 |
+
|
| 797 |
+
/** \svd_module
|
| 798 |
+
*
|
| 799 |
+
* \return the singular value decomposition of \c *this computed by two-sided
|
| 800 |
+
* Jacobi transformations.
|
| 801 |
+
*
|
| 802 |
+
* \sa class JacobiSVD
|
| 803 |
+
*/
|
| 804 |
+
template<typename Derived>
|
| 805 |
+
JacobiSVD<typename MatrixBase<Derived>::PlainObject>
|
| 806 |
+
MatrixBase<Derived>::jacobiSvd(unsigned int computationOptions) const
|
| 807 |
+
{
|
| 808 |
+
return JacobiSVD<PlainObject>(*this, computationOptions);
|
| 809 |
+
}
|
| 810 |
+
|
| 811 |
+
} // end namespace Eigen
|
| 812 |
+
|
| 813 |
+
#endif // EIGEN_JACOBISVD_H
|
include/eigen/Eigen/src/SVD/JacobiSVD_LAPACKE.h
ADDED
|
@@ -0,0 +1,91 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
Copyright (c) 2011, Intel Corporation. All rights reserved.
|
| 3 |
+
|
| 4 |
+
Redistribution and use in source and binary forms, with or without modification,
|
| 5 |
+
are permitted provided that the following conditions are met:
|
| 6 |
+
|
| 7 |
+
* Redistributions of source code must retain the above copyright notice, this
|
| 8 |
+
list of conditions and the following disclaimer.
|
| 9 |
+
* Redistributions in binary form must reproduce the above copyright notice,
|
| 10 |
+
this list of conditions and the following disclaimer in the documentation
|
| 11 |
+
and/or other materials provided with the distribution.
|
| 12 |
+
* Neither the name of Intel Corporation nor the names of its contributors may
|
| 13 |
+
be used to endorse or promote products derived from this software without
|
| 14 |
+
specific prior written permission.
|
| 15 |
+
|
| 16 |
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
| 17 |
+
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
| 18 |
+
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
| 19 |
+
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
|
| 20 |
+
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
| 21 |
+
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
| 22 |
+
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
|
| 23 |
+
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
| 24 |
+
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
| 25 |
+
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
| 26 |
+
|
| 27 |
+
********************************************************************************
|
| 28 |
+
* Content : Eigen bindings to LAPACKe
|
| 29 |
+
* Singular Value Decomposition - SVD.
|
| 30 |
+
********************************************************************************
|
| 31 |
+
*/
|
| 32 |
+
|
| 33 |
+
#ifndef EIGEN_JACOBISVD_LAPACKE_H
|
| 34 |
+
#define EIGEN_JACOBISVD_LAPACKE_H
|
| 35 |
+
|
| 36 |
+
namespace Eigen {
|
| 37 |
+
|
| 38 |
+
/** \internal Specialization for the data types supported by LAPACKe */
|
| 39 |
+
|
| 40 |
+
/* Generates a full specialization of JacobiSVD<...>::compute that delegates the
 * decomposition to LAPACKE's ?gesvd driver instead of the Jacobi iteration.
 *
 * Parameters:
 *   EIGTYPE        - Eigen scalar type of the matrix (float, double, ...)
 *   LAPACKE_TYPE   - matching LAPACKE scalar type
 *   LAPACKE_RTYPE  - real type used for the singular values
 *   LAPACKE_PREFIX - LAPACKE routine prefix (s, d, c, z)
 *   EIGCOLROW      - Eigen storage order (ColMajor / RowMajor)
 *   LAPACKE_COLROW - matching LAPACKE layout constant
 */
#define EIGEN_LAPACKE_SVD(EIGTYPE, LAPACKE_TYPE, LAPACKE_RTYPE, LAPACKE_PREFIX, EIGCOLROW, LAPACKE_COLROW) \
template<> inline \
JacobiSVD<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic>, ColPivHouseholderQRPreconditioner>& \
JacobiSVD<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic>, ColPivHouseholderQRPreconditioner>::compute(const Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic>& matrix, unsigned int computationOptions) \
{ \
  typedef Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic> MatrixType; \
  /*typedef MatrixType::Scalar Scalar;*/ \
  /*typedef MatrixType::RealScalar RealScalar;*/ \
  allocate(matrix.rows(), matrix.cols(), computationOptions); \
  \
  /*const RealScalar precision = RealScalar(2) * NumTraits<Scalar>::epsilon();*/ \
  m_nonzeroSingularValues = m_diagSize; \
  \
  lapack_int lda = internal::convert_index<lapack_int>(matrix.outerStride()), ldu, ldvt; \
  lapack_int matrix_order = LAPACKE_COLROW; \
  char jobu, jobvt; \
  LAPACKE_TYPE *u, *vt, dummy; \
  /* Map Eigen's computation options to gesvd job characters: 'A' = all columns, 'S' = thin, 'N' = none. */ \
  jobu = (m_computeFullU) ? 'A' : (m_computeThinU) ? 'S' : 'N'; \
  jobvt = (m_computeFullV) ? 'A' : (m_computeThinV) ? 'S' : 'N'; \
  if (computeU()) { \
    ldu = internal::convert_index<lapack_int>(m_matrixU.outerStride()); \
    u = (LAPACKE_TYPE*)m_matrixU.data(); \
  } else { ldu=1; u=&dummy; }\
  /* gesvd returns V^T (or V^H); compute it into a temporary, adjoint into m_matrixV afterwards. */ \
  MatrixType localV; \
  lapack_int vt_rows = (m_computeFullV) ? internal::convert_index<lapack_int>(m_cols) : (m_computeThinV) ? internal::convert_index<lapack_int>(m_diagSize) : 1; \
  if (computeV()) { \
    localV.resize(vt_rows, m_cols); \
    ldvt = internal::convert_index<lapack_int>(localV.outerStride()); \
    vt = (LAPACKE_TYPE*)localV.data(); \
  } else { ldvt=1; vt=&dummy; }\
  /* superb holds the unconverged superdiagonal elements reported by gesvd. */ \
  Matrix<LAPACKE_RTYPE, Dynamic, Dynamic> superb; superb.resize(m_diagSize, 1); \
  /* gesvd overwrites its input, so work on a copy of the matrix. */ \
  MatrixType m_temp; m_temp = matrix; \
  LAPACKE_##LAPACKE_PREFIX##gesvd( matrix_order, jobu, jobvt, internal::convert_index<lapack_int>(m_rows), internal::convert_index<lapack_int>(m_cols), (LAPACKE_TYPE*)m_temp.data(), lda, (LAPACKE_RTYPE*)m_singularValues.data(), u, ldu, vt, ldvt, superb.data()); \
  if (computeV()) m_matrixV = localV.adjoint(); \
 /* for(int i=0;i<m_diagSize;i++) if (m_singularValues.coeffRef(i) < precision) { m_nonzeroSingularValues--; m_singularValues.coeffRef(i)=RealScalar(0);}*/ \
  m_isInitialized = true; \
  return *this; \
}
|
| 78 |
+
|
| 79 |
+
// Instantiate the LAPACKE-backed compute() specialization for each supported
// scalar type (real/complex, single/double precision) and storage order.
EIGEN_LAPACKE_SVD(double, double, double, d, ColMajor, LAPACK_COL_MAJOR)
EIGEN_LAPACKE_SVD(float, float, float , s, ColMajor, LAPACK_COL_MAJOR)
EIGEN_LAPACKE_SVD(dcomplex, lapack_complex_double, double, z, ColMajor, LAPACK_COL_MAJOR)
EIGEN_LAPACKE_SVD(scomplex, lapack_complex_float, float , c, ColMajor, LAPACK_COL_MAJOR)

EIGEN_LAPACKE_SVD(double, double, double, d, RowMajor, LAPACK_ROW_MAJOR)
EIGEN_LAPACKE_SVD(float, float, float , s, RowMajor, LAPACK_ROW_MAJOR)
EIGEN_LAPACKE_SVD(dcomplex, lapack_complex_double, double, z, RowMajor, LAPACK_ROW_MAJOR)
EIGEN_LAPACKE_SVD(scomplex, lapack_complex_float, float , c, RowMajor, LAPACK_ROW_MAJOR)
|
| 88 |
+
|
| 89 |
+
} // end namespace Eigen
|
| 90 |
+
|
| 91 |
+
#endif // EIGEN_JACOBISVD_LAPACKE_H
|
include/eigen/Eigen/src/SVD/SVDBase.h
ADDED
|
@@ -0,0 +1,376 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// This file is part of Eigen, a lightweight C++ template library
|
| 2 |
+
// for linear algebra.
|
| 3 |
+
//
|
| 4 |
+
// Copyright (C) 2009-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
|
| 5 |
+
// Copyright (C) 2014 Gael Guennebaud <gael.guennebaud@inria.fr>
|
| 6 |
+
//
|
| 7 |
+
// Copyright (C) 2013 Gauthier Brun <brun.gauthier@gmail.com>
|
| 8 |
+
// Copyright (C) 2013 Nicolas Carre <nicolas.carre@ensimag.fr>
|
| 9 |
+
// Copyright (C) 2013 Jean Ceccato <jean.ceccato@ensimag.fr>
|
| 10 |
+
// Copyright (C) 2013 Pierre Zoppitelli <pierre.zoppitelli@ensimag.fr>
|
| 11 |
+
//
|
| 12 |
+
// This Source Code Form is subject to the terms of the Mozilla
|
| 13 |
+
// Public License v. 2.0. If a copy of the MPL was not distributed
|
| 14 |
+
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
| 15 |
+
|
| 16 |
+
#ifndef EIGEN_SVDBASE_H
|
| 17 |
+
#define EIGEN_SVDBASE_H
|
| 18 |
+
|
| 19 |
+
namespace Eigen {
|
| 20 |
+
|
| 21 |
+
namespace internal {
// Traits for the SVDBase CRTP base: inherit the derived decomposition's traits
// and mark the expression as a solver (SolverStorage) so solve() machinery applies.
template<typename Derived> struct traits<SVDBase<Derived> >
 : traits<Derived>
{
  typedef MatrixXpr XprKind;
  typedef SolverStorage StorageKind;
  typedef int StorageIndex;
  enum { Flags = 0 };
};
}
|
| 31 |
+
|
| 32 |
+
/** \ingroup SVD_Module
  *
  *
  * \class SVDBase
  *
  * \brief Base class of SVD algorithms
  *
  * \tparam Derived the type of the actual SVD decomposition
  *
  * SVD decomposition consists in decomposing any n-by-p matrix \a A as a product
  *   \f[ A = U S V^* \f]
  * where \a U is a n-by-n unitary, \a V is a p-by-p unitary, and \a S is a n-by-p real positive matrix which is zero outside of its main diagonal;
  * the diagonal entries of S are known as the \em singular \em values of \a A and the columns of \a U and \a V are known as the left
  * and right \em singular \em vectors of \a A respectively.
  *
  * Singular values are always sorted in decreasing order.
  *
  *
  * You can ask for only \em thin \a U or \a V to be computed, meaning the following. In case of a rectangular n-by-p matrix, letting \a m be the
  * smaller value among \a n and \a p, there are only \a m singular vectors; the remaining columns of \a U and \a V do not correspond to actual
  * singular vectors. Asking for \em thin \a U or \a V means asking for only their \a m first columns to be formed. So \a U is then a n-by-m matrix,
  * and \a V is then a p-by-m matrix. Notice that thin \a U and \a V are all you need for (least squares) solving.
  *
  * The status of the computation can be retrieved using the \a info() method. Unless \a info() returns \a Success, the results should not be
  * considered well defined.
  *
  * If the input matrix has inf or nan coefficients, the result of the computation is undefined, and \a info() will return \a InvalidInput, but the computation is guaranteed to
  * terminate in finite (and reasonable) time.
  * \sa class BDCSVD, class JacobiSVD
  */
template<typename Derived> class SVDBase
 : public SolverBase<SVDBase<Derived> >
{
public:

  template<typename Derived_>
  friend struct internal::solve_assertion;

  typedef typename internal::traits<Derived>::MatrixType MatrixType;
  typedef typename MatrixType::Scalar Scalar;
  typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
  typedef typename Eigen::internal::traits<SVDBase>::StorageIndex StorageIndex;
  typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
  enum {
    RowsAtCompileTime = MatrixType::RowsAtCompileTime,
    ColsAtCompileTime = MatrixType::ColsAtCompileTime,
    DiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime,ColsAtCompileTime),
    MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
    MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
    MaxDiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(MaxRowsAtCompileTime,MaxColsAtCompileTime),
    MatrixOptions = MatrixType::Options
  };

  typedef Matrix<Scalar, RowsAtCompileTime, RowsAtCompileTime, MatrixOptions, MaxRowsAtCompileTime, MaxRowsAtCompileTime> MatrixUType;
  typedef Matrix<Scalar, ColsAtCompileTime, ColsAtCompileTime, MatrixOptions, MaxColsAtCompileTime, MaxColsAtCompileTime> MatrixVType;
  typedef typename internal::plain_diag_type<MatrixType, RealScalar>::type SingularValuesType;

  Derived& derived() { return *static_cast<Derived*>(this); }
  const Derived& derived() const { return *static_cast<const Derived*>(this); }

  /** \returns the \a U matrix.
   *
   * For the SVD decomposition of a n-by-p matrix, letting \a m be the minimum of \a n and \a p,
   * the U matrix is n-by-n if you asked for \link Eigen::ComputeFullU ComputeFullU \endlink, and is n-by-m if you asked for \link Eigen::ComputeThinU ComputeThinU \endlink.
   *
   * The \a m first columns of \a U are the left singular vectors of the matrix being decomposed.
   *
   * This method asserts that you asked for \a U to be computed.
   */
  const MatrixUType& matrixU() const
  {
    _check_compute_assertions();
    eigen_assert(computeU() && "This SVD decomposition didn't compute U. Did you ask for it?");
    return m_matrixU;
  }

  /** \returns the \a V matrix.
   *
   * For the SVD decomposition of a n-by-p matrix, letting \a m be the minimum of \a n and \a p,
   * the V matrix is p-by-p if you asked for \link Eigen::ComputeFullV ComputeFullV \endlink, and is p-by-m if you asked for \link Eigen::ComputeThinV ComputeThinV \endlink.
   *
   * The \a m first columns of \a V are the right singular vectors of the matrix being decomposed.
   *
   * This method asserts that you asked for \a V to be computed.
   */
  const MatrixVType& matrixV() const
  {
    _check_compute_assertions();
    eigen_assert(computeV() && "This SVD decomposition didn't compute V. Did you ask for it?");
    return m_matrixV;
  }

  /** \returns the vector of singular values.
   *
   * For the SVD decomposition of a n-by-p matrix, letting \a m be the minimum of \a n and \a p, the
   * returned vector has size \a m. Singular values are always sorted in decreasing order.
   */
  const SingularValuesType& singularValues() const
  {
    _check_compute_assertions();
    return m_singularValues;
  }

  /** \returns the number of singular values that are not exactly 0 */
  Index nonzeroSingularValues() const
  {
    _check_compute_assertions();
    return m_nonzeroSingularValues;
  }

  /** \returns the rank of the matrix of which \c *this is the SVD.
    *
    * \note This method has to determine which singular values should be considered nonzero.
    *       For that, it uses the threshold value that you can control by calling
    *       setThreshold(const RealScalar&).
    */
  inline Index rank() const
  {
    using std::abs;
    _check_compute_assertions();
    if(m_singularValues.size()==0) return 0;
    // Singular values below threshold() * (largest singular value) are treated as zero;
    // the floor at the smallest normal number guards against a zero threshold.
    RealScalar premultiplied_threshold = numext::maxi<RealScalar>(m_singularValues.coeff(0) * threshold(), (std::numeric_limits<RealScalar>::min)());
    // Singular values are sorted decreasingly, so scan backwards from the last nonzero one.
    Index i = m_nonzeroSingularValues-1;
    while(i>=0 && m_singularValues.coeff(i) < premultiplied_threshold) --i;
    return i+1;
  }

  /** Allows to prescribe a threshold to be used by certain methods, such as rank() and solve(),
    * which need to determine when singular values are to be considered nonzero.
    * This is not used for the SVD decomposition itself.
    *
    * When it needs to get the threshold value, Eigen calls threshold().
    * The default is \c NumTraits<Scalar>::epsilon()
    *
    * \param threshold The new value to use as the threshold.
    *
    * A singular value will be considered nonzero if its value is strictly greater than
    *  \f$ \vert singular value \vert \leqslant threshold \times \vert max singular value \vert \f$.
    *
    * If you want to come back to the default behavior, call setThreshold(Default_t)
    */
  Derived& setThreshold(const RealScalar& threshold)
  {
    m_usePrescribedThreshold = true;
    m_prescribedThreshold = threshold;
    return derived();
  }

  /** Allows to come back to the default behavior, letting Eigen use its default formula for
    * determining the threshold.
    *
    * You should pass the special object Eigen::Default as parameter here.
    * \code svd.setThreshold(Eigen::Default); \endcode
    *
    * See the documentation of setThreshold(const RealScalar&).
    */
  Derived& setThreshold(Default_t)
  {
    m_usePrescribedThreshold = false;
    return derived();
  }

  /** Returns the threshold that will be used by certain methods such as rank().
    *
    * See the documentation of setThreshold(const RealScalar&).
    */
  RealScalar threshold() const
  {
    eigen_assert(m_isInitialized || m_usePrescribedThreshold);
    // this temporary is needed to workaround a MSVC issue
    Index diagSize = (std::max<Index>)(1,m_diagSize);
    return m_usePrescribedThreshold ? m_prescribedThreshold
                                    : RealScalar(diagSize)*NumTraits<Scalar>::epsilon();
  }

  /** \returns true if \a U (full or thin) is asked for in this SVD decomposition */
  inline bool computeU() const { return m_computeFullU || m_computeThinU; }
  /** \returns true if \a V (full or thin) is asked for in this SVD decomposition */
  inline bool computeV() const { return m_computeFullV || m_computeThinV; }

  inline Index rows() const { return m_rows; }
  inline Index cols() const { return m_cols; }

  #ifdef EIGEN_PARSED_BY_DOXYGEN
  /** \returns a (least squares) solution of \f$ A x = b \f$ using the current SVD decomposition of A.
    *
    * \param b the right-hand-side of the equation to solve.
    *
    * \note Solving requires both U and V to be computed. Thin U and V are enough, there is no need for full U or V.
    *
    * \note SVD solving is implicitly least-squares. Thus, this method serves both purposes of exact solving and least-squares solving.
    * In other words, the returned solution is guaranteed to minimize the Euclidean norm \f$ \Vert A x - b \Vert \f$.
    */
  template<typename Rhs>
  inline const Solve<Derived, Rhs>
  solve(const MatrixBase<Rhs>& b) const;
  #endif


  /** \brief Reports whether previous computation was successful.
    *
    * \returns \c Success if computation was successful.
    */
  EIGEN_DEVICE_FUNC
  ComputationInfo info() const
  {
    eigen_assert(m_isInitialized && "SVD is not initialized.");
    return m_info;
  }

  #ifndef EIGEN_PARSED_BY_DOXYGEN
  template<typename RhsType, typename DstType>
  void _solve_impl(const RhsType &rhs, DstType &dst) const;

  template<bool Conjugate, typename RhsType, typename DstType>
  void _solve_impl_transposed(const RhsType &rhs, DstType &dst) const;
  #endif

protected:

  static void check_template_parameters()
  {
    EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
  }

  // Common precondition shared by all accessors: the decomposition must have been computed.
  void _check_compute_assertions() const {
    eigen_assert(m_isInitialized && "SVD is not initialized.");
  }

  template<bool Transpose_, typename Rhs>
  void _check_solve_assertion(const Rhs& b) const {
      EIGEN_ONLY_USED_FOR_DEBUG(b);
      _check_compute_assertions();
      eigen_assert(computeU() && computeV() && "SVDBase::solve(): Both unitaries U and V are required to be computed (thin unitaries suffice).");
      eigen_assert((Transpose_?cols():rows())==b.rows() && "SVDBase::solve(): invalid number of rows of the right hand side matrix b");
  }

  // return true if already allocated
  bool allocate(Index rows, Index cols, unsigned int computationOptions) ;

  MatrixUType m_matrixU;                 // left singular vectors (full or thin, per options)
  MatrixVType m_matrixV;                 // right singular vectors (full or thin, per options)
  SingularValuesType m_singularValues;   // singular values, sorted decreasingly
  ComputationInfo m_info;                // status of the last computation
  bool m_isInitialized, m_isAllocated, m_usePrescribedThreshold;
  bool m_computeFullU, m_computeThinU;
  bool m_computeFullV, m_computeThinV;
  unsigned int m_computationOptions;     // option bits passed to the last allocate()
  Index m_nonzeroSingularValues, m_rows, m_cols, m_diagSize;
  RealScalar m_prescribedThreshold;      // only meaningful when m_usePrescribedThreshold

  /** \brief Default Constructor.
    *
    * Default constructor of SVDBase
    */
  SVDBase()
    : m_info(Success),
      m_isInitialized(false),
      m_isAllocated(false),
      m_usePrescribedThreshold(false),
      m_computeFullU(false),
      m_computeThinU(false),
      m_computeFullV(false),
      m_computeThinV(false),
      m_computationOptions(0),
      m_rows(-1), m_cols(-1), m_diagSize(0)
  {
    check_template_parameters();
  }


};
|
| 304 |
+
|
| 305 |
+
#ifndef EIGEN_PARSED_BY_DOXYGEN
|
| 306 |
+
/** \internal Least-squares solve of A x = b via the SVD.
  *
  * Uses the rank-truncated pseudo-inverse: with A = U S V^*,
  * x = V_r S_r^{-1} U_r^* b where r = rank() columns/values are kept,
  * so numerically-zero singular values never get inverted.
  */
template<typename Derived>
template<typename RhsType, typename DstType>
void SVDBase<Derived>::_solve_impl(const RhsType &rhs, DstType &dst) const
{
  // A = U S V^*
  // So A^{-1} = V S^{-1} U^*

  Matrix<typename RhsType::Scalar, Dynamic, RhsType::ColsAtCompileTime, 0, MatrixType::MaxRowsAtCompileTime, RhsType::MaxColsAtCompileTime> tmp;
  Index l_rank = rank();
  tmp.noalias() = m_matrixU.leftCols(l_rank).adjoint() * rhs;
  tmp = m_singularValues.head(l_rank).asDiagonal().inverse() * tmp;
  dst = m_matrixV.leftCols(l_rank) * tmp;
}
|
| 319 |
+
|
| 320 |
+
/** \internal Least-squares solve of A^T x = b (Conjugate==false) or A^* x = b
  * (Conjugate==true) using the SVD of A, again truncated to rank() terms.
  */
template<typename Derived>
template<bool Conjugate, typename RhsType, typename DstType>
void SVDBase<Derived>::_solve_impl_transposed(const RhsType &rhs, DstType &dst) const
{
  // A = U S V^*
  // So A^{-*} = U S^{-1} V^*
  // And A^{-T} = U_conj S^{-1} V^T
  Matrix<typename RhsType::Scalar, Dynamic, RhsType::ColsAtCompileTime, 0, MatrixType::MaxRowsAtCompileTime, RhsType::MaxColsAtCompileTime> tmp;
  Index l_rank = rank();

  // conjugateIf<> selects between the transposed and adjoint variants at compile time.
  tmp.noalias() = m_matrixV.leftCols(l_rank).transpose().template conjugateIf<Conjugate>() * rhs;
  tmp = m_singularValues.head(l_rank).asDiagonal().inverse() * tmp;
  dst = m_matrixU.template conjugateIf<!Conjugate>().leftCols(l_rank) * tmp;
}
|
| 334 |
+
#endif
|
| 335 |
+
|
| 336 |
+
/** \internal Prepares internal storage and option flags for a decomposition of
  * a \a rows x \a cols matrix with the given \a computationOptions.
  *
  * \returns true if the previous allocation matches (sizes and options unchanged),
  *          in which case nothing is done; false when (re)allocation was performed.
  */
template<typename MatrixType>
bool SVDBase<MatrixType>::allocate(Index rows, Index cols, unsigned int computationOptions)
{
  eigen_assert(rows >= 0 && cols >= 0);

  // Fast path: reuse the existing buffers when shape and options are unchanged.
  if (m_isAllocated &&
      rows == m_rows &&
      cols == m_cols &&
      computationOptions == m_computationOptions)
  {
    return true;
  }

  m_rows = rows;
  m_cols = cols;
  m_info = Success;
  m_isInitialized = false;
  m_isAllocated = true;
  m_computationOptions = computationOptions;
  // Decode option bit-flags into the individual full/thin booleans.
  m_computeFullU = (computationOptions & ComputeFullU) != 0;
  m_computeThinU = (computationOptions & ComputeThinU) != 0;
  m_computeFullV = (computationOptions & ComputeFullV) != 0;
  m_computeThinV = (computationOptions & ComputeThinV) != 0;
  eigen_assert(!(m_computeFullU && m_computeThinU) && "SVDBase: you can't ask for both full and thin U");
  eigen_assert(!(m_computeFullV && m_computeThinV) && "SVDBase: you can't ask for both full and thin V");
  eigen_assert(EIGEN_IMPLIES(m_computeThinU || m_computeThinV, MatrixType::ColsAtCompileTime==Dynamic) &&
               "SVDBase: thin U and V are only available when your matrix has a dynamic number of columns.");

  m_diagSize = (std::min)(m_rows, m_cols);
  m_singularValues.resize(m_diagSize);
  // Fixed-size matrices are never resized; only dynamic ones are sized here.
  if(RowsAtCompileTime==Dynamic)
    m_matrixU.resize(m_rows, m_computeFullU ? m_rows : m_computeThinU ? m_diagSize : 0);
  if(ColsAtCompileTime==Dynamic)
    m_matrixV.resize(m_cols, m_computeFullV ? m_cols : m_computeThinV ? m_diagSize : 0);

  return false;
}
|
| 373 |
+
|
| 374 |
+
}// end namespace
|
| 375 |
+
|
| 376 |
+
#endif // EIGEN_SVDBASE_H
|
include/eigen/Eigen/src/SVD/UpperBidiagonalization.h
ADDED
|
@@ -0,0 +1,415 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// This file is part of Eigen, a lightweight C++ template library
|
| 2 |
+
// for linear algebra.
|
| 3 |
+
//
|
| 4 |
+
// Copyright (C) 2010 Benoit Jacob <jacob.benoit.1@gmail.com>
|
| 5 |
+
// Copyright (C) 2013-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
|
| 6 |
+
//
|
| 7 |
+
// This Source Code Form is subject to the terms of the Mozilla
|
| 8 |
+
// Public License v. 2.0. If a copy of the MPL was not distributed
|
| 9 |
+
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
| 10 |
+
|
| 11 |
+
#ifndef EIGEN_BIDIAGONALIZATION_H
|
| 12 |
+
#define EIGEN_BIDIAGONALIZATION_H
|
| 13 |
+
|
| 14 |
+
namespace Eigen {
|
| 15 |
+
|
| 16 |
+
namespace internal {
|
| 17 |
+
// UpperBidiagonalization will probably be replaced by a Bidiagonalization class, don't want to make it stable API.
|
| 18 |
+
// At the same time, it's useful to keep for now as it's about the only thing that is testing the BandMatrix class.
|
| 19 |
+
|
| 20 |
+
template<typename _MatrixType> class UpperBidiagonalization
|
| 21 |
+
{
|
| 22 |
+
public:
|
| 23 |
+
|
| 24 |
+
typedef _MatrixType MatrixType;
|
| 25 |
+
enum {
|
| 26 |
+
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
|
| 27 |
+
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
|
| 28 |
+
ColsAtCompileTimeMinusOne = internal::decrement_size<ColsAtCompileTime>::ret
|
| 29 |
+
};
|
| 30 |
+
typedef typename MatrixType::Scalar Scalar;
|
| 31 |
+
typedef typename MatrixType::RealScalar RealScalar;
|
| 32 |
+
typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
|
| 33 |
+
typedef Matrix<Scalar, 1, ColsAtCompileTime> RowVectorType;
|
| 34 |
+
typedef Matrix<Scalar, RowsAtCompileTime, 1> ColVectorType;
|
| 35 |
+
typedef BandMatrix<RealScalar, ColsAtCompileTime, ColsAtCompileTime, 1, 0, RowMajor> BidiagonalType;
|
| 36 |
+
typedef Matrix<Scalar, ColsAtCompileTime, 1> DiagVectorType;
|
| 37 |
+
typedef Matrix<Scalar, ColsAtCompileTimeMinusOne, 1> SuperDiagVectorType;
|
| 38 |
+
typedef HouseholderSequence<
|
| 39 |
+
const MatrixType,
|
| 40 |
+
const typename internal::remove_all<typename Diagonal<const MatrixType,0>::ConjugateReturnType>::type
|
| 41 |
+
> HouseholderUSequenceType;
|
| 42 |
+
typedef HouseholderSequence<
|
| 43 |
+
const typename internal::remove_all<typename MatrixType::ConjugateReturnType>::type,
|
| 44 |
+
Diagonal<const MatrixType,1>,
|
| 45 |
+
OnTheRight
|
| 46 |
+
> HouseholderVSequenceType;
|
| 47 |
+
|
| 48 |
+
/**
|
| 49 |
+
* \brief Default Constructor.
|
| 50 |
+
*
|
| 51 |
+
* The default constructor is useful in cases in which the user intends to
|
| 52 |
+
* perform decompositions via Bidiagonalization::compute(const MatrixType&).
|
| 53 |
+
*/
|
| 54 |
+
UpperBidiagonalization() : m_householder(), m_bidiagonal(), m_isInitialized(false) {}
|
| 55 |
+
|
| 56 |
+
explicit UpperBidiagonalization(const MatrixType& matrix)
|
| 57 |
+
: m_householder(matrix.rows(), matrix.cols()),
|
| 58 |
+
m_bidiagonal(matrix.cols(), matrix.cols()),
|
| 59 |
+
m_isInitialized(false)
|
| 60 |
+
{
|
| 61 |
+
compute(matrix);
|
| 62 |
+
}
|
| 63 |
+
|
| 64 |
+
UpperBidiagonalization& compute(const MatrixType& matrix);
|
| 65 |
+
UpperBidiagonalization& computeUnblocked(const MatrixType& matrix);
|
| 66 |
+
|
| 67 |
+
const MatrixType& householder() const { return m_householder; }
|
| 68 |
+
const BidiagonalType& bidiagonal() const { return m_bidiagonal; }
|
| 69 |
+
|
| 70 |
+
const HouseholderUSequenceType householderU() const
|
| 71 |
+
{
|
| 72 |
+
eigen_assert(m_isInitialized && "UpperBidiagonalization is not initialized.");
|
| 73 |
+
return HouseholderUSequenceType(m_householder, m_householder.diagonal().conjugate());
|
| 74 |
+
}
|
| 75 |
+
|
| 76 |
+
const HouseholderVSequenceType householderV() // const here gives nasty errors and i'm lazy
|
| 77 |
+
{
|
| 78 |
+
eigen_assert(m_isInitialized && "UpperBidiagonalization is not initialized.");
|
| 79 |
+
return HouseholderVSequenceType(m_householder.conjugate(), m_householder.const_derived().template diagonal<1>())
|
| 80 |
+
.setLength(m_householder.cols()-1)
|
| 81 |
+
.setShift(1);
|
| 82 |
+
}
|
| 83 |
+
|
| 84 |
+
protected:
|
| 85 |
+
MatrixType m_householder;
|
| 86 |
+
BidiagonalType m_bidiagonal;
|
| 87 |
+
bool m_isInitialized;
|
| 88 |
+
};
|
| 89 |
+
|
| 90 |
+
// Standard upper bidiagonalization without fancy optimizations
|
| 91 |
+
// This version should be faster for small matrix size
|
| 92 |
+
template<typename MatrixType>
|
| 93 |
+
void upperbidiagonalization_inplace_unblocked(MatrixType& mat,
|
| 94 |
+
typename MatrixType::RealScalar *diagonal,
|
| 95 |
+
typename MatrixType::RealScalar *upper_diagonal,
|
| 96 |
+
typename MatrixType::Scalar* tempData = 0)
|
| 97 |
+
{
|
| 98 |
+
typedef typename MatrixType::Scalar Scalar;
|
| 99 |
+
|
| 100 |
+
Index rows = mat.rows();
|
| 101 |
+
Index cols = mat.cols();
|
| 102 |
+
|
| 103 |
+
typedef Matrix<Scalar,Dynamic,1,ColMajor,MatrixType::MaxRowsAtCompileTime,1> TempType;
|
| 104 |
+
TempType tempVector;
|
| 105 |
+
if(tempData==0)
|
| 106 |
+
{
|
| 107 |
+
tempVector.resize(rows);
|
| 108 |
+
tempData = tempVector.data();
|
| 109 |
+
}
|
| 110 |
+
|
| 111 |
+
for (Index k = 0; /* breaks at k==cols-1 below */ ; ++k)
|
| 112 |
+
{
|
| 113 |
+
Index remainingRows = rows - k;
|
| 114 |
+
Index remainingCols = cols - k - 1;
|
| 115 |
+
|
| 116 |
+
// construct left householder transform in-place in A
|
| 117 |
+
mat.col(k).tail(remainingRows)
|
| 118 |
+
.makeHouseholderInPlace(mat.coeffRef(k,k), diagonal[k]);
|
| 119 |
+
// apply householder transform to remaining part of A on the left
|
| 120 |
+
mat.bottomRightCorner(remainingRows, remainingCols)
|
| 121 |
+
.applyHouseholderOnTheLeft(mat.col(k).tail(remainingRows-1), mat.coeff(k,k), tempData);
|
| 122 |
+
|
| 123 |
+
if(k == cols-1) break;
|
| 124 |
+
|
| 125 |
+
// construct right householder transform in-place in mat
|
| 126 |
+
mat.row(k).tail(remainingCols)
|
| 127 |
+
.makeHouseholderInPlace(mat.coeffRef(k,k+1), upper_diagonal[k]);
|
| 128 |
+
// apply householder transform to remaining part of mat on the left
|
| 129 |
+
mat.bottomRightCorner(remainingRows-1, remainingCols)
|
| 130 |
+
.applyHouseholderOnTheRight(mat.row(k).tail(remainingCols-1).adjoint(), mat.coeff(k,k+1), tempData);
|
| 131 |
+
}
|
| 132 |
+
}
|
| 133 |
+
|
| 134 |
+
/** \internal
|
| 135 |
+
* Helper routine for the block reduction to upper bidiagonal form.
|
| 136 |
+
*
|
| 137 |
+
* Let's partition the matrix A:
|
| 138 |
+
*
|
| 139 |
+
* | A00 A01 |
|
| 140 |
+
* A = | |
|
| 141 |
+
* | A10 A11 |
|
| 142 |
+
*
|
| 143 |
+
* This function reduces to bidiagonal form the left \c rows x \a blockSize vertical panel [A00/A10]
|
| 144 |
+
* and the \a blockSize x \c cols horizontal panel [A00 A01] of the matrix \a A. The bottom-right block A11
|
| 145 |
+
* is updated using matrix-matrix products:
|
| 146 |
+
* A22 -= V * Y^T - X * U^T
|
| 147 |
+
* where V and U contains the left and right Householder vectors. U and V are stored in A10, and A01
|
| 148 |
+
* respectively, and the update matrices X and Y are computed during the reduction.
|
| 149 |
+
*
|
| 150 |
+
*/
|
| 151 |
+
template<typename MatrixType>
|
| 152 |
+
void upperbidiagonalization_blocked_helper(MatrixType& A,
|
| 153 |
+
typename MatrixType::RealScalar *diagonal,
|
| 154 |
+
typename MatrixType::RealScalar *upper_diagonal,
|
| 155 |
+
Index bs,
|
| 156 |
+
Ref<Matrix<typename MatrixType::Scalar, Dynamic, Dynamic,
|
| 157 |
+
traits<MatrixType>::Flags & RowMajorBit> > X,
|
| 158 |
+
Ref<Matrix<typename MatrixType::Scalar, Dynamic, Dynamic,
|
| 159 |
+
traits<MatrixType>::Flags & RowMajorBit> > Y)
|
| 160 |
+
{
|
| 161 |
+
typedef typename MatrixType::Scalar Scalar;
|
| 162 |
+
typedef typename MatrixType::RealScalar RealScalar;
|
| 163 |
+
typedef typename NumTraits<RealScalar>::Literal Literal;
|
| 164 |
+
static const int StorageOrder =
|
| 165 |
+
(traits<MatrixType>::Flags & RowMajorBit) ? RowMajor : ColMajor;
|
| 166 |
+
typedef InnerStride<StorageOrder == ColMajor ? 1 : Dynamic> ColInnerStride;
|
| 167 |
+
typedef InnerStride<StorageOrder == ColMajor ? Dynamic : 1> RowInnerStride;
|
| 168 |
+
typedef Ref<Matrix<Scalar, Dynamic, 1>, 0, ColInnerStride> SubColumnType;
|
| 169 |
+
typedef Ref<Matrix<Scalar, 1, Dynamic>, 0, RowInnerStride> SubRowType;
|
| 170 |
+
typedef Ref<Matrix<Scalar, Dynamic, Dynamic, StorageOrder > > SubMatType;
|
| 171 |
+
|
| 172 |
+
Index brows = A.rows();
|
| 173 |
+
Index bcols = A.cols();
|
| 174 |
+
|
| 175 |
+
Scalar tau_u, tau_u_prev(0), tau_v;
|
| 176 |
+
|
| 177 |
+
for(Index k = 0; k < bs; ++k)
|
| 178 |
+
{
|
| 179 |
+
Index remainingRows = brows - k;
|
| 180 |
+
Index remainingCols = bcols - k - 1;
|
| 181 |
+
|
| 182 |
+
SubMatType X_k1( X.block(k,0, remainingRows,k) );
|
| 183 |
+
SubMatType V_k1( A.block(k,0, remainingRows,k) );
|
| 184 |
+
|
| 185 |
+
// 1 - update the k-th column of A
|
| 186 |
+
SubColumnType v_k = A.col(k).tail(remainingRows);
|
| 187 |
+
v_k -= V_k1 * Y.row(k).head(k).adjoint();
|
| 188 |
+
if(k) v_k -= X_k1 * A.col(k).head(k);
|
| 189 |
+
|
| 190 |
+
// 2 - construct left Householder transform in-place
|
| 191 |
+
v_k.makeHouseholderInPlace(tau_v, diagonal[k]);
|
| 192 |
+
|
| 193 |
+
if(k+1<bcols)
|
| 194 |
+
{
|
| 195 |
+
SubMatType Y_k ( Y.block(k+1,0, remainingCols, k+1) );
|
| 196 |
+
SubMatType U_k1 ( A.block(0,k+1, k,remainingCols) );
|
| 197 |
+
|
| 198 |
+
// this eases the application of Householder transforAions
|
| 199 |
+
// A(k,k) will store tau_v later
|
| 200 |
+
A(k,k) = Scalar(1);
|
| 201 |
+
|
| 202 |
+
// 3 - Compute y_k^T = tau_v * ( A^T*v_k - Y_k-1*V_k-1^T*v_k - U_k-1*X_k-1^T*v_k )
|
| 203 |
+
{
|
| 204 |
+
SubColumnType y_k( Y.col(k).tail(remainingCols) );
|
| 205 |
+
|
| 206 |
+
// let's use the beginning of column k of Y as a temporary vector
|
| 207 |
+
SubColumnType tmp( Y.col(k).head(k) );
|
| 208 |
+
y_k.noalias() = A.block(k,k+1, remainingRows,remainingCols).adjoint() * v_k; // bottleneck
|
| 209 |
+
tmp.noalias() = V_k1.adjoint() * v_k;
|
| 210 |
+
y_k.noalias() -= Y_k.leftCols(k) * tmp;
|
| 211 |
+
tmp.noalias() = X_k1.adjoint() * v_k;
|
| 212 |
+
y_k.noalias() -= U_k1.adjoint() * tmp;
|
| 213 |
+
y_k *= numext::conj(tau_v);
|
| 214 |
+
}
|
| 215 |
+
|
| 216 |
+
// 4 - update k-th row of A (it will become u_k)
|
| 217 |
+
SubRowType u_k( A.row(k).tail(remainingCols) );
|
| 218 |
+
u_k = u_k.conjugate();
|
| 219 |
+
{
|
| 220 |
+
u_k -= Y_k * A.row(k).head(k+1).adjoint();
|
| 221 |
+
if(k) u_k -= U_k1.adjoint() * X.row(k).head(k).adjoint();
|
| 222 |
+
}
|
| 223 |
+
|
| 224 |
+
// 5 - construct right Householder transform in-place
|
| 225 |
+
u_k.makeHouseholderInPlace(tau_u, upper_diagonal[k]);
|
| 226 |
+
|
| 227 |
+
// this eases the application of Householder transformations
|
| 228 |
+
// A(k,k+1) will store tau_u later
|
| 229 |
+
A(k,k+1) = Scalar(1);
|
| 230 |
+
|
| 231 |
+
// 6 - Compute x_k = tau_u * ( A*u_k - X_k-1*U_k-1^T*u_k - V_k*Y_k^T*u_k )
|
| 232 |
+
{
|
| 233 |
+
SubColumnType x_k ( X.col(k).tail(remainingRows-1) );
|
| 234 |
+
|
| 235 |
+
// let's use the beginning of column k of X as a temporary vectors
|
| 236 |
+
// note that tmp0 and tmp1 overlaps
|
| 237 |
+
SubColumnType tmp0 ( X.col(k).head(k) ),
|
| 238 |
+
tmp1 ( X.col(k).head(k+1) );
|
| 239 |
+
|
| 240 |
+
x_k.noalias() = A.block(k+1,k+1, remainingRows-1,remainingCols) * u_k.transpose(); // bottleneck
|
| 241 |
+
tmp0.noalias() = U_k1 * u_k.transpose();
|
| 242 |
+
x_k.noalias() -= X_k1.bottomRows(remainingRows-1) * tmp0;
|
| 243 |
+
tmp1.noalias() = Y_k.adjoint() * u_k.transpose();
|
| 244 |
+
x_k.noalias() -= A.block(k+1,0, remainingRows-1,k+1) * tmp1;
|
| 245 |
+
x_k *= numext::conj(tau_u);
|
| 246 |
+
tau_u = numext::conj(tau_u);
|
| 247 |
+
u_k = u_k.conjugate();
|
| 248 |
+
}
|
| 249 |
+
|
| 250 |
+
if(k>0) A.coeffRef(k-1,k) = tau_u_prev;
|
| 251 |
+
tau_u_prev = tau_u;
|
| 252 |
+
}
|
| 253 |
+
else
|
| 254 |
+
A.coeffRef(k-1,k) = tau_u_prev;
|
| 255 |
+
|
| 256 |
+
A.coeffRef(k,k) = tau_v;
|
| 257 |
+
}
|
| 258 |
+
|
| 259 |
+
if(bs<bcols)
|
| 260 |
+
A.coeffRef(bs-1,bs) = tau_u_prev;
|
| 261 |
+
|
| 262 |
+
// update A22
|
| 263 |
+
if(bcols>bs && brows>bs)
|
| 264 |
+
{
|
| 265 |
+
SubMatType A11( A.bottomRightCorner(brows-bs,bcols-bs) );
|
| 266 |
+
SubMatType A10( A.block(bs,0, brows-bs,bs) );
|
| 267 |
+
SubMatType A01( A.block(0,bs, bs,bcols-bs) );
|
| 268 |
+
Scalar tmp = A01(bs-1,0);
|
| 269 |
+
A01(bs-1,0) = Literal(1);
|
| 270 |
+
A11.noalias() -= A10 * Y.topLeftCorner(bcols,bs).bottomRows(bcols-bs).adjoint();
|
| 271 |
+
A11.noalias() -= X.topLeftCorner(brows,bs).bottomRows(brows-bs) * A01;
|
| 272 |
+
A01(bs-1,0) = tmp;
|
| 273 |
+
}
|
| 274 |
+
}
|
| 275 |
+
|
| 276 |
+
/** \internal
|
| 277 |
+
*
|
| 278 |
+
* Implementation of a block-bidiagonal reduction.
|
| 279 |
+
* It is based on the following paper:
|
| 280 |
+
* The Design of a Parallel Dense Linear Algebra Software Library: Reduction to Hessenberg, Tridiagonal, and Bidiagonal Form.
|
| 281 |
+
* by Jaeyoung Choi, Jack J. Dongarra, David W. Walker. (1995)
|
| 282 |
+
* section 3.3
|
| 283 |
+
*/
|
| 284 |
+
template<typename MatrixType, typename BidiagType>
|
| 285 |
+
void upperbidiagonalization_inplace_blocked(MatrixType& A, BidiagType& bidiagonal,
|
| 286 |
+
Index maxBlockSize=32,
|
| 287 |
+
typename MatrixType::Scalar* /*tempData*/ = 0)
|
| 288 |
+
{
|
| 289 |
+
typedef typename MatrixType::Scalar Scalar;
|
| 290 |
+
typedef Block<MatrixType,Dynamic,Dynamic> BlockType;
|
| 291 |
+
|
| 292 |
+
Index rows = A.rows();
|
| 293 |
+
Index cols = A.cols();
|
| 294 |
+
Index size = (std::min)(rows, cols);
|
| 295 |
+
|
| 296 |
+
// X and Y are work space
|
| 297 |
+
enum { StorageOrder = (traits<MatrixType>::Flags & RowMajorBit) ? RowMajor : ColMajor };
|
| 298 |
+
Matrix<Scalar,
|
| 299 |
+
MatrixType::RowsAtCompileTime,
|
| 300 |
+
Dynamic,
|
| 301 |
+
StorageOrder,
|
| 302 |
+
MatrixType::MaxRowsAtCompileTime> X(rows,maxBlockSize);
|
| 303 |
+
Matrix<Scalar,
|
| 304 |
+
MatrixType::ColsAtCompileTime,
|
| 305 |
+
Dynamic,
|
| 306 |
+
StorageOrder,
|
| 307 |
+
MatrixType::MaxColsAtCompileTime> Y(cols,maxBlockSize);
|
| 308 |
+
Index blockSize = (std::min)(maxBlockSize,size);
|
| 309 |
+
|
| 310 |
+
Index k = 0;
|
| 311 |
+
for(k = 0; k < size; k += blockSize)
|
| 312 |
+
{
|
| 313 |
+
Index bs = (std::min)(size-k,blockSize); // actual size of the block
|
| 314 |
+
Index brows = rows - k; // rows of the block
|
| 315 |
+
Index bcols = cols - k; // columns of the block
|
| 316 |
+
|
| 317 |
+
// partition the matrix A:
|
| 318 |
+
//
|
| 319 |
+
// | A00 A01 A02 |
|
| 320 |
+
// | |
|
| 321 |
+
// A = | A10 A11 A12 |
|
| 322 |
+
// | |
|
| 323 |
+
// | A20 A21 A22 |
|
| 324 |
+
//
|
| 325 |
+
// where A11 is a bs x bs diagonal block,
|
| 326 |
+
// and let:
|
| 327 |
+
// | A11 A12 |
|
| 328 |
+
// B = | |
|
| 329 |
+
// | A21 A22 |
|
| 330 |
+
|
| 331 |
+
BlockType B = A.block(k,k,brows,bcols);
|
| 332 |
+
|
| 333 |
+
// This stage performs the bidiagonalization of A11, A21, A12, and updating of A22.
|
| 334 |
+
// Finally, the algorithm continue on the updated A22.
|
| 335 |
+
//
|
| 336 |
+
// However, if B is too small, or A22 empty, then let's use an unblocked strategy
|
| 337 |
+
if(k+bs==cols || bcols<48) // somewhat arbitrary threshold
|
| 338 |
+
{
|
| 339 |
+
upperbidiagonalization_inplace_unblocked(B,
|
| 340 |
+
&(bidiagonal.template diagonal<0>().coeffRef(k)),
|
| 341 |
+
&(bidiagonal.template diagonal<1>().coeffRef(k)),
|
| 342 |
+
X.data()
|
| 343 |
+
);
|
| 344 |
+
break; // We're done
|
| 345 |
+
}
|
| 346 |
+
else
|
| 347 |
+
{
|
| 348 |
+
upperbidiagonalization_blocked_helper<BlockType>( B,
|
| 349 |
+
&(bidiagonal.template diagonal<0>().coeffRef(k)),
|
| 350 |
+
&(bidiagonal.template diagonal<1>().coeffRef(k)),
|
| 351 |
+
bs,
|
| 352 |
+
X.topLeftCorner(brows,bs),
|
| 353 |
+
Y.topLeftCorner(bcols,bs)
|
| 354 |
+
);
|
| 355 |
+
}
|
| 356 |
+
}
|
| 357 |
+
}
|
| 358 |
+
|
| 359 |
+
template<typename _MatrixType>
|
| 360 |
+
UpperBidiagonalization<_MatrixType>& UpperBidiagonalization<_MatrixType>::computeUnblocked(const _MatrixType& matrix)
|
| 361 |
+
{
|
| 362 |
+
Index rows = matrix.rows();
|
| 363 |
+
Index cols = matrix.cols();
|
| 364 |
+
EIGEN_ONLY_USED_FOR_DEBUG(cols);
|
| 365 |
+
|
| 366 |
+
eigen_assert(rows >= cols && "UpperBidiagonalization is only for Arices satisfying rows>=cols.");
|
| 367 |
+
|
| 368 |
+
m_householder = matrix;
|
| 369 |
+
|
| 370 |
+
ColVectorType temp(rows);
|
| 371 |
+
|
| 372 |
+
upperbidiagonalization_inplace_unblocked(m_householder,
|
| 373 |
+
&(m_bidiagonal.template diagonal<0>().coeffRef(0)),
|
| 374 |
+
&(m_bidiagonal.template diagonal<1>().coeffRef(0)),
|
| 375 |
+
temp.data());
|
| 376 |
+
|
| 377 |
+
m_isInitialized = true;
|
| 378 |
+
return *this;
|
| 379 |
+
}
|
| 380 |
+
|
| 381 |
+
template<typename _MatrixType>
|
| 382 |
+
UpperBidiagonalization<_MatrixType>& UpperBidiagonalization<_MatrixType>::compute(const _MatrixType& matrix)
|
| 383 |
+
{
|
| 384 |
+
Index rows = matrix.rows();
|
| 385 |
+
Index cols = matrix.cols();
|
| 386 |
+
EIGEN_ONLY_USED_FOR_DEBUG(rows);
|
| 387 |
+
EIGEN_ONLY_USED_FOR_DEBUG(cols);
|
| 388 |
+
|
| 389 |
+
eigen_assert(rows >= cols && "UpperBidiagonalization is only for Arices satisfying rows>=cols.");
|
| 390 |
+
|
| 391 |
+
m_householder = matrix;
|
| 392 |
+
upperbidiagonalization_inplace_blocked(m_householder, m_bidiagonal);
|
| 393 |
+
|
| 394 |
+
m_isInitialized = true;
|
| 395 |
+
return *this;
|
| 396 |
+
}
|
| 397 |
+
|
| 398 |
+
#if 0
|
| 399 |
+
/** \return the Householder QR decomposition of \c *this.
|
| 400 |
+
*
|
| 401 |
+
* \sa class Bidiagonalization
|
| 402 |
+
*/
|
| 403 |
+
template<typename Derived>
|
| 404 |
+
const UpperBidiagonalization<typename MatrixBase<Derived>::PlainObject>
|
| 405 |
+
MatrixBase<Derived>::bidiagonalization() const
|
| 406 |
+
{
|
| 407 |
+
return UpperBidiagonalization<PlainObject>(eval());
|
| 408 |
+
}
|
| 409 |
+
#endif
|
| 410 |
+
|
| 411 |
+
} // end namespace internal
|
| 412 |
+
|
| 413 |
+
} // end namespace Eigen
|
| 414 |
+
|
| 415 |
+
#endif // EIGEN_BIDIAGONALIZATION_H
|
include/eigen/Eigen/src/plugins/CommonCwiseBinaryOps.h
ADDED
|
@@ -0,0 +1,115 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// This file is part of Eigen, a lightweight C++ template library
|
| 2 |
+
// for linear algebra.
|
| 3 |
+
//
|
| 4 |
+
// Copyright (C) 2008-2016 Gael Guennebaud <gael.guennebaud@inria.fr>
|
| 5 |
+
// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
|
| 6 |
+
//
|
| 7 |
+
// This Source Code Form is subject to the terms of the Mozilla
|
| 8 |
+
// Public License v. 2.0. If a copy of the MPL was not distributed
|
| 9 |
+
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
| 10 |
+
|
| 11 |
+
// This file is a base class plugin containing common coefficient wise functions.
|
| 12 |
+
|
| 13 |
+
/** \returns an expression of the difference of \c *this and \a other
|
| 14 |
+
*
|
| 15 |
+
* \note If you want to substract a given scalar from all coefficients, see Cwise::operator-().
|
| 16 |
+
*
|
| 17 |
+
* \sa class CwiseBinaryOp, operator-=()
|
| 18 |
+
*/
|
| 19 |
+
EIGEN_MAKE_CWISE_BINARY_OP(operator-,difference)
|
| 20 |
+
|
| 21 |
+
/** \returns an expression of the sum of \c *this and \a other
|
| 22 |
+
*
|
| 23 |
+
* \note If you want to add a given scalar to all coefficients, see Cwise::operator+().
|
| 24 |
+
*
|
| 25 |
+
* \sa class CwiseBinaryOp, operator+=()
|
| 26 |
+
*/
|
| 27 |
+
EIGEN_MAKE_CWISE_BINARY_OP(operator+,sum)
|
| 28 |
+
|
| 29 |
+
/** \returns an expression of a custom coefficient-wise operator \a func of *this and \a other
|
| 30 |
+
*
|
| 31 |
+
* The template parameter \a CustomBinaryOp is the type of the functor
|
| 32 |
+
* of the custom operator (see class CwiseBinaryOp for an example)
|
| 33 |
+
*
|
| 34 |
+
* Here is an example illustrating the use of custom functors:
|
| 35 |
+
* \include class_CwiseBinaryOp.cpp
|
| 36 |
+
* Output: \verbinclude class_CwiseBinaryOp.out
|
| 37 |
+
*
|
| 38 |
+
* \sa class CwiseBinaryOp, operator+(), operator-(), cwiseProduct()
|
| 39 |
+
*/
|
| 40 |
+
template<typename CustomBinaryOp, typename OtherDerived>
|
| 41 |
+
EIGEN_DEVICE_FUNC
|
| 42 |
+
EIGEN_STRONG_INLINE const CwiseBinaryOp<CustomBinaryOp, const Derived, const OtherDerived>
|
| 43 |
+
binaryExpr(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other, const CustomBinaryOp& func = CustomBinaryOp()) const
|
| 44 |
+
{
|
| 45 |
+
return CwiseBinaryOp<CustomBinaryOp, const Derived, const OtherDerived>(derived(), other.derived(), func);
|
| 46 |
+
}
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
#ifndef EIGEN_PARSED_BY_DOXYGEN
|
| 50 |
+
EIGEN_MAKE_SCALAR_BINARY_OP(operator*,product)
|
| 51 |
+
#else
|
| 52 |
+
/** \returns an expression of \c *this scaled by the scalar factor \a scalar
|
| 53 |
+
*
|
| 54 |
+
* \tparam T is the scalar type of \a scalar. It must be compatible with the scalar type of the given expression.
|
| 55 |
+
*/
|
| 56 |
+
template<typename T>
|
| 57 |
+
const CwiseBinaryOp<internal::scalar_product_op<Scalar,T>,Derived,Constant<T> > operator*(const T& scalar) const;
|
| 58 |
+
/** \returns an expression of \a expr scaled by the scalar factor \a scalar
|
| 59 |
+
*
|
| 60 |
+
* \tparam T is the scalar type of \a scalar. It must be compatible with the scalar type of the given expression.
|
| 61 |
+
*/
|
| 62 |
+
template<typename T> friend
|
| 63 |
+
const CwiseBinaryOp<internal::scalar_product_op<T,Scalar>,Constant<T>,Derived> operator*(const T& scalar, const StorageBaseType& expr);
|
| 64 |
+
#endif
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
#ifndef EIGEN_PARSED_BY_DOXYGEN
|
| 69 |
+
EIGEN_MAKE_SCALAR_BINARY_OP_ONTHERIGHT(operator/,quotient)
|
| 70 |
+
#else
|
| 71 |
+
/** \returns an expression of \c *this divided by the scalar value \a scalar
|
| 72 |
+
*
|
| 73 |
+
* \tparam T is the scalar type of \a scalar. It must be compatible with the scalar type of the given expression.
|
| 74 |
+
*/
|
| 75 |
+
template<typename T>
|
| 76 |
+
const CwiseBinaryOp<internal::scalar_quotient_op<Scalar,T>,Derived,Constant<T> > operator/(const T& scalar) const;
|
| 77 |
+
#endif
|
| 78 |
+
|
| 79 |
+
/** \returns an expression of the coefficient-wise boolean \b and operator of \c *this and \a other
|
| 80 |
+
*
|
| 81 |
+
* \warning this operator is for expression of bool only.
|
| 82 |
+
*
|
| 83 |
+
* Example: \include Cwise_boolean_and.cpp
|
| 84 |
+
* Output: \verbinclude Cwise_boolean_and.out
|
| 85 |
+
*
|
| 86 |
+
* \sa operator||(), select()
|
| 87 |
+
*/
|
| 88 |
+
template<typename OtherDerived>
|
| 89 |
+
EIGEN_DEVICE_FUNC
|
| 90 |
+
inline const CwiseBinaryOp<internal::scalar_boolean_and_op, const Derived, const OtherDerived>
|
| 91 |
+
operator&&(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
|
| 92 |
+
{
|
| 93 |
+
EIGEN_STATIC_ASSERT((internal::is_same<bool,Scalar>::value && internal::is_same<bool,typename OtherDerived::Scalar>::value),
|
| 94 |
+
THIS_METHOD_IS_ONLY_FOR_EXPRESSIONS_OF_BOOL);
|
| 95 |
+
return CwiseBinaryOp<internal::scalar_boolean_and_op, const Derived, const OtherDerived>(derived(),other.derived());
|
| 96 |
+
}
|
| 97 |
+
|
| 98 |
+
/** \returns an expression of the coefficient-wise boolean \b or operator of \c *this and \a other
|
| 99 |
+
*
|
| 100 |
+
* \warning this operator is for expression of bool only.
|
| 101 |
+
*
|
| 102 |
+
* Example: \include Cwise_boolean_or.cpp
|
| 103 |
+
* Output: \verbinclude Cwise_boolean_or.out
|
| 104 |
+
*
|
| 105 |
+
* \sa operator&&(), select()
|
| 106 |
+
*/
|
| 107 |
+
template<typename OtherDerived>
|
| 108 |
+
EIGEN_DEVICE_FUNC
|
| 109 |
+
inline const CwiseBinaryOp<internal::scalar_boolean_or_op, const Derived, const OtherDerived>
|
| 110 |
+
operator||(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
|
| 111 |
+
{
|
| 112 |
+
EIGEN_STATIC_ASSERT((internal::is_same<bool,Scalar>::value && internal::is_same<bool,typename OtherDerived::Scalar>::value),
|
| 113 |
+
THIS_METHOD_IS_ONLY_FOR_EXPRESSIONS_OF_BOOL);
|
| 114 |
+
return CwiseBinaryOp<internal::scalar_boolean_or_op, const Derived, const OtherDerived>(derived(),other.derived());
|
| 115 |
+
}
|
include/eigen/Eigen/src/plugins/IndexedViewMethods.h
ADDED
|
@@ -0,0 +1,262 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// This file is part of Eigen, a lightweight C++ template library
|
| 2 |
+
// for linear algebra.
|
| 3 |
+
//
|
| 4 |
+
// Copyright (C) 2017 Gael Guennebaud <gael.guennebaud@inria.fr>
|
| 5 |
+
//
|
| 6 |
+
// This Source Code Form is subject to the terms of the Mozilla
|
| 7 |
+
// Public License v. 2.0. If a copy of the MPL was not distributed
|
| 8 |
+
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
| 9 |
+
|
| 10 |
+
#if !defined(EIGEN_PARSED_BY_DOXYGEN)
|
| 11 |
+
|
| 12 |
+
// This file is automatically included twice to generate const and non-const versions
|
| 13 |
+
|
| 14 |
+
#ifndef EIGEN_INDEXED_VIEW_METHOD_2ND_PASS
|
| 15 |
+
#define EIGEN_INDEXED_VIEW_METHOD_CONST const
|
| 16 |
+
#define EIGEN_INDEXED_VIEW_METHOD_TYPE ConstIndexedViewType
|
| 17 |
+
#else
|
| 18 |
+
#define EIGEN_INDEXED_VIEW_METHOD_CONST
|
| 19 |
+
#define EIGEN_INDEXED_VIEW_METHOD_TYPE IndexedViewType
|
| 20 |
+
#endif
|
| 21 |
+
|
| 22 |
+
#ifndef EIGEN_INDEXED_VIEW_METHOD_2ND_PASS
|
| 23 |
+
protected:
|
| 24 |
+
|
| 25 |
+
// define some aliases to ease readability
|
| 26 |
+
|
| 27 |
+
template<typename Indices>
|
| 28 |
+
struct IvcRowType : public internal::IndexedViewCompatibleType<Indices,RowsAtCompileTime> {};
|
| 29 |
+
|
| 30 |
+
template<typename Indices>
|
| 31 |
+
struct IvcColType : public internal::IndexedViewCompatibleType<Indices,ColsAtCompileTime> {};
|
| 32 |
+
|
| 33 |
+
template<typename Indices>
|
| 34 |
+
struct IvcType : public internal::IndexedViewCompatibleType<Indices,SizeAtCompileTime> {};
|
| 35 |
+
|
| 36 |
+
typedef typename internal::IndexedViewCompatibleType<Index,1>::type IvcIndex;
|
| 37 |
+
|
| 38 |
+
template<typename Indices>
|
| 39 |
+
typename IvcRowType<Indices>::type
|
| 40 |
+
ivcRow(const Indices& indices) const {
|
| 41 |
+
return internal::makeIndexedViewCompatible(indices, internal::variable_if_dynamic<Index,RowsAtCompileTime>(derived().rows()),Specialized);
|
| 42 |
+
}
|
| 43 |
+
|
| 44 |
+
template<typename Indices>
|
| 45 |
+
typename IvcColType<Indices>::type
|
| 46 |
+
ivcCol(const Indices& indices) const {
|
| 47 |
+
return internal::makeIndexedViewCompatible(indices, internal::variable_if_dynamic<Index,ColsAtCompileTime>(derived().cols()),Specialized);
|
| 48 |
+
}
|
| 49 |
+
|
| 50 |
+
template<typename Indices>
|
| 51 |
+
typename IvcColType<Indices>::type
|
| 52 |
+
ivcSize(const Indices& indices) const {
|
| 53 |
+
return internal::makeIndexedViewCompatible(indices, internal::variable_if_dynamic<Index,SizeAtCompileTime>(derived().size()),Specialized);
|
| 54 |
+
}
|
| 55 |
+
|
| 56 |
+
public:
|
| 57 |
+
|
| 58 |
+
#endif
|
| 59 |
+
|
| 60 |
+
template<typename RowIndices, typename ColIndices>
|
| 61 |
+
struct EIGEN_INDEXED_VIEW_METHOD_TYPE {
|
| 62 |
+
typedef IndexedView<EIGEN_INDEXED_VIEW_METHOD_CONST Derived,
|
| 63 |
+
typename IvcRowType<RowIndices>::type,
|
| 64 |
+
typename IvcColType<ColIndices>::type> type;
|
| 65 |
+
};
|
| 66 |
+
|
| 67 |
+
// This is the generic version
|
| 68 |
+
|
| 69 |
+
template<typename RowIndices, typename ColIndices>
|
| 70 |
+
typename internal::enable_if<internal::valid_indexed_view_overload<RowIndices,ColIndices>::value
|
| 71 |
+
&& internal::traits<typename EIGEN_INDEXED_VIEW_METHOD_TYPE<RowIndices,ColIndices>::type>::ReturnAsIndexedView,
|
| 72 |
+
typename EIGEN_INDEXED_VIEW_METHOD_TYPE<RowIndices,ColIndices>::type >::type
|
| 73 |
+
operator()(const RowIndices& rowIndices, const ColIndices& colIndices) EIGEN_INDEXED_VIEW_METHOD_CONST
|
| 74 |
+
{
|
| 75 |
+
return typename EIGEN_INDEXED_VIEW_METHOD_TYPE<RowIndices,ColIndices>::type
|
| 76 |
+
(derived(), ivcRow(rowIndices), ivcCol(colIndices));
|
| 77 |
+
}
|
| 78 |
+
|
| 79 |
+
// The following overload returns a Block<> object
|
| 80 |
+
|
| 81 |
+
template<typename RowIndices, typename ColIndices>
|
| 82 |
+
typename internal::enable_if<internal::valid_indexed_view_overload<RowIndices,ColIndices>::value
|
| 83 |
+
&& internal::traits<typename EIGEN_INDEXED_VIEW_METHOD_TYPE<RowIndices,ColIndices>::type>::ReturnAsBlock,
|
| 84 |
+
typename internal::traits<typename EIGEN_INDEXED_VIEW_METHOD_TYPE<RowIndices,ColIndices>::type>::BlockType>::type
|
| 85 |
+
operator()(const RowIndices& rowIndices, const ColIndices& colIndices) EIGEN_INDEXED_VIEW_METHOD_CONST
|
| 86 |
+
{
|
| 87 |
+
typedef typename internal::traits<typename EIGEN_INDEXED_VIEW_METHOD_TYPE<RowIndices,ColIndices>::type>::BlockType BlockType;
|
| 88 |
+
typename IvcRowType<RowIndices>::type actualRowIndices = ivcRow(rowIndices);
|
| 89 |
+
typename IvcColType<ColIndices>::type actualColIndices = ivcCol(colIndices);
|
| 90 |
+
return BlockType(derived(),
|
| 91 |
+
internal::first(actualRowIndices),
|
| 92 |
+
internal::first(actualColIndices),
|
| 93 |
+
internal::index_list_size(actualRowIndices),
|
| 94 |
+
internal::index_list_size(actualColIndices));
|
| 95 |
+
}
|
| 96 |
+
|
| 97 |
+
// The following overload returns a Scalar
|
| 98 |
+
|
| 99 |
+
template<typename RowIndices, typename ColIndices>
|
| 100 |
+
typename internal::enable_if<internal::valid_indexed_view_overload<RowIndices,ColIndices>::value
|
| 101 |
+
&& internal::traits<typename EIGEN_INDEXED_VIEW_METHOD_TYPE<RowIndices,ColIndices>::type>::ReturnAsScalar,
|
| 102 |
+
CoeffReturnType >::type
|
| 103 |
+
operator()(const RowIndices& rowIndices, const ColIndices& colIndices) EIGEN_INDEXED_VIEW_METHOD_CONST
|
| 104 |
+
{
|
| 105 |
+
return Base::operator()(internal::eval_expr_given_size(rowIndices,rows()),internal::eval_expr_given_size(colIndices,cols()));
|
| 106 |
+
}
|
| 107 |
+
|
| 108 |
+
#if EIGEN_HAS_STATIC_ARRAY_TEMPLATE
|
| 109 |
+
|
| 110 |
+
// The following three overloads are needed to handle raw Index[N] arrays.
|
| 111 |
+
|
| 112 |
+
template<typename RowIndicesT, std::size_t RowIndicesN, typename ColIndices>
|
| 113 |
+
IndexedView<EIGEN_INDEXED_VIEW_METHOD_CONST Derived,const RowIndicesT (&)[RowIndicesN],typename IvcColType<ColIndices>::type>
|
| 114 |
+
operator()(const RowIndicesT (&rowIndices)[RowIndicesN], const ColIndices& colIndices) EIGEN_INDEXED_VIEW_METHOD_CONST
|
| 115 |
+
{
|
| 116 |
+
return IndexedView<EIGEN_INDEXED_VIEW_METHOD_CONST Derived,const RowIndicesT (&)[RowIndicesN],typename IvcColType<ColIndices>::type>
|
| 117 |
+
(derived(), rowIndices, ivcCol(colIndices));
|
| 118 |
+
}
|
| 119 |
+
|
| 120 |
+
template<typename RowIndices, typename ColIndicesT, std::size_t ColIndicesN>
|
| 121 |
+
IndexedView<EIGEN_INDEXED_VIEW_METHOD_CONST Derived,typename IvcRowType<RowIndices>::type, const ColIndicesT (&)[ColIndicesN]>
|
| 122 |
+
operator()(const RowIndices& rowIndices, const ColIndicesT (&colIndices)[ColIndicesN]) EIGEN_INDEXED_VIEW_METHOD_CONST
|
| 123 |
+
{
|
| 124 |
+
return IndexedView<EIGEN_INDEXED_VIEW_METHOD_CONST Derived,typename IvcRowType<RowIndices>::type,const ColIndicesT (&)[ColIndicesN]>
|
| 125 |
+
(derived(), ivcRow(rowIndices), colIndices);
|
| 126 |
+
}
|
| 127 |
+
|
| 128 |
+
template<typename RowIndicesT, std::size_t RowIndicesN, typename ColIndicesT, std::size_t ColIndicesN>
|
| 129 |
+
IndexedView<EIGEN_INDEXED_VIEW_METHOD_CONST Derived,const RowIndicesT (&)[RowIndicesN], const ColIndicesT (&)[ColIndicesN]>
|
| 130 |
+
operator()(const RowIndicesT (&rowIndices)[RowIndicesN], const ColIndicesT (&colIndices)[ColIndicesN]) EIGEN_INDEXED_VIEW_METHOD_CONST
|
| 131 |
+
{
|
| 132 |
+
return IndexedView<EIGEN_INDEXED_VIEW_METHOD_CONST Derived,const RowIndicesT (&)[RowIndicesN],const ColIndicesT (&)[ColIndicesN]>
|
| 133 |
+
(derived(), rowIndices, colIndices);
|
| 134 |
+
}
|
| 135 |
+
|
| 136 |
+
#endif // EIGEN_HAS_STATIC_ARRAY_TEMPLATE
|
| 137 |
+
|
| 138 |
+
// Overloads for 1D vectors/arrays
|
| 139 |
+
|
| 140 |
+
template<typename Indices>
|
| 141 |
+
typename internal::enable_if<
|
| 142 |
+
IsRowMajor && (!(internal::get_compile_time_incr<typename IvcType<Indices>::type>::value==1 || internal::is_valid_index_type<Indices>::value)),
|
| 143 |
+
IndexedView<EIGEN_INDEXED_VIEW_METHOD_CONST Derived,IvcIndex,typename IvcType<Indices>::type> >::type
|
| 144 |
+
operator()(const Indices& indices) EIGEN_INDEXED_VIEW_METHOD_CONST
|
| 145 |
+
{
|
| 146 |
+
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
|
| 147 |
+
return IndexedView<EIGEN_INDEXED_VIEW_METHOD_CONST Derived,IvcIndex,typename IvcType<Indices>::type>
|
| 148 |
+
(derived(), IvcIndex(0), ivcCol(indices));
|
| 149 |
+
}
|
| 150 |
+
|
| 151 |
+
template<typename Indices>
|
| 152 |
+
typename internal::enable_if<
|
| 153 |
+
(!IsRowMajor) && (!(internal::get_compile_time_incr<typename IvcType<Indices>::type>::value==1 || internal::is_valid_index_type<Indices>::value)),
|
| 154 |
+
IndexedView<EIGEN_INDEXED_VIEW_METHOD_CONST Derived,typename IvcType<Indices>::type,IvcIndex> >::type
|
| 155 |
+
operator()(const Indices& indices) EIGEN_INDEXED_VIEW_METHOD_CONST
|
| 156 |
+
{
|
| 157 |
+
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
|
| 158 |
+
return IndexedView<EIGEN_INDEXED_VIEW_METHOD_CONST Derived,typename IvcType<Indices>::type,IvcIndex>
|
| 159 |
+
(derived(), ivcRow(indices), IvcIndex(0));
|
| 160 |
+
}
|
| 161 |
+
|
| 162 |
+
template<typename Indices>
|
| 163 |
+
typename internal::enable_if<
|
| 164 |
+
(internal::get_compile_time_incr<typename IvcType<Indices>::type>::value==1) && (!internal::is_valid_index_type<Indices>::value) && (!symbolic::is_symbolic<Indices>::value),
|
| 165 |
+
VectorBlock<EIGEN_INDEXED_VIEW_METHOD_CONST Derived,internal::array_size<Indices>::value> >::type
|
| 166 |
+
operator()(const Indices& indices) EIGEN_INDEXED_VIEW_METHOD_CONST
|
| 167 |
+
{
|
| 168 |
+
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
|
| 169 |
+
typename IvcType<Indices>::type actualIndices = ivcSize(indices);
|
| 170 |
+
return VectorBlock<EIGEN_INDEXED_VIEW_METHOD_CONST Derived,internal::array_size<Indices>::value>
|
| 171 |
+
(derived(), internal::first(actualIndices), internal::index_list_size(actualIndices));
|
| 172 |
+
}
|
| 173 |
+
|
| 174 |
+
template<typename IndexType>
|
| 175 |
+
typename internal::enable_if<symbolic::is_symbolic<IndexType>::value, CoeffReturnType >::type
|
| 176 |
+
operator()(const IndexType& id) EIGEN_INDEXED_VIEW_METHOD_CONST
|
| 177 |
+
{
|
| 178 |
+
return Base::operator()(internal::eval_expr_given_size(id,size()));
|
| 179 |
+
}
|
| 180 |
+
|
| 181 |
+
#if EIGEN_HAS_STATIC_ARRAY_TEMPLATE
|
| 182 |
+
|
| 183 |
+
template<typename IndicesT, std::size_t IndicesN>
|
| 184 |
+
typename internal::enable_if<IsRowMajor,
|
| 185 |
+
IndexedView<EIGEN_INDEXED_VIEW_METHOD_CONST Derived,IvcIndex,const IndicesT (&)[IndicesN]> >::type
|
| 186 |
+
operator()(const IndicesT (&indices)[IndicesN]) EIGEN_INDEXED_VIEW_METHOD_CONST
|
| 187 |
+
{
|
| 188 |
+
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
|
| 189 |
+
return IndexedView<EIGEN_INDEXED_VIEW_METHOD_CONST Derived,IvcIndex,const IndicesT (&)[IndicesN]>
|
| 190 |
+
(derived(), IvcIndex(0), indices);
|
| 191 |
+
}
|
| 192 |
+
|
| 193 |
+
template<typename IndicesT, std::size_t IndicesN>
|
| 194 |
+
typename internal::enable_if<!IsRowMajor,
|
| 195 |
+
IndexedView<EIGEN_INDEXED_VIEW_METHOD_CONST Derived,const IndicesT (&)[IndicesN],IvcIndex> >::type
|
| 196 |
+
operator()(const IndicesT (&indices)[IndicesN]) EIGEN_INDEXED_VIEW_METHOD_CONST
|
| 197 |
+
{
|
| 198 |
+
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
|
| 199 |
+
return IndexedView<EIGEN_INDEXED_VIEW_METHOD_CONST Derived,const IndicesT (&)[IndicesN],IvcIndex>
|
| 200 |
+
(derived(), indices, IvcIndex(0));
|
| 201 |
+
}
|
| 202 |
+
|
| 203 |
+
#endif // EIGEN_HAS_STATIC_ARRAY_TEMPLATE
|
| 204 |
+
|
| 205 |
+
#undef EIGEN_INDEXED_VIEW_METHOD_CONST
|
| 206 |
+
#undef EIGEN_INDEXED_VIEW_METHOD_TYPE
|
| 207 |
+
|
| 208 |
+
#ifndef EIGEN_INDEXED_VIEW_METHOD_2ND_PASS
|
| 209 |
+
#define EIGEN_INDEXED_VIEW_METHOD_2ND_PASS
|
| 210 |
+
#include "IndexedViewMethods.h"
|
| 211 |
+
#undef EIGEN_INDEXED_VIEW_METHOD_2ND_PASS
|
| 212 |
+
#endif
|
| 213 |
+
|
| 214 |
+
#else // EIGEN_PARSED_BY_DOXYGEN
|
| 215 |
+
|
| 216 |
+
/**
|
| 217 |
+
* \returns a generic submatrix view defined by the rows and columns indexed \a rowIndices and \a colIndices respectively.
|
| 218 |
+
*
|
| 219 |
+
* Each parameter must either be:
|
| 220 |
+
* - An integer indexing a single row or column
|
| 221 |
+
* - Eigen::all indexing the full set of respective rows or columns in increasing order
|
| 222 |
+
* - An ArithmeticSequence as returned by the Eigen::seq and Eigen::seqN functions
|
| 223 |
+
* - Any %Eigen's vector/array of integers or expressions
|
| 224 |
+
* - Plain C arrays: \c int[N]
|
| 225 |
+
* - And more generally any type exposing the following two member functions:
|
| 226 |
+
* \code
|
| 227 |
+
* <integral type> operator[](<integral type>) const;
|
| 228 |
+
* <integral type> size() const;
|
| 229 |
+
* \endcode
|
| 230 |
+
* where \c <integral \c type> stands for any integer type compatible with Eigen::Index (i.e. \c std::ptrdiff_t).
|
| 231 |
+
*
|
| 232 |
+
* The last statement implies compatibility with \c std::vector, \c std::valarray, \c std::array, many of the Range-v3's ranges, etc.
|
| 233 |
+
*
|
| 234 |
+
* If the submatrix can be represented using a starting position \c (i,j) and positive sizes \c (rows,columns), then this
|
| 235 |
+
 * method will return a Block object after extraction of the relevant information from the passed arguments. This is the case
|
| 236 |
+
* when all arguments are either:
|
| 237 |
+
* - An integer
|
| 238 |
+
* - Eigen::all
|
| 239 |
+
* - An ArithmeticSequence with compile-time increment strictly equal to 1, as returned by Eigen::seq(a,b), and Eigen::seqN(a,N).
|
| 240 |
+
*
|
| 241 |
+
* Otherwise a more general IndexedView<Derived,RowIndices',ColIndices'> object will be returned, after conversion of the inputs
|
| 242 |
+
* to more suitable types \c RowIndices' and \c ColIndices'.
|
| 243 |
+
*
|
| 244 |
+
 * For 1D vectors and arrays, it is better to use the operator()(const Indices&) overload, which behaves the same way but takes a single parameter.
|
| 245 |
+
*
|
| 246 |
+
* See also this <a href="https://stackoverflow.com/questions/46110917/eigen-replicate-items-along-one-dimension-without-useless-allocations">question</a> and its answer for an example of how to duplicate coefficients.
|
| 247 |
+
*
|
| 248 |
+
* \sa operator()(const Indices&), class Block, class IndexedView, DenseBase::block(Index,Index,Index,Index)
|
| 249 |
+
*/
|
| 250 |
+
template<typename RowIndices, typename ColIndices>
|
| 251 |
+
IndexedView_or_Block
|
| 252 |
+
operator()(const RowIndices& rowIndices, const ColIndices& colIndices);
|
| 253 |
+
|
| 254 |
+
/** This is an overload of operator()(const RowIndices&, const ColIndices&) for 1D vectors or arrays
|
| 255 |
+
*
|
| 256 |
+
* \only_for_vectors
|
| 257 |
+
*/
|
| 258 |
+
template<typename Indices>
|
| 259 |
+
IndexedView_or_VectorBlock
|
| 260 |
+
operator()(const Indices& indices);
|
| 261 |
+
|
| 262 |
+
#endif // EIGEN_PARSED_BY_DOXYGEN
|
include/eigen/doc/CoeffwiseMathFunctionsTable.dox
ADDED
|
@@ -0,0 +1,600 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
namespace Eigen {
|
| 2 |
+
|
| 3 |
+
/** \eigenManualPage CoeffwiseMathFunctions Catalog of coefficient-wise math functions
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
<!-- <span style="font-size:300%; color:red; font-weight: 900;">!WORK IN PROGRESS!</span> -->
|
| 7 |
+
|
| 8 |
+
This table presents a catalog of the coefficient-wise math functions supported by %Eigen.
|
| 9 |
+
In this table, \c a, \c b, refer to Array objects or expressions, and \c m refers to a linear algebra Matrix/Vector object. Standard scalar types are abbreviated as follows:
|
| 10 |
+
- \c int: \c i32
|
| 11 |
+
- \c float: \c f
|
| 12 |
+
- \c double: \c d
|
| 13 |
+
- \c std::complex<float>: \c cf
|
| 14 |
+
- \c std::complex<double>: \c cd
|
| 15 |
+
|
| 16 |
+
For each row, the first column lists the equivalent calls for arrays, and matrices when supported. Of course, all functions are available for matrices by first casting it to an array: \c m.array().
|
| 17 |
+
|
| 18 |
+
The third column gives some hints in the underlying scalar implementation. In most cases, %Eigen does not implement itself the math function but relies on the STL for standard scalar types, or user-provided functions for custom scalar types.
|
| 19 |
+
For instance, some simply call the respective function of the STL while preserving <a href="http://en.cppreference.com/w/cpp/language/adl">argument-dependent lookup</a> for custom types.
|
| 20 |
+
The following:
|
| 21 |
+
\code
|
| 22 |
+
using std::foo;
|
| 23 |
+
foo(a[i]);
|
| 24 |
+
\endcode
|
| 25 |
+
means that the STL's function \c std::foo will be potentially called if it is compatible with the underlying scalar type. If not, then the user must ensure that an overload of the function foo is available for the given scalar type (usually defined in the same namespace as the given scalar type).
|
| 26 |
+
This also means that, unless specified, if the function \c std::foo is available only in some recent c++ versions (e.g., c++11), then the respective %Eigen's function/method will be usable on standard types only if the compiler supports the required c++ version.
|
| 27 |
+
|
| 28 |
+
<table class="manual-hl">
|
| 29 |
+
<tr>
|
| 30 |
+
<th>API</th><th>Description</th><th>Default scalar implementation</th><th>SIMD</th>
|
| 31 |
+
</tr>
|
| 32 |
+
<tr><td colspan="4"></td></tr>
|
| 33 |
+
<tr><th colspan="4">Basic operations</th></tr>
|
| 34 |
+
<tr>
|
| 35 |
+
<td class="code">
|
| 36 |
+
\anchor cwisetable_abs
|
| 37 |
+
a.\link ArrayBase::abs abs\endlink(); \n
|
| 38 |
+
\link Eigen::abs abs\endlink(a); \n
|
| 39 |
+
m.\link MatrixBase::cwiseAbs cwiseAbs\endlink();
|
| 40 |
+
</td>
|
| 41 |
+
<td>absolute value (\f$ |a_i| \f$) </td>
|
| 42 |
+
<td class="code">
|
| 43 |
+
using <a href="http://en.cppreference.com/w/cpp/numeric/math/fabs">std::abs</a>; \n
|
| 44 |
+
abs(a[i]);
|
| 45 |
+
</td>
|
| 46 |
+
<td>SSE2, AVX (i32,f,d)</td>
|
| 47 |
+
</tr>
|
| 48 |
+
<tr>
|
| 49 |
+
<td class="code">
|
| 50 |
+
\anchor cwisetable_inverse
|
| 51 |
+
a.\link ArrayBase::inverse inverse\endlink(); \n
|
| 52 |
+
\link Eigen::inverse inverse\endlink(a); \n
|
| 53 |
+
m.\link MatrixBase::cwiseInverse cwiseInverse\endlink();
|
| 54 |
+
</td>
|
| 55 |
+
<td>inverse value (\f$ 1/a_i \f$) </td>
|
| 56 |
+
<td class="code">
|
| 57 |
+
1/a[i];
|
| 58 |
+
</td>
|
| 59 |
+
<td>All engines (f,d,cf,cd)</td>
|
| 60 |
+
</tr>
|
| 61 |
+
<tr>
|
| 62 |
+
<td class="code">
|
| 63 |
+
\anchor cwisetable_conj
|
| 64 |
+
a.\link ArrayBase::conjugate conjugate\endlink(); \n
|
| 65 |
+
\link Eigen::conj conj\endlink(a); \n
|
| 66 |
+
m.\link MatrixBase::conjugate conjugate\endlink();
|
| 67 |
+
</td>
|
| 68 |
+
<td><a href="https://en.wikipedia.org/wiki/Complex_conjugate">complex conjugate</a> (\f$ \bar{a_i} \f$),\n
|
| 69 |
+
no-op for real </td>
|
| 70 |
+
<td class="code">
|
| 71 |
+
using <a href="http://en.cppreference.com/w/cpp/numeric/complex/conj">std::conj</a>; \n
|
| 72 |
+
conj(a[i]);
|
| 73 |
+
</td>
|
| 74 |
+
<td>All engines (cf,cd)</td>
|
| 75 |
+
</tr>
|
| 76 |
+
<tr>
|
| 77 |
+
<td class="code">
|
| 78 |
+
\anchor cwisetable_arg
|
| 79 |
+
a.\link ArrayBase::arg arg\endlink(); \n
|
| 80 |
+
\link Eigen::arg arg\endlink(a); \n
|
| 81 |
+
m.\link MatrixBase::cwiseArg cwiseArg\endlink();
|
| 82 |
+
</td>
|
| 83 |
+
<td>phase angle of complex number</td>
|
| 84 |
+
<td class="code">
|
| 85 |
+
using <a href="http://en.cppreference.com/w/cpp/numeric/complex/arg">std::arg</a>; \n
|
| 86 |
+
arg(a[i]);
|
| 87 |
+
</td>
|
| 88 |
+
<td>All engines (cf,cd)</td>
|
| 89 |
+
</tr>
|
| 90 |
+
<tr>
|
| 91 |
+
<th colspan="4">Exponential functions</th>
|
| 92 |
+
</tr>
|
| 93 |
+
<tr>
|
| 94 |
+
<td class="code">
|
| 95 |
+
\anchor cwisetable_exp
|
| 96 |
+
a.\link ArrayBase::exp exp\endlink(); \n
|
| 97 |
+
\link Eigen::exp exp\endlink(a);
|
| 98 |
+
</td>
|
| 99 |
+
<td>\f$ e \f$ raised to the given power (\f$ e^{a_i} \f$) </td>
|
| 100 |
+
<td class="code">
|
| 101 |
+
using <a href="http://en.cppreference.com/w/cpp/numeric/math/exp">std::exp</a>; \n
|
| 102 |
+
exp(a[i]);
|
| 103 |
+
</td>
|
| 104 |
+
<td>SSE2, AVX (f,d)</td>
|
| 105 |
+
</tr>
|
| 106 |
+
<tr>
|
| 107 |
+
<td class="code">
|
| 108 |
+
\anchor cwisetable_log
|
| 109 |
+
a.\link ArrayBase::log log\endlink(); \n
|
| 110 |
+
\link Eigen::log log\endlink(a);
|
| 111 |
+
</td>
|
| 112 |
+
<td>natural (base \f$ e \f$) logarithm (\f$ \ln({a_i}) \f$)</td>
|
| 113 |
+
<td class="code">
|
| 114 |
+
using <a href="http://en.cppreference.com/w/cpp/numeric/math/log">std::log</a>; \n
|
| 115 |
+
log(a[i]);
|
| 116 |
+
</td>
|
| 117 |
+
<td>SSE2, AVX (f)</td>
|
| 118 |
+
</tr>
|
| 119 |
+
<tr>
|
| 120 |
+
<td class="code">
|
| 121 |
+
\anchor cwisetable_log1p
|
| 122 |
+
a.\link ArrayBase::log1p log1p\endlink(); \n
|
| 123 |
+
\link Eigen::log1p log1p\endlink(a);
|
| 124 |
+
</td>
|
| 125 |
+
<td>natural (base \f$ e \f$) logarithm of 1 plus \n the given number (\f$ \ln({1+a_i}) \f$)</td>
|
| 126 |
+
<td>built-in generic implementation based on \c log,\n
|
| 127 |
+
plus \c using <a href="http://en.cppreference.com/w/cpp/numeric/math/log1p">\c std::log1p </a>; \cpp11</td>
|
| 128 |
+
<td></td>
|
| 129 |
+
</tr>
|
| 130 |
+
<tr>
|
| 131 |
+
<td class="code">
|
| 132 |
+
\anchor cwisetable_log10
|
| 133 |
+
a.\link ArrayBase::log10 log10\endlink(); \n
|
| 134 |
+
\link Eigen::log10 log10\endlink(a);
|
| 135 |
+
</td>
|
| 136 |
+
<td>base 10 logarithm (\f$ \log_{10}({a_i}) \f$)</td>
|
| 137 |
+
<td class="code">
|
| 138 |
+
using <a href="http://en.cppreference.com/w/cpp/numeric/math/log10">std::log10</a>; \n
|
| 139 |
+
log10(a[i]);
|
| 140 |
+
</td>
|
| 141 |
+
<td></td>
|
| 142 |
+
</tr>
|
| 143 |
+
<tr>
|
| 144 |
+
<th colspan="4">Power functions</th>
|
| 145 |
+
</tr>
|
| 146 |
+
<tr>
|
| 147 |
+
<td class="code">
|
| 148 |
+
\anchor cwisetable_pow
|
| 149 |
+
a.\link ArrayBase::pow pow\endlink(b); \n
|
| 150 |
+
\link ArrayBase::pow(const Eigen::ArrayBase< Derived > &x, const Eigen::ArrayBase< ExponentDerived > &exponents) pow\endlink(a,b);
|
| 151 |
+
</td>
|
| 152 |
+
<!-- For some reason Doxygen thinks that pow is in ArrayBase namespace -->
|
| 153 |
+
<td>raises a number to the given power (\f$ a_i ^ {b_i} \f$) \n \c a and \c b can be either an array or scalar.</td>
|
| 154 |
+
<td class="code">
|
| 155 |
+
using <a href="http://en.cppreference.com/w/cpp/numeric/math/pow">std::pow</a>; \n
|
| 156 |
+
pow(a[i],b[i]);\n
|
| 157 |
+
(plus builtin for integer types)</td>
|
| 158 |
+
<td></td>
|
| 159 |
+
</tr>
|
| 160 |
+
<tr>
|
| 161 |
+
<td class="code">
|
| 162 |
+
\anchor cwisetable_sqrt
|
| 163 |
+
a.\link ArrayBase::sqrt sqrt\endlink(); \n
|
| 164 |
+
\link Eigen::sqrt sqrt\endlink(a);\n
|
| 165 |
+
m.\link MatrixBase::cwiseSqrt cwiseSqrt\endlink();
|
| 166 |
+
</td>
|
| 167 |
+
<td>computes square root (\f$ \sqrt a_i \f$)</td>
|
| 168 |
+
<td class="code">
|
| 169 |
+
using <a href="http://en.cppreference.com/w/cpp/numeric/math/sqrt">std::sqrt</a>; \n
|
| 170 |
+
sqrt(a[i]);</td>
|
| 171 |
+
<td>SSE2, AVX (f,d)</td>
|
| 172 |
+
</tr>
|
| 173 |
+
<tr>
|
| 174 |
+
<td class="code">
|
| 175 |
+
\anchor cwisetable_rsqrt
|
| 176 |
+
a.\link ArrayBase::rsqrt rsqrt\endlink(); \n
|
| 177 |
+
\link Eigen::rsqrt rsqrt\endlink(a);
|
| 178 |
+
</td>
|
| 179 |
+
<td><a href="https://en.wikipedia.org/wiki/Fast_inverse_square_root">reciprocal square root</a> (\f$ 1/{\sqrt a_i} \f$)</td>
|
| 180 |
+
<td class="code">
|
| 181 |
+
using <a href="http://en.cppreference.com/w/cpp/numeric/math/sqrt">std::sqrt</a>; \n
|
| 182 |
+
1/sqrt(a[i]); \n
|
| 183 |
+
</td>
|
| 184 |
+
<td>SSE2, AVX, AltiVec, ZVector (f,d)\n
|
| 185 |
+
(approx + 1 Newton iteration)</td>
|
| 186 |
+
</tr>
|
| 187 |
+
<tr>
|
| 188 |
+
<td class="code">
|
| 189 |
+
\anchor cwisetable_square
|
| 190 |
+
a.\link ArrayBase::square square\endlink(); \n
|
| 191 |
+
\link Eigen::square square\endlink(a);
|
| 192 |
+
</td>
|
| 193 |
+
<td>computes square power (\f$ a_i^2 \f$)</td>
|
| 194 |
+
<td class="code">
|
| 195 |
+
a[i]*a[i]</td>
|
| 196 |
+
<td>All (i32,f,d,cf,cd)</td>
|
| 197 |
+
</tr>
|
| 198 |
+
<tr>
|
| 199 |
+
<td class="code">
|
| 200 |
+
\anchor cwisetable_cube
|
| 201 |
+
a.\link ArrayBase::cube cube\endlink(); \n
|
| 202 |
+
\link Eigen::cube cube\endlink(a);
|
| 203 |
+
</td>
|
| 204 |
+
<td>computes cubic power (\f$ a_i^3 \f$)</td>
|
| 205 |
+
<td class="code">
|
| 206 |
+
a[i]*a[i]*a[i]</td>
|
| 207 |
+
<td>All (i32,f,d,cf,cd)</td>
|
| 208 |
+
</tr>
|
| 209 |
+
<tr>
|
| 210 |
+
<td class="code">
|
| 211 |
+
\anchor cwisetable_abs2
|
| 212 |
+
a.\link ArrayBase::abs2 abs2\endlink(); \n
|
| 213 |
+
\link Eigen::abs2 abs2\endlink(a);\n
|
| 214 |
+
m.\link MatrixBase::cwiseAbs2 cwiseAbs2\endlink();
|
| 215 |
+
</td>
|
| 216 |
+
<td>computes the squared absolute value (\f$ |a_i|^2 \f$)</td>
|
| 217 |
+
<td class="code">
|
| 218 |
+
real: a[i]*a[i] \n
|
| 219 |
+
complex: real(a[i])*real(a[i]) \n
|
| 220 |
+
+ imag(a[i])*imag(a[i])</td>
|
| 221 |
+
<td>All (i32,f,d)</td>
|
| 222 |
+
</tr>
|
| 223 |
+
<tr>
|
| 224 |
+
<th colspan="4">Trigonometric functions</th>
|
| 225 |
+
</tr>
|
| 226 |
+
<tr>
|
| 227 |
+
<td class="code">
|
| 228 |
+
\anchor cwisetable_sin
|
| 229 |
+
a.\link ArrayBase::sin sin\endlink(); \n
|
| 230 |
+
\link Eigen::sin sin\endlink(a);
|
| 231 |
+
</td>
|
| 232 |
+
<td>computes sine</td>
|
| 233 |
+
<td class="code">
|
| 234 |
+
using <a href="http://en.cppreference.com/w/cpp/numeric/math/sin">std::sin</a>; \n
|
| 235 |
+
sin(a[i]);</td>
|
| 236 |
+
<td>SSE2, AVX (f)</td>
|
| 237 |
+
</tr>
|
| 238 |
+
<tr>
|
| 239 |
+
<td class="code">
|
| 240 |
+
\anchor cwisetable_cos
|
| 241 |
+
a.\link ArrayBase::cos cos\endlink(); \n
|
| 242 |
+
\link Eigen::cos cos\endlink(a);
|
| 243 |
+
</td>
|
| 244 |
+
<td>computes cosine</td>
|
| 245 |
+
<td class="code">
|
| 246 |
+
using <a href="http://en.cppreference.com/w/cpp/numeric/math/cos">std::cos</a>; \n
|
| 247 |
+
cos(a[i]);</td>
|
| 248 |
+
<td>SSE2, AVX (f)</td>
|
| 249 |
+
</tr>
|
| 250 |
+
<tr>
|
| 251 |
+
<td class="code">
|
| 252 |
+
\anchor cwisetable_tan
|
| 253 |
+
a.\link ArrayBase::tan tan\endlink(); \n
|
| 254 |
+
\link Eigen::tan tan\endlink(a);
|
| 255 |
+
</td>
|
| 256 |
+
<td>computes tangent</td>
|
| 257 |
+
<td class="code">
|
| 258 |
+
using <a href="http://en.cppreference.com/w/cpp/numeric/math/tan">std::tan</a>; \n
|
| 259 |
+
tan(a[i]);</td>
|
| 260 |
+
<td></td>
|
| 261 |
+
</tr>
|
| 262 |
+
<tr>
|
| 263 |
+
<td class="code">
|
| 264 |
+
\anchor cwisetable_asin
|
| 265 |
+
a.\link ArrayBase::asin asin\endlink(); \n
|
| 266 |
+
\link Eigen::asin asin\endlink(a);
|
| 267 |
+
</td>
|
| 268 |
+
<td>computes arc sine (\f$ \sin^{-1} a_i \f$)</td>
|
| 269 |
+
<td class="code">
|
| 270 |
+
using <a href="http://en.cppreference.com/w/cpp/numeric/math/asin">std::asin</a>; \n
|
| 271 |
+
asin(a[i]);</td>
|
| 272 |
+
<td></td>
|
| 273 |
+
</tr>
|
| 274 |
+
<tr>
|
| 275 |
+
<td class="code">
|
| 276 |
+
\anchor cwisetable_acos
|
| 277 |
+
a.\link ArrayBase::acos acos\endlink(); \n
|
| 278 |
+
\link Eigen::acos acos\endlink(a);
|
| 279 |
+
</td>
|
| 280 |
+
<td>computes arc cosine (\f$ \cos^{-1} a_i \f$)</td>
|
| 281 |
+
<td class="code">
|
| 282 |
+
using <a href="http://en.cppreference.com/w/cpp/numeric/math/acos">std::acos</a>; \n
|
| 283 |
+
acos(a[i]);</td>
|
| 284 |
+
<td></td>
|
| 285 |
+
</tr>
|
| 286 |
+
<tr>
|
| 287 |
+
<td class="code">
|
| 288 |
+
\anchor cwisetable_atan
|
| 289 |
+
a.\link ArrayBase::atan atan\endlink(); \n
|
| 290 |
+
\link Eigen::atan atan\endlink(a);
|
| 291 |
+
</td>
|
| 292 |
+
<td>computes arc tangent (\f$ \tan^{-1} a_i \f$)</td>
|
| 293 |
+
<td class="code">
|
| 294 |
+
using <a href="http://en.cppreference.com/w/cpp/numeric/math/atan">std::atan</a>; \n
|
| 295 |
+
atan(a[i]);</td>
|
| 296 |
+
<td></td>
|
| 297 |
+
</tr>
|
| 298 |
+
<tr>
|
| 299 |
+
<th colspan="4">Hyperbolic functions</th>
|
| 300 |
+
</tr>
|
| 301 |
+
<tr>
|
| 302 |
+
<td class="code">
|
| 303 |
+
\anchor cwisetable_sinh
|
| 304 |
+
a.\link ArrayBase::sinh sinh\endlink(); \n
|
| 305 |
+
\link Eigen::sinh sinh\endlink(a);
|
| 306 |
+
</td>
|
| 307 |
+
<td>computes hyperbolic sine</td>
|
| 308 |
+
<td class="code">
|
| 309 |
+
using <a href="http://en.cppreference.com/w/cpp/numeric/math/sinh">std::sinh</a>; \n
|
| 310 |
+
sinh(a[i]);</td>
|
| 311 |
+
<td></td>
|
| 312 |
+
</tr>
|
| 313 |
+
<tr>
|
| 314 |
+
<td class="code">
|
| 315 |
+
\anchor cwisetable_cosh
|
| 316 |
+
a.\link ArrayBase::cosh cosh\endlink(); \n
|
| 317 |
+
\link Eigen::cosh cosh\endlink(a);
|
| 318 |
+
</td>
|
| 319 |
+
<td>computes hyperbolic cosine</td>
|
| 320 |
+
<td class="code">
|
| 321 |
+
using <a href="http://en.cppreference.com/w/cpp/numeric/math/cosh">std::cosh</a>; \n
|
| 322 |
+
cosh(a[i]);</td>
|
| 323 |
+
<td></td>
|
| 324 |
+
</tr>
|
| 325 |
+
<tr>
|
| 326 |
+
<td class="code">
|
| 327 |
+
\anchor cwisetable_tanh
|
| 328 |
+
a.\link ArrayBase::tanh tanh\endlink(); \n
|
| 329 |
+
\link Eigen::tanh tanh\endlink(a);
|
| 330 |
+
</td>
|
| 331 |
+
<td>computes hyperbolic tangent</td>
|
| 332 |
+
<td class="code">
|
| 333 |
+
using <a href="http://en.cppreference.com/w/cpp/numeric/math/tanh">std::tanh</a>; \n
|
| 334 |
+
tanh(a[i]);</td>
|
| 335 |
+
<td></td>
|
| 336 |
+
</tr>
|
| 337 |
+
<tr>
|
| 338 |
+
<td class="code">
|
| 339 |
+
\anchor cwisetable_asinh
|
| 340 |
+
a.\link ArrayBase::asinh asinh\endlink(); \n
|
| 341 |
+
\link Eigen::asinh asinh\endlink(a);
|
| 342 |
+
</td>
|
| 343 |
+
<td>computes inverse hyperbolic sine</td>
|
| 344 |
+
<td class="code">
|
| 345 |
+
using <a href="http://en.cppreference.com/w/cpp/numeric/math/asinh">std::asinh</a>; \n
|
| 346 |
+
asinh(a[i]);</td>
|
| 347 |
+
<td></td>
|
| 348 |
+
</tr>
|
| 349 |
+
<tr>
|
| 350 |
+
<td class="code">
|
| 351 |
+
\anchor cwisetable_acosh
|
| 352 |
+
a.\link ArrayBase::acosh acosh\endlink(); \n
|
| 353 |
+
\link Eigen::acosh acosh\endlink(a);
|
| 354 |
+
</td>
|
| 355 |
+
<td>computes inverse hyperbolic cosine</td>
|
| 356 |
+
<td class="code">
|
| 357 |
+
using <a href="http://en.cppreference.com/w/cpp/numeric/math/acosh">std::acosh</a>; \n
|
| 358 |
+
acosh(a[i]);</td>
|
| 359 |
+
<td></td>
|
| 360 |
+
</tr>
|
| 361 |
+
<tr>
|
| 362 |
+
<td class="code">
|
| 363 |
+
\anchor cwisetable_atanh
|
| 364 |
+
a.\link ArrayBase::atanh atanh\endlink(); \n
|
| 365 |
+
\link Eigen::atanh atanh\endlink(a);
|
| 366 |
+
</td>
|
| 367 |
+
<td>computes inverse hyperbolic tangent</td>
|
| 368 |
+
<td class="code">
|
| 369 |
+
using <a href="http://en.cppreference.com/w/cpp/numeric/math/atanh">std::atanh</a>; \n
|
| 370 |
+
atanh(a[i]);</td>
|
| 371 |
+
<td></td>
|
| 372 |
+
</tr>
|
| 373 |
+
<tr>
|
| 374 |
+
<th colspan="4">Nearest integer floating point operations</th>
|
| 375 |
+
</tr>
|
| 376 |
+
<tr>
|
| 377 |
+
<td class="code">
|
| 378 |
+
\anchor cwisetable_ceil
|
| 379 |
+
a.\link ArrayBase::ceil ceil\endlink(); \n
|
| 380 |
+
\link Eigen::ceil ceil\endlink(a);
|
| 381 |
+
</td>
|
| 382 |
+
<td>nearest integer not less than the given value</td>
|
| 383 |
+
<td class="code">
|
| 384 |
+
using <a href="http://en.cppreference.com/w/cpp/numeric/math/ceil">std::ceil</a>; \n
|
| 385 |
+
ceil(a[i]);</td>
|
| 386 |
+
<td>SSE4,AVX,ZVector (f,d)</td>
|
| 387 |
+
</tr>
|
| 388 |
+
<tr>
|
| 389 |
+
<td class="code">
|
| 390 |
+
\anchor cwisetable_floor
|
| 391 |
+
a.\link ArrayBase::floor floor\endlink(); \n
|
| 392 |
+
\link Eigen::floor floor\endlink(a);
|
| 393 |
+
</td>
|
| 394 |
+
<td>nearest integer not greater than the given value</td>
|
| 395 |
+
<td class="code">
|
| 396 |
+
using <a href="http://en.cppreference.com/w/cpp/numeric/math/floor">std::floor</a>; \n
|
| 397 |
+
floor(a[i]);</td>
|
| 398 |
+
<td>SSE4,AVX,ZVector (f,d)</td>
|
| 399 |
+
</tr>
|
| 400 |
+
<tr>
|
| 401 |
+
<td class="code">
|
| 402 |
+
\anchor cwisetable_round
|
| 403 |
+
a.\link ArrayBase::round round\endlink(); \n
|
| 404 |
+
\link Eigen::round round\endlink(a);
|
| 405 |
+
</td>
|
| 406 |
+
<td>nearest integer, \n rounding away from zero in halfway cases</td>
|
| 407 |
+
<td>built-in generic implementation \n based on \c floor and \c ceil,\n
|
| 408 |
+
plus \c using <a href="http://en.cppreference.com/w/cpp/numeric/math/round">\c std::round </a>; \cpp11</td>
|
| 409 |
+
<td>SSE4,AVX,ZVector (f,d)</td>
|
| 410 |
+
</tr>
|
| 411 |
+
<tr>
|
| 412 |
+
<td class="code">
|
| 413 |
+
\anchor cwisetable_rint
|
| 414 |
+
a.\link ArrayBase::rint rint\endlink(); \n
|
| 415 |
+
\link Eigen::rint rint\endlink(a);
|
| 416 |
+
</td>
|
| 417 |
+
<td>nearest integer, \n rounding to nearest even in halfway cases</td>
|
| 418 |
+
<td>built-in generic implementation using <a href="http://en.cppreference.com/w/cpp/numeric/math/rint">\c std::rint </a>; \cpp11
|
| 419 |
+
or <a href="http://en.cppreference.com/w/c/numeric/math/rint">\c rintf </a>; </td>
|
| 420 |
+
<td>SSE4,AVX (f,d)</td>
|
| 421 |
+
</tr>
|
| 422 |
+
<tr>
|
| 423 |
+
<th colspan="4">Floating point manipulation functions</th>
|
| 424 |
+
</tr>
|
| 425 |
+
<tr>
|
| 426 |
+
<th colspan="4">Classification and comparison</th>
|
| 427 |
+
</tr>
|
| 428 |
+
<tr>
|
| 429 |
+
<td class="code">
|
| 430 |
+
\anchor cwisetable_isfinite
|
| 431 |
+
a.\link ArrayBase::isFinite isFinite\endlink(); \n
|
| 432 |
+
\link Eigen::isfinite isfinite\endlink(a);
|
| 433 |
+
</td>
|
| 434 |
+
<td>checks if the given number has finite value</td>
|
| 435 |
+
<td>built-in generic implementation,\n
|
| 436 |
+
plus \c using <a href="http://en.cppreference.com/w/cpp/numeric/math/isfinite">\c std::isfinite </a>; \cpp11</td>
|
| 437 |
+
<td></td>
|
| 438 |
+
</tr>
|
| 439 |
+
<tr>
|
| 440 |
+
<td class="code">
|
| 441 |
+
\anchor cwisetable_isinf
|
| 442 |
+
a.\link ArrayBase::isInf isInf\endlink(); \n
|
| 443 |
+
\link Eigen::isinf isinf\endlink(a);
|
| 444 |
+
</td>
|
| 445 |
+
<td>checks if the given number is infinite</td>
|
| 446 |
+
<td>built-in generic implementation,\n
|
| 447 |
+
plus \c using <a href="http://en.cppreference.com/w/cpp/numeric/math/isinf">\c std::isinf </a>; \cpp11</td>
|
| 448 |
+
<td></td>
|
| 449 |
+
</tr>
|
| 450 |
+
<tr>
|
| 451 |
+
<td class="code">
|
| 452 |
+
\anchor cwisetable_isnan
|
| 453 |
+
a.\link ArrayBase::isNaN isNaN\endlink(); \n
|
| 454 |
+
\link Eigen::isnan isnan\endlink(a);
|
| 455 |
+
</td>
|
| 456 |
+
<td>checks if the given number is not a number</td>
|
| 457 |
+
<td>built-in generic implementation,\n
|
| 458 |
+
plus \c using <a href="http://en.cppreference.com/w/cpp/numeric/math/isnan">\c std::isnan </a>; \cpp11</td>
|
| 459 |
+
<td></td>
|
| 460 |
+
</tr>
|
| 461 |
+
<tr>
|
| 462 |
+
<th colspan="4">Error and gamma functions</th>
|
| 463 |
+
</tr>
|
| 464 |
+
<tr> <td colspan="4"> Require \c \#include \c <unsupported/Eigen/SpecialFunctions> </td></tr>
|
| 465 |
+
<tr>
|
| 466 |
+
<td class="code">
|
| 467 |
+
\anchor cwisetable_erf
|
| 468 |
+
a.\link ArrayBase::erf erf\endlink(); \n
|
| 469 |
+
\link Eigen::erf erf\endlink(a);
|
| 470 |
+
</td>
|
| 471 |
+
<td>error function</td>
|
| 472 |
+
<td class="code">
|
| 473 |
+
using <a href="http://en.cppreference.com/w/cpp/numeric/math/erf">std::erf</a>; \cpp11 \n
|
| 474 |
+
erf(a[i]);
|
| 475 |
+
</td>
|
| 476 |
+
<td></td>
|
| 477 |
+
</tr>
|
| 478 |
+
<tr>
|
| 479 |
+
<td class="code">
|
| 480 |
+
\anchor cwisetable_erfc
|
| 481 |
+
a.\link ArrayBase::erfc erfc\endlink(); \n
|
| 482 |
+
\link Eigen::erfc erfc\endlink(a);
|
| 483 |
+
</td>
|
| 484 |
+
<td>complementary error function</td>
|
| 485 |
+
<td class="code">
|
| 486 |
+
using <a href="http://en.cppreference.com/w/cpp/numeric/math/erfc">std::erfc</a>; \cpp11 \n
|
| 487 |
+
erfc(a[i]);
|
| 488 |
+
</td>
|
| 489 |
+
<td></td>
|
| 490 |
+
</tr>
|
| 491 |
+
<tr>
|
| 492 |
+
<td class="code">
|
| 493 |
+
\anchor cwisetable_lgamma
|
| 494 |
+
a.\link ArrayBase::lgamma lgamma\endlink(); \n
|
| 495 |
+
\link Eigen::lgamma lgamma\endlink(a);
|
| 496 |
+
</td>
|
| 497 |
+
<td>natural logarithm of the gamma function</td>
|
| 498 |
+
<td class="code">
|
| 499 |
+
using <a href="http://en.cppreference.com/w/cpp/numeric/math/lgamma">std::lgamma</a>; \cpp11 \n
|
| 500 |
+
lgamma(a[i]);
|
| 501 |
+
</td>
|
| 502 |
+
<td></td>
|
| 503 |
+
</tr>
|
| 504 |
+
<tr>
|
| 505 |
+
<td class="code">
|
| 506 |
+
\anchor cwisetable_digamma
|
| 507 |
+
a.\link ArrayBase::digamma digamma\endlink(); \n
|
| 508 |
+
digamma(a);
|
| 509 |
+
</td>
|
| 510 |
+
<td><a href="https://en.wikipedia.org/wiki/Digamma_function">logarithmic derivative of the gamma function</a></td>
|
| 511 |
+
<td>
|
| 512 |
+
built-in for float and double
|
| 513 |
+
</td>
|
| 514 |
+
<td></td>
|
| 515 |
+
</tr>
|
| 516 |
+
<tr>
|
| 517 |
+
<td class="code">
|
| 518 |
+
\anchor cwisetable_igamma
|
| 519 |
+
igamma(a,x);
|
| 520 |
+
</td>
|
| 521 |
+
<td><a href="https://en.wikipedia.org/wiki/Incomplete_gamma_function">lower incomplete gamma integral</a>
|
| 522 |
+
\n \f$ \gamma(a_i,x_i)= \frac{1}{|a_i|} \int_{0}^{x_i}e^{\text{-}t} t^{a_i-1} \mathrm{d} t \f$</td>
|
| 523 |
+
<td>
|
| 524 |
+
built-in for float and double,\n but requires \cpp11
|
| 525 |
+
</td>
|
| 526 |
+
<td></td>
|
| 527 |
+
</tr>
|
| 528 |
+
<tr>
|
| 529 |
+
<td class="code">
|
| 530 |
+
\anchor cwisetable_igammac
|
| 531 |
+
igammac(a,x);
|
| 532 |
+
</td>
|
| 533 |
+
<td><a href="https://en.wikipedia.org/wiki/Incomplete_gamma_function">upper incomplete gamma integral</a>
|
| 534 |
+
\n \f$ \Gamma(a_i,x_i) = \frac{1}{|a_i|} \int_{x_i}^{\infty}e^{\text{-}t} t^{a_i-1} \mathrm{d} t \f$</td>
|
| 535 |
+
<td>
|
| 536 |
+
built-in for float and double,\n but requires \cpp11
|
| 537 |
+
</td>
|
| 538 |
+
<td></td>
|
| 539 |
+
</tr>
|
| 540 |
+
<tr>
|
| 541 |
+
<th colspan="4">Special functions</th>
|
| 542 |
+
</tr>
|
| 543 |
+
<tr> <td colspan="4"> Require \c \#include \c <unsupported/Eigen/SpecialFunctions> </td></tr>
|
| 544 |
+
<tr>
|
| 545 |
+
<td class="code">
|
| 546 |
+
\anchor cwisetable_polygamma
|
| 547 |
+
polygamma(n,x);
|
| 548 |
+
</td>
|
| 549 |
+
<td><a href="https://en.wikipedia.org/wiki/Polygamma_function">n-th derivative of digamma at x</a></td>
|
| 550 |
+
<td>
|
| 551 |
+
built-in generic based on\n <a href="#cwisetable_lgamma">\c lgamma </a>,
|
| 552 |
+
<a href="#cwisetable_digamma"> \c digamma </a>
|
| 553 |
+
and <a href="#cwisetable_zeta">\c zeta </a>.
|
| 554 |
+
</td>
|
| 555 |
+
<td></td>
|
| 556 |
+
</tr>
|
| 557 |
+
<tr>
|
| 558 |
+
<td class="code">
|
| 559 |
+
\anchor cwisetable_betainc
|
| 560 |
+
betainc(a,b,x);
|
| 561 |
+
</td>
|
| 562 |
+
<td><a href="https://en.wikipedia.org/wiki/Beta_function#Incomplete_beta_function">Incomplete beta function</a></td>
|
| 563 |
+
<td>
|
| 564 |
+
built-in for float and double,\n but requires \cpp11
|
| 565 |
+
</td>
|
| 566 |
+
<td></td>
|
| 567 |
+
</tr>
|
| 568 |
+
<tr>
|
| 569 |
+
<td class="code">
|
| 570 |
+
\anchor cwisetable_zeta
|
| 571 |
+
zeta(a,b); \n
|
| 572 |
+
a.\link ArrayBase::zeta zeta\endlink(b);
|
| 573 |
+
</td>
|
| 574 |
+
<td><a href="https://en.wikipedia.org/wiki/Hurwitz_zeta_function">Hurwitz zeta function</a>
|
| 575 |
+
\n \f$ \zeta(a_i,b_i)=\sum_{k=0}^{\infty}(b_i+k)^{\text{-}a_i} \f$</td>
|
| 576 |
+
<td>
|
| 577 |
+
built-in for float and double
|
| 578 |
+
</td>
|
| 579 |
+
<td></td>
|
| 580 |
+
</tr>
|
| 581 |
+
<tr>
|
| 582 |
+
<td class="code">
|
| 583 |
+
\anchor cwisetable_ndtri
|
| 584 |
+
a.\link ArrayBase::ndtri ndtri\endlink(); \n
|
| 585 |
+
\link Eigen::ndtri ndtri\endlink(a);
|
| 586 |
+
</td>
|
| 587 |
+
<td>Inverse of the CDF of the Normal distribution function</td>
|
| 588 |
+
<td>
|
| 589 |
+
built-in for float and double
|
| 590 |
+
</td>
|
| 591 |
+
<td></td>
|
| 592 |
+
</tr>
|
| 593 |
+
<tr><td colspan="4"></td></tr>
|
| 594 |
+
</table>
|
| 595 |
+
|
| 596 |
+
\n
|
| 597 |
+
|
| 598 |
+
*/
|
| 599 |
+
|
| 600 |
+
}
|
include/eigen/doc/CustomizingEigen_CustomScalar.dox
ADDED
|
@@ -0,0 +1,120 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
namespace Eigen {
|
| 2 |
+
|
| 3 |
+
/** \page TopicCustomizing_CustomScalar Using custom scalar types
|
| 4 |
+
\anchor user_defined_scalars
|
| 5 |
+
|
| 6 |
+
By default, Eigen currently supports standard floating-point types (\c float, \c double, \c std::complex<float>, \c std::complex<double>, \c long \c double), as well as all native integer types (e.g., \c int, \c unsigned \c int, \c short, etc.), and \c bool.
|
| 7 |
+
On x86-64 systems, \c long \c double makes it possible to locally enforce the use of x87 registers with extended accuracy (in comparison to SSE).
|
| 8 |
+
|
| 9 |
+
In order to add support for a custom type \c T you need:
|
| 10 |
+
-# make sure the common operators (+,-,*,/,etc.) are supported by the type \c T
|
| 11 |
+
-# add a specialization of struct Eigen::NumTraits<T> (see \ref NumTraits)
|
| 12 |
+
-# define the math functions that makes sense for your type. This includes standard ones like sqrt, pow, sin, tan, conj, real, imag, etc, as well as abs2 which is Eigen specific.
|
| 13 |
+
(see the file Eigen/src/Core/MathFunctions.h)
|
| 14 |
+
|
| 15 |
+
The math functions should be defined in the same namespace as \c T, or in the \c std namespace though that second approach is not recommended.
|
| 16 |
+
|
| 17 |
+
Here is a concrete example adding support for the Adolc's \c adouble type. <a href="https://projects.coin-or.org/ADOL-C">Adolc</a> is an automatic differentiation library. The type \c adouble is basically a real value tracking the values of any number of partial derivatives.
|
| 18 |
+
|
| 19 |
+
\code
|
| 20 |
+
#ifndef ADOLCSUPPORT_H
|
| 21 |
+
#define ADOLCSUPPORT_H
|
| 22 |
+
|
| 23 |
+
#define ADOLC_TAPELESS
|
| 24 |
+
#include <adolc/adouble.h>
|
| 25 |
+
#include <Eigen/Core>
|
| 26 |
+
|
| 27 |
+
namespace Eigen {
|
| 28 |
+
|
| 29 |
+
template<> struct NumTraits<adtl::adouble>
|
| 30 |
+
: NumTraits<double> // permits to get the epsilon, dummy_precision, lowest, highest functions
|
| 31 |
+
{
|
| 32 |
+
typedef adtl::adouble Real;
|
| 33 |
+
typedef adtl::adouble NonInteger;
|
| 34 |
+
typedef adtl::adouble Nested;
|
| 35 |
+
|
| 36 |
+
enum {
|
| 37 |
+
IsComplex = 0,
|
| 38 |
+
IsInteger = 0,
|
| 39 |
+
IsSigned = 1,
|
| 40 |
+
RequireInitialization = 1,
|
| 41 |
+
ReadCost = 1,
|
| 42 |
+
AddCost = 3,
|
| 43 |
+
MulCost = 3
|
| 44 |
+
};
|
| 45 |
+
};
|
| 46 |
+
|
| 47 |
+
}
|
| 48 |
+
|
| 49 |
+
namespace adtl {
|
| 50 |
+
|
| 51 |
+
inline const adouble& conj(const adouble& x) { return x; }
|
| 52 |
+
inline const adouble& real(const adouble& x) { return x; }
|
| 53 |
+
inline adouble imag(const adouble&) { return 0.; }
|
| 54 |
+
inline adouble abs(const adouble& x) { return fabs(x); }
|
| 55 |
+
inline adouble abs2(const adouble& x) { return x*x; }
|
| 56 |
+
|
| 57 |
+
}
|
| 58 |
+
|
| 59 |
+
#endif // ADOLCSUPPORT_H
|
| 60 |
+
\endcode
|
| 61 |
+
|
| 62 |
+
This other example adds support for the \c mpq_class type from <a href="https://gmplib.org/">GMP</a>. It shows in particular how to change the way Eigen picks the best pivot during LU factorization. It selects the coefficient with the highest score, where the score is by default the absolute value of a number, but we can define a different score, for instance to prefer pivots with a more compact representation (this is an example, not a recommendation). Note that the scores should always be non-negative and only zero is allowed to have a score of zero. Also, this can interact badly with thresholds for inexact scalar types.
|
| 63 |
+
|
| 64 |
+
\code
|
| 65 |
+
#include <gmpxx.h>
|
| 66 |
+
#include <Eigen/Core>
|
| 67 |
+
#include <boost/operators.hpp>
|
| 68 |
+
|
| 69 |
+
namespace Eigen {
|
| 70 |
+
template<> struct NumTraits<mpq_class> : GenericNumTraits<mpq_class>
|
| 71 |
+
{
|
| 72 |
+
typedef mpq_class Real;
|
| 73 |
+
typedef mpq_class NonInteger;
|
| 74 |
+
typedef mpq_class Nested;
|
| 75 |
+
|
| 76 |
+
static inline Real epsilon() { return 0; }
|
| 77 |
+
static inline Real dummy_precision() { return 0; }
|
| 78 |
+
static inline int digits10() { return 0; }
|
| 79 |
+
|
| 80 |
+
enum {
|
| 81 |
+
IsInteger = 0,
|
| 82 |
+
IsSigned = 1,
|
| 83 |
+
IsComplex = 0,
|
| 84 |
+
RequireInitialization = 1,
|
| 85 |
+
ReadCost = 6,
|
| 86 |
+
AddCost = 150,
|
| 87 |
+
MulCost = 100
|
| 88 |
+
};
|
| 89 |
+
};
|
| 90 |
+
|
| 91 |
+
namespace internal {
|
| 92 |
+
|
| 93 |
+
template<> struct scalar_score_coeff_op<mpq_class> {
|
| 94 |
+
struct result_type : boost::totally_ordered1<result_type> {
|
| 95 |
+
std::size_t len;
|
| 96 |
+
result_type(int i = 0) : len(i) {} // Eigen uses Score(0) and Score()
|
| 97 |
+
result_type(mpq_class const& q) :
|
| 98 |
+
len(mpz_size(q.get_num_mpz_t())+
|
| 99 |
+
mpz_size(q.get_den_mpz_t())-1) {}
|
| 100 |
+
friend bool operator<(result_type x, result_type y) {
|
| 101 |
+
// 0 is the worst possible pivot
|
| 102 |
+
if (x.len == 0) return y.len > 0;
|
| 103 |
+
if (y.len == 0) return false;
|
| 104 |
+
// Prefer a pivot with a small representation
|
| 105 |
+
return x.len > y.len;
|
| 106 |
+
}
|
| 107 |
+
friend bool operator==(result_type x, result_type y) {
|
| 108 |
+
// Only used to test if the score is 0
|
| 109 |
+
return x.len == y.len;
|
| 110 |
+
}
|
| 111 |
+
};
|
| 112 |
+
result_type operator()(mpq_class const& x) const { return x; }
|
| 113 |
+
};
|
| 114 |
+
}
|
| 115 |
+
}
|
| 116 |
+
\endcode
|
| 117 |
+
|
| 118 |
+
*/
|
| 119 |
+
|
| 120 |
+
}
|
include/eigen/doc/CustomizingEigen_InheritingMatrix.dox
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
namespace Eigen {
|
| 2 |
+
|
| 3 |
+
/** \page TopicCustomizing_InheritingMatrix Inheriting from Matrix
|
| 4 |
+
|
| 5 |
+
Before inheriting from Matrix, be really, I mean REALLY, sure that using
|
| 6 |
+
EIGEN_MATRIX_PLUGIN is not what you really want (see previous section).
|
| 7 |
+
If you just need to add few members to Matrix, this is the way to go.
|
| 8 |
+
|
| 9 |
+
An example of when you actually need to inherit Matrix, is when you
|
| 10 |
+
have several layers of heritage such as
|
| 11 |
+
MyVerySpecificVector1, MyVerySpecificVector2 -> MyVector1 -> Matrix and
|
| 12 |
+
MyVerySpecificVector3, MyVerySpecificVector4 -> MyVector2 -> Matrix.
|
| 13 |
+
|
| 14 |
+
In order for your object to work within the %Eigen framework, you need to
|
| 15 |
+
define a few members in your inherited class.
|
| 16 |
+
|
| 17 |
+
Here is a minimalistic example:
|
| 18 |
+
|
| 19 |
+
\include CustomizingEigen_Inheritance.cpp
|
| 20 |
+
|
| 21 |
+
Output: \verbinclude CustomizingEigen_Inheritance.out
|
| 22 |
+
|
| 23 |
+
This is the kind of error you can get if you don't provide those methods
|
| 24 |
+
\verbatim
|
| 25 |
+
error: no match for ‘operator=’ in ‘v = Eigen::operator*(
|
| 26 |
+
const Eigen::MatrixBase<Eigen::Matrix<double, -0x000000001, 1, 0, -0x000000001, 1> >::Scalar&,
|
| 27 |
+
const Eigen::MatrixBase<Eigen::Matrix<double, -0x000000001, 1> >::StorageBaseType&)
|
| 28 |
+
(((const Eigen::MatrixBase<Eigen::Matrix<double, -0x000000001, 1> >::StorageBaseType&)
|
| 29 |
+
((const Eigen::MatrixBase<Eigen::Matrix<double, -0x000000001, 1> >::StorageBaseType*)(& v))))’
|
| 30 |
+
\endverbatim
|
| 31 |
+
|
| 32 |
+
*/
|
| 33 |
+
|
| 34 |
+
}
|
include/eigen/doc/CustomizingEigen_Plugins.dox
ADDED
|
@@ -0,0 +1,69 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
namespace Eigen {
|
| 2 |
+
|
| 3 |
+
/** \page TopicCustomizing_Plugins Extending MatrixBase (and other classes)
|
| 4 |
+
|
| 5 |
+
In this section we will see how to add custom methods to MatrixBase. Since all expressions and matrix types inherit MatrixBase, adding a method to MatrixBase makes it immediately available to all expressions! A typical use case is, for instance, to make Eigen compatible with another API.
|
| 6 |
+
|
| 7 |
+
You certainly know that in C++ it is not possible to add methods to an existing class. So how that's possible ? Here the trick is to include in the declaration of MatrixBase a file defined by the preprocessor token \c EIGEN_MATRIXBASE_PLUGIN:
|
| 8 |
+
\code
|
| 9 |
+
class MatrixBase {
|
| 10 |
+
// ...
|
| 11 |
+
#ifdef EIGEN_MATRIXBASE_PLUGIN
|
| 12 |
+
#include EIGEN_MATRIXBASE_PLUGIN
|
| 13 |
+
#endif
|
| 14 |
+
};
|
| 15 |
+
\endcode
|
| 16 |
+
Therefore to extend MatrixBase with your own methods you just have to create a file with your method declaration and define EIGEN_MATRIXBASE_PLUGIN before you include any Eigen's header file.
|
| 17 |
+
|
| 18 |
+
You can extend many of the other classes used in Eigen by defining similarly named preprocessor symbols. For instance, define \c EIGEN_ARRAYBASE_PLUGIN if you want to extend the ArrayBase class. A full list of classes that can be extended in this way and the corresponding preprocessor symbols can be found on our page \ref TopicPreprocessorDirectives.
|
| 19 |
+
|
| 20 |
+
Here is an example of an extension file for adding methods to MatrixBase: \n
|
| 21 |
+
\b MatrixBaseAddons.h
|
| 22 |
+
\code
|
| 23 |
+
inline Scalar at(uint i, uint j) const { return this->operator()(i,j); }
|
| 24 |
+
inline Scalar& at(uint i, uint j) { return this->operator()(i,j); }
|
| 25 |
+
inline Scalar at(uint i) const { return this->operator[](i); }
|
| 26 |
+
inline Scalar& at(uint i) { return this->operator[](i); }
|
| 27 |
+
|
| 28 |
+
inline RealScalar squaredLength() const { return squaredNorm(); }
|
| 29 |
+
inline RealScalar length() const { return norm(); }
|
| 30 |
+
inline RealScalar invLength(void) const { return fast_inv_sqrt(squaredNorm()); }
|
| 31 |
+
|
| 32 |
+
template<typename OtherDerived>
|
| 33 |
+
inline Scalar squaredDistanceTo(const MatrixBase<OtherDerived>& other) const
|
| 34 |
+
{ return (derived() - other.derived()).squaredNorm(); }
|
| 35 |
+
|
| 36 |
+
template<typename OtherDerived>
|
| 37 |
+
inline RealScalar distanceTo(const MatrixBase<OtherDerived>& other) const
|
| 38 |
+
{ return internal::sqrt(derived().squaredDistanceTo(other)); }
|
| 39 |
+
|
| 40 |
+
inline void scaleTo(RealScalar l) { RealScalar vl = norm(); if (vl>1e-9) derived() *= (l/vl); }
|
| 41 |
+
|
| 42 |
+
inline Transpose<Derived> transposed() {return this->transpose();}
|
| 43 |
+
inline const Transpose<Derived> transposed() const {return this->transpose();}
|
| 44 |
+
|
| 45 |
+
inline uint minComponentId(void) const { int i; this->minCoeff(&i); return i; }
|
| 46 |
+
inline uint maxComponentId(void) const { int i; this->maxCoeff(&i); return i; }
|
| 47 |
+
|
| 48 |
+
template<typename OtherDerived>
|
| 49 |
+
void makeFloor(const MatrixBase<OtherDerived>& other) { derived() = derived().cwiseMin(other.derived()); }
|
| 50 |
+
template<typename OtherDerived>
|
| 51 |
+
void makeCeil(const MatrixBase<OtherDerived>& other) { derived() = derived().cwiseMax(other.derived()); }
|
| 52 |
+
|
| 53 |
+
const CwiseBinaryOp<internal::scalar_sum_op<Scalar>, const Derived, const ConstantReturnType>
|
| 54 |
+
operator+(const Scalar& scalar) const
|
| 55 |
+
{ return CwiseBinaryOp<internal::scalar_sum_op<Scalar>, const Derived, const ConstantReturnType>(derived(), Constant(rows(),cols(),scalar)); }
|
| 56 |
+
|
| 57 |
+
friend const CwiseBinaryOp<internal::scalar_sum_op<Scalar>, const ConstantReturnType, Derived>
|
| 58 |
+
operator+(const Scalar& scalar, const MatrixBase<Derived>& mat)
|
| 59 |
+
{ return CwiseBinaryOp<internal::scalar_sum_op<Scalar>, const ConstantReturnType, Derived>(Constant(rows(),cols(),scalar), mat.derived()); }
|
| 60 |
+
\endcode
|
| 61 |
+
|
| 62 |
+
Then one can add the following declaration in the config.h or whatever prerequisites header file of his project:
|
| 63 |
+
\code
|
| 64 |
+
#define EIGEN_MATRIXBASE_PLUGIN "MatrixBaseAddons.h"
|
| 65 |
+
\endcode
|
| 66 |
+
|
| 67 |
+
*/
|
| 68 |
+
|
| 69 |
+
}
|
include/eigen/doc/Doxyfile.in
ADDED
|
@@ -0,0 +1,180 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Doxyfile 1.13.0
|
| 2 |
+
PROJECT_NAME = ${EIGEN_DOXY_PROJECT_NAME}
|
| 3 |
+
PROJECT_NUMBER = ${EIGEN_VERSION}
|
| 4 |
+
PROJECT_LOGO = ${Eigen_SOURCE_DIR}/doc/Eigen_Silly_Professor_64x64.png
|
| 5 |
+
OUTPUT_DIRECTORY = ${Eigen_BINARY_DIR}/doc${EIGEN_DOXY_OUTPUT_DIRECTORY_SUFFIX}
|
| 6 |
+
FULL_PATH_NAMES = YES
|
| 7 |
+
STRIP_FROM_INC_PATH = ${Eigen_SOURCE_DIR}/
|
| 8 |
+
TAB_SIZE = 8
|
| 9 |
+
ALIASES = "only_for_vectors=This is only for vectors (either row-vectors or column-vectors), i.e. matrices which are known at compile-time to have either one row or one column." \
|
| 10 |
+
"not_reentrant=\warning This function is not re-entrant." \
|
| 11 |
+
"array_module=This is defined in the %Array module. \code #include <Eigen/Array> \endcode" \
|
| 12 |
+
"cholesky_module=This is defined in the %Cholesky module. \code #include <Eigen/Cholesky> \endcode" \
|
| 13 |
+
"eigenvalues_module=This is defined in the %Eigenvalues module. \code #include <Eigen/Eigenvalues> \endcode" \
|
| 14 |
+
"geometry_module=This is defined in the %Geometry module. \code #include <Eigen/Geometry> \endcode" \
|
| 15 |
+
"householder_module=This is defined in the %Householder module. \code #include <Eigen/Householder> \endcode" \
|
| 16 |
+
"jacobi_module=This is defined in the %Jacobi module. \code #include <Eigen/Jacobi> \endcode" \
|
| 17 |
+
"lu_module=This is defined in the %LU module. \code #include <Eigen/LU> \endcode" \
|
| 18 |
+
"qr_module=This is defined in the %QR module. \code #include <Eigen/QR> \endcode" \
|
| 19 |
+
"svd_module=This is defined in the %SVD module. \code #include <Eigen/SVD> \endcode" \
|
| 20 |
+
"specialfunctions_module=This is defined in the \b unsupported SpecialFunctions module. \code #include <Eigen/SpecialFunctions> \endcode" \
|
| 21 |
+
label=\bug \
|
| 22 |
+
"matrixworld=<a href='#matrixonly' style='color:green;text-decoration: none;'>*</a>" \
|
| 23 |
+
"arrayworld=<a href='#arrayonly' style='color:blue;text-decoration: none;'>*</a>" \
|
| 24 |
+
"note_about_arbitrary_choice_of_solution=If there exists more than one solution, this method will arbitrarily choose one." \
|
| 25 |
+
"note_about_using_kernel_to_study_multiple_solutions=If you need a complete analysis of the space of solutions, take the one solution obtained by this method and add to it elements of the kernel, as determined by kernel()." \
|
| 26 |
+
"note_about_checking_solutions=This method just tries to find as good a solution as possible. If you want to check whether a solution exists or if it is accurate, just call this function to get a result and then compute the error of this result, or use MatrixBase::isApprox() directly, for instance like this: \code bool a_solution_exists = (A*result).isApprox(b, precision); \endcode This method avoids dividing by zero, so that the non-existence of a solution doesn't by itself mean that you'll get \c inf or \c nan values." \
|
| 27 |
+
"note_try_to_help_rvo=This function returns the result by value. In order to make that efficient, it is implemented as just a return statement using a special constructor, hopefully allowing the compiler to perform a RVO (return value optimization)." \
|
| 28 |
+
"nonstableyet=\warning This is not considered to be part of the stable public API yet. Changes may happen in future releases. See \ref Experimental \"Experimental parts of Eigen\"" \
|
| 29 |
+
"implsparsesolverconcept=This class follows the \link TutorialSparseSolverConcept sparse solver concept \endlink." \
|
| 30 |
+
blank= \
|
| 31 |
+
"cpp11=<span class='cpp11'>[c++11]</span>" \
|
| 32 |
+
"cpp14=<span class='cpp14'>[c++14]</span>" \
|
| 33 |
+
"cpp17=<span class='cpp17'>[c++17]</span>" \
|
| 34 |
+
"newin{1}=<span class='newin3x'>New in %Eigen \1.</span>" \
|
| 35 |
+
eigenAutoToc= \
|
| 36 |
+
eigenManualPage=\defgroup
|
| 37 |
+
EXTENSION_MAPPING = .h=C++ \
|
| 38 |
+
no_extension=C++
|
| 39 |
+
DISTRIBUTE_GROUP_DOC = YES
|
| 40 |
+
NUM_PROC_THREADS = 0
|
| 41 |
+
EXTRACT_ALL = NO
|
| 42 |
+
EXTRACT_PRIVATE = NO
|
| 43 |
+
EXTRACT_PRIV_VIRTUAL = NO
|
| 44 |
+
EXTRACT_PACKAGE = NO
|
| 45 |
+
EXTRACT_STATIC = YES
|
| 46 |
+
EXTRACT_LOCAL_CLASSES = NO
|
| 47 |
+
EXTRACT_LOCAL_METHODS = NO
|
| 48 |
+
EXTRACT_ANON_NSPACES = NO
|
| 49 |
+
HIDE_UNDOC_MEMBERS = YES
|
| 50 |
+
HIDE_UNDOC_CLASSES = YES
|
| 51 |
+
HIDE_FRIEND_COMPOUNDS = YES
|
| 52 |
+
CASE_SENSE_NAMES = YES
|
| 53 |
+
SORT_BRIEF_DOCS = YES
|
| 54 |
+
GENERATE_TESTLIST = NO
|
| 55 |
+
MAX_INITIALIZER_LINES = 0
|
| 56 |
+
SHOW_NAMESPACES = NO
|
| 57 |
+
LAYOUT_FILE = ${Eigen_BINARY_DIR}/doc${EIGEN_DOXY_OUTPUT_DIRECTORY_SUFFIX}/eigendoxy_layout.xml
|
| 58 |
+
WARN_IF_UNDOCUMENTED = NO
|
| 59 |
+
INPUT = ${EIGEN_DOXY_INPUT}
|
| 60 |
+
FILE_PATTERNS = *
|
| 61 |
+
RECURSIVE = YES
|
| 62 |
+
EXCLUDE = ${Eigen_SOURCE_DIR}/Eigen/Eigen2Support \
|
| 63 |
+
${Eigen_SOURCE_DIR}/Eigen/src/Eigen2Support \
|
| 64 |
+
${Eigen_SOURCE_DIR}/doc/examples \
|
| 65 |
+
${Eigen_SOURCE_DIR}/doc/special_examples \
|
| 66 |
+
${Eigen_SOURCE_DIR}/doc/snippets \
|
| 67 |
+
${Eigen_SOURCE_DIR}/unsupported/doc/examples \
|
| 68 |
+
${Eigen_SOURCE_DIR}/unsupported/doc/snippets
|
| 69 |
+
|
| 70 |
+
# ${Eigen_SOURCE_DIR}/Eigen/src/Core/products \
|
| 71 |
+
# ${Eigen_SOURCE_DIR}/Eigen/src/Core/util/ForwardDeclarations.h \
|
| 72 |
+
|
| 73 |
+
EXCLUDE_PATTERNS = CMake* \
|
| 74 |
+
*.txt \
|
| 75 |
+
*.sh \
|
| 76 |
+
*.orig \
|
| 77 |
+
*.diff \
|
| 78 |
+
diff \
|
| 79 |
+
*~ \
|
| 80 |
+
*. \
|
| 81 |
+
*.sln \
|
| 82 |
+
*.sdf \
|
| 83 |
+
*.tmp \
|
| 84 |
+
*.vcxproj \
|
| 85 |
+
*.filters \
|
| 86 |
+
*.user \
|
| 87 |
+
*.suo
|
| 88 |
+
# The following are pseudo template bases, and not real classes.
|
| 89 |
+
# https://github.com/doxygen/doxygen/issues/11289
|
| 90 |
+
EXCLUDE_SYMBOLS = Kernel \
|
| 91 |
+
BinaryOp
|
| 92 |
+
EXAMPLE_PATH = ${Eigen_SOURCE_DIR}/doc/snippets \
|
| 93 |
+
${Eigen_BINARY_DIR}/doc/snippets \
|
| 94 |
+
${Eigen_SOURCE_DIR}/doc/examples \
|
| 95 |
+
${Eigen_BINARY_DIR}/doc/examples \
|
| 96 |
+
${Eigen_SOURCE_DIR}/doc/special_examples \
|
| 97 |
+
${Eigen_BINARY_DIR}/doc/special_examples \
|
| 98 |
+
${Eigen_SOURCE_DIR}/unsupported/doc/snippets \
|
| 99 |
+
${Eigen_BINARY_DIR}/unsupported/doc/snippets \
|
| 100 |
+
${Eigen_SOURCE_DIR}/unsupported/doc/examples \
|
| 101 |
+
${Eigen_BINARY_DIR}/unsupported/doc/examples
|
| 102 |
+
IMAGE_PATH = ${Eigen_BINARY_DIR}/doc/html
|
| 103 |
+
# Prevent README.md from being considered a directory description (i.e. for Tensor).
|
| 104 |
+
IMPLICIT_DIR_DOCS = NO
|
| 105 |
+
ALPHABETICAL_INDEX = NO
|
| 106 |
+
HTML_OUTPUT = ${Eigen_BINARY_DIR}/doc/html${EIGEN_DOXY_OUTPUT_DIRECTORY_SUFFIX}
|
| 107 |
+
HTML_HEADER = ${Eigen_BINARY_DIR}/doc/eigendoxy_header.html
|
| 108 |
+
HTML_FOOTER = ${Eigen_BINARY_DIR}/doc/eigendoxy_footer.html
|
| 109 |
+
HTML_EXTRA_FILES = ${Eigen_SOURCE_DIR}/doc/eigendoxy.css
|
| 110 |
+
HTML_COLORSTYLE_HUE = ${EIGEN_DOXY_HTML_COLORSTYLE_HUE}
|
| 111 |
+
HTML_DYNAMIC_SECTIONS = YES
|
| 112 |
+
DISABLE_INDEX = YES
|
| 113 |
+
FULL_SIDEBAR = NO
|
| 114 |
+
ENUM_VALUES_PER_LINE = 1
|
| 115 |
+
USE_MATHJAX = @EIGEN_DOXY_USE_MATHJAX@
|
| 116 |
+
MATHJAX_RELPATH = https://cdn.jsdelivr.net/npm/mathjax@2
|
| 117 |
+
MATHJAX_EXTENSIONS = TeX/AMSmath \
|
| 118 |
+
TeX/AMSsymbols
|
| 119 |
+
GENERATE_LATEX = NO
|
| 120 |
+
EXTRA_PACKAGES = amssymb \
|
| 121 |
+
amsmath
|
| 122 |
+
MACRO_EXPANSION = YES
|
| 123 |
+
EXPAND_ONLY_PREDEF = YES
|
| 124 |
+
PREDEFINED = EIGEN_EMPTY_STRUCT \
|
| 125 |
+
EIGEN_PARSED_BY_DOXYGEN \
|
| 126 |
+
EIGEN_VECTORIZE \
|
| 127 |
+
EIGEN_QT_SUPPORT \
|
| 128 |
+
EIGEN_STRONG_INLINE=inline \
|
| 129 |
+
EIGEN_DEVICE_FUNC= \
|
| 130 |
+
"EIGEN_MAKE_CWISE_BINARY_OP(METHOD,FUNCTOR)=template<typename OtherDerived> const CwiseBinaryOp<FUNCTOR<Scalar>, const Derived, const OtherDerived> METHOD(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const;" \
|
| 131 |
+
"EIGEN_CWISE_PRODUCT_RETURN_TYPE(LHS,RHS)=CwiseBinaryOp<internal::scalar_product_op<LHS::Scalar,RHS::Scalar>, const LHS, const RHS>" \
|
| 132 |
+
"EIGEN_CAT2(a,b)= a ## b" \
|
| 133 |
+
"EIGEN_CAT(a,b)=EIGEN_CAT2(a,b)" \
|
| 134 |
+
"EIGEN_CWISE_BINARY_RETURN_TYPE(LHS,RHS,OPNAME)=CwiseBinaryOp<EIGEN_CAT(EIGEN_CAT(internal::scalar_,OPNAME),_op)<LHS::Scalar, RHS::Scalar>, const LHS, const RHS>" \
|
| 135 |
+
EIGEN_ALIGN_TO_BOUNDARY(x)= \
|
| 136 |
+
"DOXCOMMA=," \
|
| 137 |
+
"EIGEN_STATIC_ASSERT(COND,MSG)=" \
|
| 138 |
+
EIGEN_HAS_CXX11_MATH=1 \
|
| 139 |
+
EIGEN_HAS_CXX11=1
|
| 140 |
+
EXPAND_AS_DEFINED = EIGEN_MAKE_TYPEDEFS \
|
| 141 |
+
EIGEN_MAKE_FIXED_TYPEDEFS \
|
| 142 |
+
EIGEN_MAKE_TYPEDEFS_ALL_SIZES \
|
| 143 |
+
EIGEN_MAKE_ARRAY_TYPEDEFS \
|
| 144 |
+
EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS \
|
| 145 |
+
EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES \
|
| 146 |
+
EIGEN_CWISE_UNOP_RETURN_TYPE \
|
| 147 |
+
EIGEN_CWISE_BINOP_RETURN_TYPE \
|
| 148 |
+
EIGEN_CURRENT_STORAGE_BASE_CLASS \
|
| 149 |
+
EIGEN_MATHFUNC_IMPL \
|
| 150 |
+
_EIGEN_GENERIC_PUBLIC_INTERFACE \
|
| 151 |
+
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY \
|
| 152 |
+
EIGEN_EMPTY \
|
| 153 |
+
EIGEN_EULER_ANGLES_TYPEDEFS \
|
| 154 |
+
EIGEN_EULER_ANGLES_SINGLE_TYPEDEF \
|
| 155 |
+
EIGEN_EULER_SYSTEM_TYPEDEF \
|
| 156 |
+
EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY \
|
| 157 |
+
EIGEN_MATRIX_FUNCTION \
|
| 158 |
+
EIGEN_MATRIX_FUNCTION_1 \
|
| 159 |
+
EIGEN_DOC_UNARY_ADDONS \
|
| 160 |
+
EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL \
|
| 161 |
+
EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF \
|
| 162 |
+
EIGEN_MAKE_SCALAR_BINARY_OP \
|
| 163 |
+
EIGEN_MAKE_SCALAR_BINARY_OP_ONTHERIGHT
|
| 164 |
+
TAGFILES = ${EIGEN_DOXY_TAGFILES}
|
| 165 |
+
GENERATE_TAGFILE = ${Eigen_BINARY_DIR}/doc/${EIGEN_DOXY_PROJECT_NAME}.doxytags
|
| 166 |
+
EXTERNAL_GROUPS = NO
|
| 167 |
+
EXTERNAL_PAGES = NO
|
| 168 |
+
HIDE_UNDOC_RELATIONS = NO
|
| 169 |
+
HAVE_DOT = YES
|
| 170 |
+
COLLABORATION_GRAPH = NO
|
| 171 |
+
GROUP_GRAPHS = NO
|
| 172 |
+
UML_LOOK = YES
|
| 173 |
+
INCLUDE_GRAPH = NO
|
| 174 |
+
INCLUDED_BY_GRAPH = NO
|
| 175 |
+
GRAPHICAL_HIERARCHY = NO
|
| 176 |
+
DIRECTORY_GRAPH = NO
|
| 177 |
+
DOT_GRAPH_MAX_NODES = 300
|
| 178 |
+
GENERATE_DEPRECATEDLIST = NO
|
| 179 |
+
GENERATE_TODOLIST = NO
|
| 180 |
+
WARN_AS_ERROR = FAIL_ON_WARNINGS_PRINT
|
include/eigen/doc/FixedSizeVectorizable.dox
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
namespace Eigen {
|
| 2 |
+
|
| 3 |
+
/** \eigenManualPage TopicFixedSizeVectorizable Fixed-size vectorizable %Eigen objects
|
| 4 |
+
|
| 5 |
+
The goal of this page is to explain what we mean by "fixed-size vectorizable".
|
| 6 |
+
|
| 7 |
+
\section FixedSizeVectorizable_summary Executive Summary
|
| 8 |
+
|
| 9 |
+
An Eigen object is called "fixed-size vectorizable" if it has fixed size and that size is a multiple of 16 bytes.
|
| 10 |
+
|
| 11 |
+
Examples include:
|
| 12 |
+
\li Eigen::Vector2d
|
| 13 |
+
\li Eigen::Vector4d
|
| 14 |
+
\li Eigen::Vector4f
|
| 15 |
+
\li Eigen::Matrix2d
|
| 16 |
+
\li Eigen::Matrix2f
|
| 17 |
+
\li Eigen::Matrix4d
|
| 18 |
+
\li Eigen::Matrix4f
|
| 19 |
+
\li Eigen::Affine3d
|
| 20 |
+
\li Eigen::Affine3f
|
| 21 |
+
\li Eigen::Quaterniond
|
| 22 |
+
\li Eigen::Quaternionf
|
| 23 |
+
|
| 24 |
+
\section FixedSizeVectorizable_explanation Explanation
|
| 25 |
+
|
| 26 |
+
First, "fixed-size" should be clear: an %Eigen object has fixed size if its number of rows and its number of columns are fixed at compile-time. So for example \ref Matrix3f has fixed size, but \ref MatrixXf doesn't (the opposite of fixed-size is dynamic-size).
|
| 27 |
+
|
| 28 |
+
The array of coefficients of a fixed-size %Eigen object is a plain "static array", it is not dynamically allocated. For example, the data behind a \ref Matrix4f is just a "float array[16]".
|
| 29 |
+
|
| 30 |
+
Fixed-size objects are typically very small, which means that we want to handle them with zero runtime overhead -- both in terms of memory usage and of speed.
|
| 31 |
+
|
| 32 |
+
Now, vectorization works with 128-bit packets (e.g., SSE, AltiVec, NEON), 256-bit packets (e.g., AVX), or 512-bit packets (e.g., AVX512). Moreover, for performance reasons, these packets are most efficiently read and written if they have the same alignment as the packet size, that is 16 bytes, 32 bytes, and 64 bytes respectively.
|
| 33 |
+
|
| 34 |
+
So it turns out that the best way that fixed-size %Eigen objects can be vectorized, is if their size is a multiple of 16 bytes (or more). %Eigen will then request 16-byte alignment (or more) for these objects, and henceforth rely on these objects being aligned to achieve maximal efficiency.
|
| 35 |
+
|
| 36 |
+
*/
|
| 37 |
+
|
| 38 |
+
}
|
include/eigen/doc/HiPerformance.dox
ADDED
|
@@ -0,0 +1,128 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
namespace Eigen {
|
| 3 |
+
|
| 4 |
+
/** \page TopicWritingEfficientProductExpression Writing efficient matrix product expressions
|
| 5 |
+
|
| 6 |
+
In general achieving good performance with Eigen does not require any special effort:
|
| 7 |
+
simply write your expressions in the most high level way. This is especially true
|
| 8 |
+
for small fixed size matrices. For large matrices, however, it might be useful to
|
| 9 |
+
take some care when writing your expressions in order to minimize useless evaluations
|
| 10 |
+
and optimize the performance.
|
| 11 |
+
In this page we will give a brief overview of the Eigen's internal mechanism to simplify
|
| 12 |
+
and evaluate complex product expressions, and discuss the current limitations.
|
| 13 |
+
In particular we will focus on expressions matching level 2 and 3 BLAS routines, i.e.,
|
| 14 |
+
all kinds of matrix products and triangular solvers.
|
| 15 |
+
|
| 16 |
+
Indeed, in Eigen we have implemented a set of highly optimized routines which are very similar
|
| 17 |
+
to BLAS's ones. Unlike BLAS, those routines are made available to the user via a high level and
|
| 18 |
+
natural API. Each of these routines can compute in a single evaluation a wide variety of expressions.
|
| 19 |
+
Given an expression, the challenge is then to map it to a minimal set of routines.
|
| 20 |
+
As explained later, this mechanism has some limitations, and knowing them will allow
|
| 21 |
+
you to write faster code by making your expressions more Eigen friendly.
|
| 22 |
+
|
| 23 |
+
\section GEMM General Matrix-Matrix product (GEMM)
|
| 24 |
+
|
| 25 |
+
Let's start with the most common primitive: the matrix product of general dense matrices.
|
| 26 |
+
In the BLAS world this corresponds to the GEMM routine. Our equivalent primitive can
|
| 27 |
+
perform the following operation:
|
| 28 |
+
\f$ C.noalias() += \alpha op1(A) op2(B) \f$
|
| 29 |
+
where A, B, and C are column and/or row major matrices (or sub-matrices),
|
| 30 |
+
alpha is a scalar value, and op1, op2 can be transpose, adjoint, conjugate, or the identity.
|
| 31 |
+
When Eigen detects a matrix product, it analyzes both sides of the product to extract a
|
| 32 |
+
unique scalar factor alpha, and for each side, its effective storage order, shape, and conjugation states.
|
| 33 |
+
More precisely each side is simplified by iteratively removing trivial expressions such as scalar multiple,
|
| 34 |
+
negation and conjugation. Transpose and Block expressions are not evaluated and they only modify the storage order
|
| 35 |
+
and shape. All other expressions are immediately evaluated.
|
| 36 |
+
For instance, the following expression:
|
| 37 |
+
\code m1.noalias() -= s4 * (s1 * m2.adjoint() * (-(s3*m3).conjugate()*s2)) \endcode
|
| 38 |
+
is automatically simplified to:
|
| 39 |
+
\code m1.noalias() += (s1*s2*conj(s3)*s4) * m2.adjoint() * m3.conjugate() \endcode
|
| 40 |
+
which exactly matches our GEMM routine.
|
| 41 |
+
|
| 42 |
+
\subsection GEMM_Limitations Limitations
|
| 43 |
+
Unfortunately, this simplification mechanism is not perfect yet and not all expressions which could be
|
| 44 |
+
handled by a single GEMM-like call are correctly detected.
|
| 45 |
+
<table class="manual" style="width:100%">
|
| 46 |
+
<tr>
|
| 47 |
+
<th>Not optimal expression</th>
|
| 48 |
+
<th>Evaluated as</th>
|
| 49 |
+
<th>Optimal version (single evaluation)</th>
|
| 50 |
+
<th>Comments</th>
|
| 51 |
+
</tr>
|
| 52 |
+
<tr>
|
| 53 |
+
<td>\code
|
| 54 |
+
m1 += m2 * m3; \endcode</td>
|
| 55 |
+
<td>\code
|
| 56 |
+
temp = m2 * m3;
|
| 57 |
+
m1 += temp; \endcode</td>
|
| 58 |
+
<td>\code
|
| 59 |
+
m1.noalias() += m2 * m3; \endcode</td>
|
| 60 |
+
<td>Use .noalias() to tell Eigen the result and right-hand-sides do not alias.
|
| 61 |
+
Otherwise the product m2 * m3 is evaluated into a temporary.</td>
|
| 62 |
+
</tr>
|
| 63 |
+
<tr class="alt">
|
| 64 |
+
<td></td>
|
| 65 |
+
<td></td>
|
| 66 |
+
<td>\code
|
| 67 |
+
m1.noalias() += s1 * (m2 * m3); \endcode</td>
|
| 68 |
+
<td>This is a special feature of Eigen. Here the product between a scalar
|
| 69 |
+
and a matrix product does not evaluate the matrix product but instead it
|
| 70 |
+
returns a matrix product expression tracking the scalar scaling factor. <br>
|
| 71 |
+
Without this optimization, the matrix product would be evaluated into a
|
| 72 |
+
temporary as in the next example.</td>
|
| 73 |
+
</tr>
|
| 74 |
+
<tr>
|
| 75 |
+
<td>\code
|
| 76 |
+
m1.noalias() += (m2 * m3).adjoint(); \endcode</td>
|
| 77 |
+
<td>\code
|
| 78 |
+
temp = m2 * m3;
|
| 79 |
+
m1 += temp.adjoint(); \endcode</td>
|
| 80 |
+
<td>\code
|
| 81 |
+
m1.noalias() += m3.adjoint()
|
| 82 |
+
* m2.adjoint(); \endcode</td>
|
| 83 |
+
<td>This is because the product expression has the EvalBeforeNesting bit which
|
| 84 |
+
enforces the evaluation of the product by the Transpose expression.</td>
|
| 85 |
+
</tr>
|
| 86 |
+
<tr class="alt">
|
| 87 |
+
<td>\code
|
| 88 |
+
m1 = m1 + m2 * m3; \endcode</td>
|
| 89 |
+
<td>\code
|
| 90 |
+
temp = m2 * m3;
|
| 91 |
+
m1 = m1 + temp; \endcode</td>
|
| 92 |
+
<td>\code m1.noalias() += m2 * m3; \endcode</td>
|
| 93 |
+
<td>Here there is no way to detect at compile time that the two m1 are the same,
|
| 94 |
+
and so the matrix product will be immediately evaluated.</td>
|
| 95 |
+
</tr>
|
| 96 |
+
<tr>
|
| 97 |
+
<td>\code
|
| 98 |
+
m1.noalias() = m4 + m2 * m3; \endcode</td>
|
| 99 |
+
<td>\code
|
| 100 |
+
temp = m2 * m3;
|
| 101 |
+
m1 = m4 + temp; \endcode</td>
|
| 102 |
+
<td>\code
|
| 103 |
+
m1 = m4;
|
| 104 |
+
m1.noalias() += m2 * m3; \endcode</td>
|
| 105 |
+
<td>First of all, here the .noalias() in the first expression is useless because
|
| 106 |
+
m2*m3 will be evaluated anyway. However, note how this expression can be rewritten
|
| 107 |
+
so that no temporary is required. (tip: for very small fixed size matrix
|
| 108 |
+
it is slightly better to rewrite it like this: m1.noalias() = m2 * m3; m1 += m4;)</td>
|
| 109 |
+
</tr>
|
| 110 |
+
<tr class="alt">
|
| 111 |
+
<td>\code
|
| 112 |
+
m1.noalias() += (s1*m2).block(..) * m3; \endcode</td>
|
| 113 |
+
<td>\code
|
| 114 |
+
temp = (s1*m2).block(..);
|
| 115 |
+
m1 += temp * m3; \endcode</td>
|
| 116 |
+
<td>\code
|
| 117 |
+
m1.noalias() += s1 * m2.block(..) * m3; \endcode</td>
|
| 118 |
+
<td>This is because our expression analyzer is currently not able to extract trivial
|
| 119 |
+
expressions nested in a Block expression. Therefore the nested scalar
|
| 120 |
+
multiple cannot be properly extracted.</td>
|
| 121 |
+
</tr>
|
| 122 |
+
</table>
|
| 123 |
+
|
| 124 |
+
Of course all these remarks hold for all other kind of products involving triangular or selfadjoint matrices.
|
| 125 |
+
|
| 126 |
+
*/
|
| 127 |
+
|
| 128 |
+
}
|
include/eigen/doc/InplaceDecomposition.dox
ADDED
|
@@ -0,0 +1,115 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
namespace Eigen {
|
| 2 |
+
|
| 3 |
+
/** \eigenManualPage InplaceDecomposition Inplace matrix decompositions
|
| 4 |
+
|
| 5 |
+
Starting from %Eigen 3.3, the LU, Cholesky, and QR decompositions can operate \em inplace, that is, directly within the given input matrix.
|
| 6 |
+
This feature is especially useful when dealing with huge matrices, and/or when the available memory is very limited (embedded systems).
|
| 7 |
+
|
| 8 |
+
To this end, the respective decomposition class must be instantiated with a Ref<> matrix type, and the decomposition object must be constructed with the input matrix as argument. As an example, let us consider an inplace LU decomposition with partial pivoting.
|
| 9 |
+
|
| 10 |
+
Let's start with the basic inclusions, and declaration of a 2x2 matrix \c A:
|
| 11 |
+
|
| 12 |
+
<table class="example">
|
| 13 |
+
<tr><th>code</th><th>output</th></tr>
|
| 14 |
+
<tr>
|
| 15 |
+
<td>\snippet TutorialInplaceLU.cpp init
|
| 16 |
+
</td>
|
| 17 |
+
<td>\snippet TutorialInplaceLU.out init
|
| 18 |
+
</td>
|
| 19 |
+
</tr>
|
| 20 |
+
</table>
|
| 21 |
+
|
| 22 |
+
No surprise here! Then, let's declare our inplace LU object \c lu, and check the content of the matrix \c A:
|
| 23 |
+
|
| 24 |
+
<table class="example">
|
| 25 |
+
<tr>
|
| 26 |
+
<td>\snippet TutorialInplaceLU.cpp declaration
|
| 27 |
+
</td>
|
| 28 |
+
<td>\snippet TutorialInplaceLU.out declaration
|
| 29 |
+
</td>
|
| 30 |
+
</tr>
|
| 31 |
+
</table>
|
| 32 |
+
|
| 33 |
+
Here, the \c lu object computes and stores the \c L and \c U factors within the memory held by the matrix \c A.
|
| 34 |
+
The coefficients of \c A have thus been destroyed during the factorization, and replaced by the L and U factors as one can verify:
|
| 35 |
+
|
| 36 |
+
<table class="example">
|
| 37 |
+
<tr>
|
| 38 |
+
<td>\snippet TutorialInplaceLU.cpp matrixLU
|
| 39 |
+
</td>
|
| 40 |
+
<td>\snippet TutorialInplaceLU.out matrixLU
|
| 41 |
+
</td>
|
| 42 |
+
</tr>
|
| 43 |
+
</table>
|
| 44 |
+
|
| 45 |
+
Then, one can use the \c lu object as usual, for instance to solve the Ax=b problem:
|
| 46 |
+
<table class="example">
|
| 47 |
+
<tr>
|
| 48 |
+
<td>\snippet TutorialInplaceLU.cpp solve
|
| 49 |
+
</td>
|
| 50 |
+
<td>\snippet TutorialInplaceLU.out solve
|
| 51 |
+
</td>
|
| 52 |
+
</tr>
|
| 53 |
+
</table>
|
| 54 |
+
|
| 55 |
+
Here, since the content of the original matrix \c A has been lost, we had to declare a new matrix \c A0 to verify the result.
|
| 56 |
+
|
| 57 |
+
Since the memory is shared between \c A and \c lu, modifying the matrix \c A will make \c lu invalid.
|
| 58 |
+
This can easily be verified by modifying the content of \c A and trying to solve the initial problem again:
|
| 59 |
+
|
| 60 |
+
<table class="example">
|
| 61 |
+
<tr>
|
| 62 |
+
<td>\snippet TutorialInplaceLU.cpp modifyA
|
| 63 |
+
</td>
|
| 64 |
+
<td>\snippet TutorialInplaceLU.out modifyA
|
| 65 |
+
</td>
|
| 66 |
+
</tr>
|
| 67 |
+
</table>
|
| 68 |
+
|
| 69 |
+
Note that there is no shared pointer under the hood, it is the \b responsibility \b of \b the \b user to keep the input matrix \c A alive as long as \c lu is living.
|
| 70 |
+
|
| 71 |
+
If one wants to update the factorization with the modified A, one has to call the compute method as usual:
|
| 72 |
+
<table class="example">
|
| 73 |
+
<tr>
|
| 74 |
+
<td>\snippet TutorialInplaceLU.cpp recompute
|
| 75 |
+
</td>
|
| 76 |
+
<td>\snippet TutorialInplaceLU.out recompute
|
| 77 |
+
</td>
|
| 78 |
+
</tr>
|
| 79 |
+
</table>
|
| 80 |
+
|
| 81 |
+
Note that calling compute does not change the memory which is referenced by the \c lu object. Therefore, if the compute method is called with another matrix \c A1 different than \c A, then the content of \c A1 won't be modified. This is still the content of \c A that will be used to store the L and U factors of the matrix \c A1.
|
| 82 |
+
This can easily be verified as follows:
|
| 83 |
+
<table class="example">
|
| 84 |
+
<tr>
|
| 85 |
+
<td>\snippet TutorialInplaceLU.cpp recompute_bis0
|
| 86 |
+
</td>
|
| 87 |
+
<td>\snippet TutorialInplaceLU.out recompute_bis0
|
| 88 |
+
</td>
|
| 89 |
+
</tr>
|
| 90 |
+
</table>
|
| 91 |
+
The matrix \c A1 is unchanged, and one can thus solve A1*x=b, and directly check the residual without any copy of \c A1:
|
| 92 |
+
<table class="example">
|
| 93 |
+
<tr>
|
| 94 |
+
<td>\snippet TutorialInplaceLU.cpp recompute_bis1
|
| 95 |
+
</td>
|
| 96 |
+
<td>\snippet TutorialInplaceLU.out recompute_bis1
|
| 97 |
+
</td>
|
| 98 |
+
</tr>
|
| 99 |
+
</table>
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
Here is the list of matrix decompositions supporting this inplace mechanism:
|
| 103 |
+
|
| 104 |
+
- class LLT
|
| 105 |
+
- class LDLT
|
| 106 |
+
- class PartialPivLU
|
| 107 |
+
- class FullPivLU
|
| 108 |
+
- class HouseholderQR
|
| 109 |
+
- class ColPivHouseholderQR
|
| 110 |
+
- class FullPivHouseholderQR
|
| 111 |
+
- class CompleteOrthogonalDecomposition
|
| 112 |
+
|
| 113 |
+
*/
|
| 114 |
+
|
| 115 |
+
}
|
include/eigen/doc/LeastSquares.dox
ADDED
|
@@ -0,0 +1,75 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
namespace Eigen {
|
| 2 |
+
|
| 3 |
+
/** \eigenManualPage LeastSquares Solving linear least squares systems
|
| 4 |
+
|
| 5 |
+
This page describes how to solve linear least squares systems using %Eigen. An overdetermined system
|
| 6 |
+
of equations, say \a Ax = \a b, has no solutions. In this case, it makes sense to search for the
|
| 7 |
+
vector \a x which is closest to being a solution, in the sense that the difference \a Ax - \a b is
|
| 8 |
+
as small as possible. This \a x is called the least squares solution (if the Euclidean norm is used).
|
| 9 |
+
|
| 10 |
+
The three methods discussed on this page are the SVD decomposition, the QR decomposition and normal
|
| 11 |
+
equations. Of these, the SVD decomposition is generally the most accurate but the slowest, normal
|
| 12 |
+
equations is the fastest but least accurate, and the QR decomposition is in between.
|
| 13 |
+
|
| 14 |
+
\eigenAutoToc
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
\section LeastSquaresSVD Using the SVD decomposition
|
| 18 |
+
|
| 19 |
+
The \link BDCSVD::solve() solve() \endlink method in the BDCSVD class can be directly used to
|
| 20 |
+
solve linear squares systems. It is not enough to compute only the singular values (the default for
|
| 21 |
+
this class); you also need the singular vectors but the thin SVD decomposition suffices for
|
| 22 |
+
computing least squares solutions:
|
| 23 |
+
|
| 24 |
+
<table class="example">
|
| 25 |
+
<tr><th>Example:</th><th>Output:</th></tr>
|
| 26 |
+
<tr>
|
| 27 |
+
<td>\include TutorialLinAlgSVDSolve.cpp </td>
|
| 28 |
+
<td>\verbinclude TutorialLinAlgSVDSolve.out </td>
|
| 29 |
+
</tr>
|
| 30 |
+
</table>
|
| 31 |
+
|
| 32 |
+
This is an example from the page \link TutorialLinearAlgebra Linear algebra and decompositions \endlink.
|
| 33 |
+
If you just need to solve the least squares problem, but are not interested in the SVD per se, a
|
| 34 |
+
faster alternative method is CompleteOrthogonalDecomposition.
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
\section LeastSquaresQR Using the QR decomposition
|
| 38 |
+
|
| 39 |
+
The solve() method in QR decomposition classes also computes the least squares solution. There are
|
| 40 |
+
three QR decomposition classes: HouseholderQR (no pivoting, fast but unstable if your matrix is
|
| 41 |
+
not full rank), ColPivHouseholderQR (column pivoting, thus a bit slower but more stable) and
|
| 42 |
+
FullPivHouseholderQR (full pivoting, so slowest and slightly more stable than ColPivHouseholderQR).
|
| 43 |
+
Here is an example with column pivoting:
|
| 44 |
+
|
| 45 |
+
<table class="example">
|
| 46 |
+
<tr><th>Example:</th><th>Output:</th></tr>
|
| 47 |
+
<tr>
|
| 48 |
+
<td>\include LeastSquaresQR.cpp </td>
|
| 49 |
+
<td>\verbinclude LeastSquaresQR.out </td>
|
| 50 |
+
</tr>
|
| 51 |
+
</table>
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
\section LeastSquaresNormalEquations Using normal equations
|
| 55 |
+
|
| 56 |
+
Finding the least squares solution of \a Ax = \a b is equivalent to solving the normal equation
|
| 57 |
+
<i>A</i><sup>T</sup><i>Ax</i> = <i>A</i><sup>T</sup><i>b</i>. This leads to the following code
|
| 58 |
+
|
| 59 |
+
<table class="example">
|
| 60 |
+
<tr><th>Example:</th><th>Output:</th></tr>
|
| 61 |
+
<tr>
|
| 62 |
+
<td>\include LeastSquaresNormalEquations.cpp </td>
|
| 63 |
+
<td>\verbinclude LeastSquaresNormalEquations.out </td>
|
| 64 |
+
</tr>
|
| 65 |
+
</table>
|
| 66 |
+
|
| 67 |
+
This method is usually the fastest, especially when \a A is "tall and skinny". However, if the
|
| 68 |
+
matrix \a A is even mildly ill-conditioned, this is not a good method, because the condition number
|
| 69 |
+
of <i>A</i><sup>T</sup><i>A</i> is the square of the condition number of \a A. This means that you
|
| 70 |
+
lose roughly twice as many digits of accuracy using the normal equation, compared to the more stable
|
| 71 |
+
methods mentioned above.
|
| 72 |
+
|
| 73 |
+
*/
|
| 74 |
+
|
| 75 |
+
}
|
include/eigen/doc/Manual.dox
ADDED
|
@@ -0,0 +1,191 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
// This file structures pages and modules into a convenient hierarchical structure.
|
| 3 |
+
|
| 4 |
+
namespace Eigen {
|
| 5 |
+
|
| 6 |
+
/** \page UserManual_CustomizingEigen Extending/Customizing Eigen
|
| 7 |
+
%Eigen can be extended in several ways, for instance, by defining global methods, by inserting custom methods within main %Eigen's classes through the \ref TopicCustomizing_Plugins "plugin" mechanism, by adding support to \ref TopicCustomizing_CustomScalar "custom scalar types" etc. See below for the respective sub-topics.
|
| 8 |
+
- \subpage TopicCustomizing_Plugins
|
| 9 |
+
- \subpage TopicCustomizing_InheritingMatrix
|
| 10 |
+
- \subpage TopicCustomizing_CustomScalar
|
| 11 |
+
- \subpage TopicCustomizing_NullaryExpr
|
| 12 |
+
- \subpage TopicNewExpressionType
|
| 13 |
+
\sa \ref TopicPreprocessorDirectives
|
| 14 |
+
*/
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
/** \page UserManual_Generalities General topics
|
| 18 |
+
- \subpage TopicFunctionTakingEigenTypes
|
| 19 |
+
- \subpage TopicPreprocessorDirectives
|
| 20 |
+
- \subpage TopicAssertions
|
| 21 |
+
- \subpage TopicMultiThreading
|
| 22 |
+
- \subpage TopicUsingBlasLapack
|
| 23 |
+
- \subpage TopicUsingIntelMKL
|
| 24 |
+
- \subpage TopicCUDA
|
| 25 |
+
- \subpage TopicPitfalls
|
| 26 |
+
- \subpage TopicTemplateKeyword
|
| 27 |
+
- \subpage UserManual_UnderstandingEigen
|
| 28 |
+
- \subpage TopicCMakeGuide
|
| 29 |
+
*/
|
| 30 |
+
|
| 31 |
+
/** \page UserManual_UnderstandingEigen Understanding Eigen
|
| 32 |
+
- \subpage TopicInsideEigenExample
|
| 33 |
+
- \subpage TopicClassHierarchy
|
| 34 |
+
- \subpage TopicLazyEvaluation
|
| 35 |
+
*/
|
| 36 |
+
|
| 37 |
+
/** \page UnclassifiedPages Unclassified pages
|
| 38 |
+
- \subpage TopicResizing
|
| 39 |
+
- \subpage TopicVectorization
|
| 40 |
+
- \subpage TopicEigenExpressionTemplates
|
| 41 |
+
- \subpage TopicScalarTypes
|
| 42 |
+
- \subpage TutorialSparse_example_details
|
| 43 |
+
- \subpage TopicWritingEfficientProductExpression
|
| 44 |
+
- \subpage Experimental
|
| 45 |
+
*/
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
/** \defgroup Support_modules Support modules
|
| 49 |
+
* Category of modules which add support for external libraries.
|
| 50 |
+
*/
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
/** \defgroup DenseMatrixManipulation_chapter Dense matrix and array manipulation */
|
| 54 |
+
/** \defgroup DenseMatrixManipulation_Alignement Alignment issues */
|
| 55 |
+
/** \defgroup DenseMatrixManipulation_Reference Reference */
|
| 56 |
+
|
| 57 |
+
/** \addtogroup TutorialMatrixClass
|
| 58 |
+
\ingroup DenseMatrixManipulation_chapter */
|
| 59 |
+
/** \addtogroup TutorialMatrixArithmetic
|
| 60 |
+
\ingroup DenseMatrixManipulation_chapter */
|
| 61 |
+
/** \addtogroup TutorialArrayClass
|
| 62 |
+
\ingroup DenseMatrixManipulation_chapter */
|
| 63 |
+
/** \addtogroup TutorialBlockOperations
|
| 64 |
+
\ingroup DenseMatrixManipulation_chapter */
|
| 65 |
+
/** \addtogroup TutorialSlicingIndexing
|
| 66 |
+
\ingroup DenseMatrixManipulation_chapter */
|
| 67 |
+
/** \addtogroup TutorialAdvancedInitialization
|
| 68 |
+
\ingroup DenseMatrixManipulation_chapter */
|
| 69 |
+
/** \addtogroup TutorialReductionsVisitorsBroadcasting
|
| 70 |
+
\ingroup DenseMatrixManipulation_chapter */
|
| 71 |
+
/** \addtogroup TutorialReshape
|
| 72 |
+
\ingroup DenseMatrixManipulation_chapter */
|
| 73 |
+
/** \addtogroup TutorialSTL
|
| 74 |
+
\ingroup DenseMatrixManipulation_chapter */
|
| 75 |
+
/** \addtogroup TutorialMapClass
|
| 76 |
+
\ingroup DenseMatrixManipulation_chapter */
|
| 77 |
+
/** \addtogroup TopicAliasing
|
| 78 |
+
\ingroup DenseMatrixManipulation_chapter */
|
| 79 |
+
/** \addtogroup TopicStorageOrders
|
| 80 |
+
\ingroup DenseMatrixManipulation_chapter */
|
| 81 |
+
|
| 82 |
+
/** \addtogroup DenseMatrixManipulation_Alignement
|
| 83 |
+
\ingroup DenseMatrixManipulation_chapter */
|
| 84 |
+
/** \addtogroup TopicUnalignedArrayAssert
|
| 85 |
+
\ingroup DenseMatrixManipulation_Alignement */
|
| 86 |
+
/** \addtogroup TopicFixedSizeVectorizable
|
| 87 |
+
\ingroup DenseMatrixManipulation_Alignement */
|
| 88 |
+
/** \addtogroup TopicStructHavingEigenMembers
|
| 89 |
+
\ingroup DenseMatrixManipulation_Alignement */
|
| 90 |
+
/** \addtogroup TopicStlContainers
|
| 91 |
+
\ingroup DenseMatrixManipulation_Alignement */
|
| 92 |
+
/** \addtogroup TopicPassingByValue
|
| 93 |
+
\ingroup DenseMatrixManipulation_Alignement */
|
| 94 |
+
/** \addtogroup TopicWrongStackAlignment
|
| 95 |
+
\ingroup DenseMatrixManipulation_Alignement */
|
| 96 |
+
|
| 97 |
+
/** \addtogroup DenseMatrixManipulation_Reference
|
| 98 |
+
\ingroup DenseMatrixManipulation_chapter */
|
| 99 |
+
/** \addtogroup Core_Module
|
| 100 |
+
\ingroup DenseMatrixManipulation_Reference */
|
| 101 |
+
/** \addtogroup Jacobi_Module
|
| 102 |
+
\ingroup DenseMatrixManipulation_Reference */
|
| 103 |
+
/** \addtogroup Householder_Module
|
| 104 |
+
\ingroup DenseMatrixManipulation_Reference */
|
| 105 |
+
|
| 106 |
+
/** \addtogroup CoeffwiseMathFunctions
|
| 107 |
+
\ingroup DenseMatrixManipulation_chapter */
|
| 108 |
+
|
| 109 |
+
/** \addtogroup QuickRefPage
|
| 110 |
+
\ingroup DenseMatrixManipulation_chapter */
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
/** \defgroup DenseLinearSolvers_chapter Dense linear problems and decompositions */
|
| 114 |
+
/** \defgroup DenseLinearSolvers_Reference Reference */
|
| 115 |
+
|
| 116 |
+
/** \addtogroup TutorialLinearAlgebra
|
| 117 |
+
\ingroup DenseLinearSolvers_chapter */
|
| 118 |
+
/** \addtogroup TopicLinearAlgebraDecompositions
|
| 119 |
+
\ingroup DenseLinearSolvers_chapter */
|
| 120 |
+
/** \addtogroup LeastSquares
|
| 121 |
+
\ingroup DenseLinearSolvers_chapter */
|
| 122 |
+
/** \addtogroup InplaceDecomposition
|
| 123 |
+
\ingroup DenseLinearSolvers_chapter */
|
| 124 |
+
/** \addtogroup DenseDecompositionBenchmark
|
| 125 |
+
\ingroup DenseLinearSolvers_chapter */
|
| 126 |
+
|
| 127 |
+
/** \addtogroup DenseLinearSolvers_Reference
|
| 128 |
+
\ingroup DenseLinearSolvers_chapter */
|
| 129 |
+
/** \addtogroup Cholesky_Module
|
| 130 |
+
\ingroup DenseLinearSolvers_Reference */
|
| 131 |
+
/** \addtogroup LU_Module
|
| 132 |
+
\ingroup DenseLinearSolvers_Reference */
|
| 133 |
+
/** \addtogroup QR_Module
|
| 134 |
+
\ingroup DenseLinearSolvers_Reference */
|
| 135 |
+
/** \addtogroup SVD_Module
|
| 136 |
+
\ingroup DenseLinearSolvers_Reference*/
|
| 137 |
+
/** \addtogroup Eigenvalues_Module
|
| 138 |
+
\ingroup DenseLinearSolvers_Reference */
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
|
| 142 |
+
|
| 143 |
+
/** \defgroup Sparse_chapter Sparse linear algebra */
|
| 144 |
+
/** \defgroup Sparse_Reference Reference */
|
| 145 |
+
|
| 146 |
+
/** \addtogroup TutorialSparse
|
| 147 |
+
\ingroup Sparse_chapter */
|
| 148 |
+
/** \addtogroup TopicSparseSystems
|
| 149 |
+
\ingroup Sparse_chapter */
|
| 150 |
+
/** \addtogroup MatrixfreeSolverExample
|
| 151 |
+
\ingroup Sparse_chapter */
|
| 152 |
+
|
| 153 |
+
/** \addtogroup Sparse_Reference
|
| 154 |
+
\ingroup Sparse_chapter */
|
| 155 |
+
/** \addtogroup SparseCore_Module
|
| 156 |
+
\ingroup Sparse_Reference */
|
| 157 |
+
/** \addtogroup OrderingMethods_Module
|
| 158 |
+
\ingroup Sparse_Reference */
|
| 159 |
+
/** \addtogroup SparseCholesky_Module
|
| 160 |
+
\ingroup Sparse_Reference */
|
| 161 |
+
/** \addtogroup SparseLU_Module
|
| 162 |
+
\ingroup Sparse_Reference */
|
| 163 |
+
/** \addtogroup SparseQR_Module
|
| 164 |
+
\ingroup Sparse_Reference */
|
| 165 |
+
/** \addtogroup IterativeLinearSolvers_Module
|
| 166 |
+
\ingroup Sparse_Reference */
|
| 167 |
+
/** \addtogroup Sparse_Module
|
| 168 |
+
\ingroup Sparse_Reference */
|
| 169 |
+
/** \addtogroup Support_modules
|
| 170 |
+
\ingroup Sparse_Reference */
|
| 171 |
+
|
| 172 |
+
/** \addtogroup SparseQuickRefPage
|
| 173 |
+
\ingroup Sparse_chapter */
|
| 174 |
+
|
| 175 |
+
|
| 176 |
+
/** \defgroup Geometry_chapter Geometry */
|
| 177 |
+
/** \defgroup Geometry_Reference Reference */
|
| 178 |
+
|
| 179 |
+
/** \addtogroup TutorialGeometry
|
| 180 |
+
\ingroup Geometry_chapter */
|
| 181 |
+
|
| 182 |
+
/** \addtogroup Geometry_Reference
|
| 183 |
+
\ingroup Geometry_chapter */
|
| 184 |
+
/** \addtogroup Geometry_Module
|
| 185 |
+
\ingroup Geometry_Reference */
|
| 186 |
+
/** \addtogroup Splines_Module
|
| 187 |
+
\ingroup Geometry_Reference */
|
| 188 |
+
|
| 189 |
+
/** \internal \brief Namespace containing low-level routines from the %Eigen library. */
|
| 190 |
+
namespace internal {}
|
| 191 |
+
}
|
include/eigen/doc/Overview.dox
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
namespace Eigen {
|
| 2 |
+
|
| 3 |
+
/** \mainpage notitle
|
| 4 |
+
|
| 5 |
+
This is the API documentation for Eigen3. You can <a href="eigen-doc.tgz">download</a> it as a tgz archive for offline reading.
|
| 6 |
+
|
| 7 |
+
For a first contact with Eigen, the best place is to have a look at the \subpage GettingStarted page that show you how to write and compile your first program with Eigen.
|
| 8 |
+
|
| 9 |
+
Then, the \b quick \b reference \b pages give you a quite complete description of the API in a very condensed format that is specially useful to recall the syntax of a particular feature, or to have a quick look at the API. They currently cover the two following feature sets, and more will come in the future:
|
| 10 |
+
- \link QuickRefPage [QuickRef] Dense matrix and array manipulations \endlink
|
| 11 |
+
- \link SparseQuickRefPage [QuickRef] Sparse linear algebra \endlink
|
| 12 |
+
|
| 13 |
+
You're a MatLab user? There is also a <a href="AsciiQuickReference.txt">short ASCII reference</a> with Matlab translations.
|
| 14 |
+
|
| 15 |
+
The \b main \b documentation is organized into \em chapters covering different domains of features.
|
| 16 |
+
They are themselves composed of \em user \em manual pages describing the different features in a comprehensive way, and \em reference pages that gives you access to the API documentation through the related Eigen's \em modules and \em classes.
|
| 17 |
+
|
| 18 |
+
Under the \subpage UserManual_CustomizingEigen section, you will find discussions and examples on extending %Eigen's features and supporting custom scalar types.
|
| 19 |
+
|
| 20 |
+
Under the \subpage UserManual_Generalities section, you will find documentation on more general topics such as preprocessor directives, controlling assertions, multi-threading, MKL support, some Eigen's internal insights, and much more...
|
| 21 |
+
|
| 22 |
+
For details regarding Eigen's inner-workings, see the \subpage UserManual_UnderstandingEigen section.
|
| 23 |
+
|
| 24 |
+
Some random topics can be found under the \subpage UnclassifiedPages section.
|
| 25 |
+
|
| 26 |
+
Finally, do not miss the search engine, useful to quickly get to the documentation of a given class or function.
|
| 27 |
+
|
| 28 |
+
Want more? Checkout the <a href="unsupported/index.html">\em unsupported \em modules </a> documentation.
|
| 29 |
+
|
| 30 |
+
*/
|
| 31 |
+
|
| 32 |
+
}
|
include/eigen/doc/PassingByValue.dox
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
namespace Eigen {
|
| 2 |
+
|
| 3 |
+
/** \eigenManualPage TopicPassingByValue Passing Eigen objects by value to functions
|
| 4 |
+
|
| 5 |
+
Passing objects by value is almost always a very bad idea in C++, as this means useless copies, and one should pass them by reference instead.
|
| 6 |
+
|
| 7 |
+
With %Eigen, this is even more important: passing \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen objects" by value is not only inefficient, it can be illegal or make your program crash! And the reason is that these %Eigen objects have alignment modifiers that aren't respected when they are passed by value.
|
| 8 |
+
|
| 9 |
+
For example, a function like this, where \c v is passed by value:
|
| 10 |
+
|
| 11 |
+
\code
|
| 12 |
+
void my_function(Eigen::Vector2d v);
|
| 13 |
+
\endcode
|
| 14 |
+
|
| 15 |
+
needs to be rewritten as follows, passing \c v by const reference:
|
| 16 |
+
|
| 17 |
+
\code
|
| 18 |
+
void my_function(const Eigen::Vector2d& v);
|
| 19 |
+
\endcode
|
| 20 |
+
|
| 21 |
+
Likewise if you have a class having an %Eigen object as member:
|
| 22 |
+
|
| 23 |
+
\code
|
| 24 |
+
struct Foo
|
| 25 |
+
{
|
| 26 |
+
Eigen::Vector2d v;
|
| 27 |
+
};
|
| 28 |
+
void my_function(Foo v);
|
| 29 |
+
\endcode
|
| 30 |
+
|
| 31 |
+
This function also needs to be rewritten like this:
|
| 32 |
+
\code
|
| 33 |
+
void my_function(const Foo& v);
|
| 34 |
+
\endcode
|
| 35 |
+
|
| 36 |
+
Note that on the other hand, there is no problem with functions that return objects by value.
|
| 37 |
+
|
| 38 |
+
*/
|
| 39 |
+
|
| 40 |
+
}
|
include/eigen/doc/Pitfalls.dox
ADDED
|
@@ -0,0 +1,149 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
namespace Eigen {
|
| 2 |
+
|
| 3 |
+
/** \page TopicPitfalls Common pitfalls
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
\section TopicPitfalls_template_keyword Compilation error with template methods
|
| 7 |
+
|
| 8 |
+
See this \link TopicTemplateKeyword page \endlink.
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
\section TopicPitfalls_aliasing Aliasing
|
| 12 |
+
|
| 13 |
+
Don't miss this \link TopicAliasing page \endlink on aliasing,
|
| 14 |
+
especially if you got wrong results in statements where the destination appears on the right hand side of the expression.
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
\section TopicPitfalls_alignment_issue Alignment Issues (runtime assertion)
|
| 18 |
+
|
| 19 |
+
%Eigen does explicit vectorization, and while that is appreciated by many users, that also leads to some issues in special situations where data alignment is compromised.
|
| 20 |
+
Indeed, prior to C++17, C++ did not have good enough support for explicit data alignment.
|
| 21 |
+
In that case your program hits an assertion failure (that is, a "controlled crash") with a message that tells you to consult this page:
|
| 22 |
+
\code
|
| 23 |
+
http://eigen.tuxfamily.org/dox/group__TopicUnalignedArrayAssert.html
|
| 24 |
+
\endcode
|
| 25 |
+
Have a look at \link TopicUnalignedArrayAssert it \endlink and see for yourself if that's something that you can cope with.
|
| 26 |
+
It contains detailed information about how to deal with each known cause for that issue.
|
| 27 |
+
|
| 28 |
+
Now what if you don't care about vectorization and so don't want to be annoyed with these alignment issues? Then read \link getrid how to get rid of them \endlink.
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
\section TopicPitfalls_auto_keyword C++11 and the auto keyword
|
| 32 |
+
|
| 33 |
+
In short: do not use the auto keywords with %Eigen's expressions, unless you are 100% sure about what you are doing. In particular, do not use the auto keyword as a replacement for a \c Matrix<> type. Here is an example:
|
| 34 |
+
|
| 35 |
+
\code
|
| 36 |
+
MatrixXd A, B;
|
| 37 |
+
auto C = A*B;
|
| 38 |
+
for(...) { ... w = C * v; ...}
|
| 39 |
+
\endcode
|
| 40 |
+
|
| 41 |
+
In this example, the type of C is not a \c MatrixXd but an abstract expression representing a matrix product and storing references to \c A and \c B.
|
| 42 |
+
Therefore, the product of \c A*B will be carried out multiple times, once per iteration of the for loop.
|
| 43 |
+
Moreover, if the coefficients of `A` or `B` change during the iteration, then `C` will evaluate to different values as in the following example:
|
| 44 |
+
|
| 45 |
+
\code
|
| 46 |
+
MatrixXd A = ..., B = ...;
|
| 47 |
+
auto C = A*B;
|
| 48 |
+
MatrixXd R1 = C;
|
| 49 |
+
A = ...;
|
| 50 |
+
MatrixXd R2 = C;
|
| 51 |
+
\endcode
|
| 52 |
+
for which we end up with `R1` ≠ `R2`.
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
Here is another example leading to a segfault:
|
| 56 |
+
\code
|
| 57 |
+
auto C = ((A+B).eval()).transpose();
|
| 58 |
+
// do something with C
|
| 59 |
+
\endcode
|
| 60 |
+
The problem is that \c eval() returns a temporary object (in this case a \c MatrixXd) which is then referenced by the \c Transpose<> expression.
|
| 61 |
+
However, this temporary is deleted right after the first line, and then the \c C expression references a dead object.
|
| 62 |
+
One possible fix consists in applying \c eval() on the whole expression:
|
| 63 |
+
\code
|
| 64 |
+
auto C = (A+B).transpose().eval();
|
| 65 |
+
\endcode
|
| 66 |
+
|
| 67 |
+
The same issue might occur when sub expressions are automatically evaluated by %Eigen as in the following example:
|
| 68 |
+
\code
|
| 69 |
+
VectorXd u, v;
|
| 70 |
+
auto C = u + (A*v).normalized();
|
| 71 |
+
// do something with C
|
| 72 |
+
\endcode
|
| 73 |
+
Here the \c normalized() method has to evaluate the expensive product \c A*v to avoid evaluating it twice.
|
| 74 |
+
Again, one possible fix is to call \c .eval() on the whole expression:
|
| 75 |
+
\code
|
| 76 |
+
auto C = (u + (A*v).normalized()).eval();
|
| 77 |
+
\endcode
|
| 78 |
+
In this case, \c C will be a regular \c VectorXd object.
|
| 79 |
+
Note that DenseBase::eval() is smart enough to avoid copies when the underlying expression is already a plain \c Matrix<>.
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
\section TopicPitfalls_header_issues Header Issues (failure to compile)
|
| 83 |
+
|
| 84 |
+
With all libraries, one must check the documentation for which header to include.
|
| 85 |
+
The same is true with %Eigen, but slightly worse: with %Eigen, a method in a class may require an additional \c \#include over what the class itself requires!
|
| 86 |
+
For example, if you want to use the \c cross() method on a vector (it computes a cross-product) then you need to:
|
| 87 |
+
\code
|
| 88 |
+
#include<Eigen/Geometry>
|
| 89 |
+
\endcode
|
| 90 |
+
We try to always document this, but do tell us if we forgot an occurrence.
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
\section TopicPitfalls_ternary_operator Ternary operator
|
| 94 |
+
|
| 95 |
+
In short: avoid the use of the ternary operator <code>(COND ? THEN : ELSE)</code> with %Eigen's expressions for the \c THEN and \c ELSE statements.
|
| 96 |
+
To see why, let's consider the following example:
|
| 97 |
+
\code
|
| 98 |
+
Vector3f A;
|
| 99 |
+
A << 1, 2, 3;
|
| 100 |
+
Vector3f B = ((1 < 0) ? (A.reverse()) : A);
|
| 101 |
+
\endcode
|
| 102 |
+
This example will return <code>B = 3, 2, 1</code>. Do you see why?
|
| 103 |
+
The reason is that in C++ the type of the \c ELSE statement is inferred from the type of the \c THEN expression such that both match.
|
| 104 |
+
Since \c THEN is a <code>Reverse<Vector3f></code>, the \c ELSE statement A is converted to a <code>Reverse<Vector3f></code>, and the compiler thus generates:
|
| 105 |
+
\code
|
| 106 |
+
Vector3f B = ((1 < 0) ? (A.reverse()) : Reverse<Vector3f>(A));
|
| 107 |
+
\endcode
|
| 108 |
+
In this very particular case, a workaround would be to call A.reverse().eval() for the \c THEN statement, but the safest and fastest is really to avoid this ternary operator with %Eigen's expressions and use a if/else construct.
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
\section TopicPitfalls_pass_by_value Pass-by-value
|
| 112 |
+
|
| 113 |
+
If you don't know why passing-by-value is wrong with %Eigen, read this \link TopicPassingByValue page \endlink first.
|
| 114 |
+
|
| 115 |
+
While you may be extremely careful and use care to make sure that all of your code that explicitly uses %Eigen types is pass-by-reference, you have to watch out for templates which define the argument types at compile time.
|
| 116 |
+
|
| 117 |
+
If a template has a function that takes arguments pass-by-value, and the relevant template parameter ends up being an %Eigen type, then you will of course have the same alignment problems that you would in an explicitly defined function passing %Eigen types by value.
|
| 118 |
+
|
| 119 |
+
Using %Eigen types with other third party libraries or even the STL can present the same problem.
|
| 120 |
+
<code>boost::bind</code> for example uses pass-by-value to store arguments in the returned functor.
|
| 121 |
+
This will of course be a problem.
|
| 122 |
+
|
| 123 |
+
There are at least two ways around this:
|
| 124 |
+
- If the value you are passing is guaranteed to be around for the life of the functor, you can use boost::ref() to wrap the value as you pass it to boost::bind. Generally this is not a solution for values on the stack as if the functor ever gets passed to a lower or independent scope, the object may be gone by the time it's attempted to be used.
|
| 125 |
+
- The other option is to make your functions take a reference counted pointer like boost::shared_ptr as the argument. This avoids needing to worry about managing the lifetime of the object being passed.
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
\section TopicPitfalls_matrix_bool Matrices with boolean coefficients
|
| 129 |
+
|
| 130 |
+
The current behaviour of using \c Matrix with boolean coefficients is inconsistent and likely to change in future versions of Eigen, so please use it carefully!
|
| 131 |
+
|
| 132 |
+
A simple example for such an inconsistency is
|
| 133 |
+
|
| 134 |
+
\code
|
| 135 |
+
template<int Size>
|
| 136 |
+
void foo() {
|
| 137 |
+
Eigen::Matrix<bool, Size, Size> A, B, C;
|
| 138 |
+
A.setOnes();
|
| 139 |
+
B.setOnes();
|
| 140 |
+
|
| 141 |
+
C = A * B - A * B;
|
| 142 |
+
std::cout << C << "\n";
|
| 143 |
+
}
|
| 144 |
+
\endcode
|
| 145 |
+
|
| 146 |
+
since calling \c foo<3>() prints the zero matrix while calling \c foo<10>() prints the identity matrix.
|
| 147 |
+
|
| 148 |
+
*/
|
| 149 |
+
}
|
include/eigen/doc/QuickStartGuide.dox
ADDED
|
@@ -0,0 +1,100 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
namespace Eigen {
|
| 2 |
+
|
| 3 |
+
/** \page GettingStarted Getting started
|
| 4 |
+
|
| 5 |
+
\eigenAutoToc
|
| 6 |
+
|
| 7 |
+
This is a very short guide on how to get started with Eigen. It has a dual purpose. It serves as a minimal introduction to the Eigen library for people who want to start coding as soon as possible. You can also read this page as the first part of the Tutorial, which explains the library in more detail; in this case you will continue with \ref TutorialMatrixClass.
|
| 8 |
+
|
| 9 |
+
\section GettingStartedInstallation How to "install" Eigen?
|
| 10 |
+
|
| 11 |
+
In order to use Eigen, you just need to download and extract Eigen's source code (see <a href="http://eigen.tuxfamily.org/index.php?title=Main_Page#Download">the wiki</a> for download instructions). In fact, the header files in the \c Eigen subdirectory are the only files required to compile programs using Eigen. The header files are the same for all platforms. It is not necessary to use CMake or install anything.
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
\section GettingStartedFirstProgram A simple first program
|
| 15 |
+
|
| 16 |
+
Here is a rather simple program to get you started.
|
| 17 |
+
|
| 18 |
+
\include QuickStart_example.cpp
|
| 19 |
+
|
| 20 |
+
We will explain the program after telling you how to compile it.
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
\section GettingStartedCompiling Compiling and running your first program
|
| 24 |
+
|
| 25 |
+
There is no library to link to. The only thing that you need to keep in mind when compiling the above program is that the compiler must be able to find the Eigen header files. The directory in which you placed Eigen's source code must be in the include path. With GCC you use the \c -I option to achieve this, so you can compile the program with a command like this:
|
| 26 |
+
|
| 27 |
+
\code g++ -I /path/to/eigen/ my_program.cpp -o my_program \endcode
|
| 28 |
+
|
| 29 |
+
On Linux or Mac OS X, another option is to symlink or copy the Eigen folder into \c /usr/local/include/. This way, you can compile the program with:
|
| 30 |
+
|
| 31 |
+
\code g++ my_program.cpp -o my_program \endcode
|
| 32 |
+
|
| 33 |
+
When you run the program, it produces the following output:
|
| 34 |
+
|
| 35 |
+
\include QuickStart_example.out
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
\section GettingStartedExplanation Explanation of the first program
|
| 39 |
+
|
| 40 |
+
The Eigen header files define many types, but for simple applications it may be enough to use only the \c MatrixXd type. This represents a matrix of arbitrary size (hence the \c X in \c MatrixXd), in which every entry is a \c double (hence the \c d in \c MatrixXd). See the \ref QuickRef_Types "quick reference guide" for an overview of the different types you can use to represent a matrix.
|
| 41 |
+
|
| 42 |
+
The \c Eigen/Dense header file defines all member functions for the MatrixXd type and related types (see also the \ref QuickRef_Headers "table of header files"). All classes and functions defined in this header file (and other Eigen header files) are in the \c Eigen namespace.
|
| 43 |
+
|
| 44 |
+
The first line of the \c main function declares a variable of type \c MatrixXd and specifies that it is a matrix with 2 rows and 2 columns (the entries are not initialized). The statement <tt>m(0,0) = 3</tt> sets the entry in the top-left corner to 3. You need to use round parentheses to refer to entries in the matrix. As usual in computer science, the index of the first element is 0, as opposed to the convention in mathematics that the first index is 1.
|
| 45 |
+
|
| 46 |
+
The following three statements set the other three entries. The final line outputs the matrix \c m to the standard output stream.
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
\section GettingStartedExample2 Example 2: Matrices and vectors
|
| 50 |
+
|
| 51 |
+
Here is another example, which combines matrices with vectors. Concentrate on the left-hand program for now; we will talk about the right-hand program later.
|
| 52 |
+
|
| 53 |
+
<table class="manual">
|
| 54 |
+
<tr><th>Size set at run time:</th><th>Size set at compile time:</th></tr>
|
| 55 |
+
<tr><td>
|
| 56 |
+
\include QuickStart_example2_dynamic.cpp
|
| 57 |
+
</td>
|
| 58 |
+
<td>
|
| 59 |
+
\include QuickStart_example2_fixed.cpp
|
| 60 |
+
</td></tr></table>
|
| 61 |
+
|
| 62 |
+
The output is as follows:
|
| 63 |
+
|
| 64 |
+
\include QuickStart_example2_dynamic.out
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
\section GettingStartedExplanation2 Explanation of the second example
|
| 68 |
+
|
| 69 |
+
The second example starts by declaring a 3-by-3 matrix \c m which is initialized using the \link DenseBase::Random(Index,Index) Random() \endlink method with random values between -1 and 1. The next line applies a linear mapping such that the values are between 10 and 110. The function call \link DenseBase::Constant(Index,Index,const DenseBase::Scalar&) MatrixXd::Constant\endlink(3,3,1.2) returns a 3-by-3 matrix expression having all coefficients equal to 1.2. The rest is standard arithmetic.
|
| 70 |
+
|
| 71 |
+
The next line of the \c main function introduces a new type: \c VectorXd. This represents a (column) vector of arbitrary size. Here, the vector \c v is created to contain \c 3 coefficients which are left uninitialized. The one but last line uses the so-called comma-initializer, explained in \ref TutorialAdvancedInitialization, to set all coefficients of the vector \c v to be as follows:
|
| 72 |
+
|
| 73 |
+
\f[
|
| 74 |
+
v =
|
| 75 |
+
\begin{bmatrix}
|
| 76 |
+
1 \\
|
| 77 |
+
2 \\
|
| 78 |
+
3
|
| 79 |
+
\end{bmatrix}.
|
| 80 |
+
\f]
|
| 81 |
+
|
| 82 |
+
The final line of the program multiplies the matrix \c m with the vector \c v and outputs the result.
|
| 83 |
+
|
| 84 |
+
Now look back at the second example program. We presented two versions of it. In the version in the left column, the matrix is of type \c MatrixXd which represents matrices of arbitrary size. The version in the right column is similar, except that the matrix is of type \c Matrix3d, which represents matrices of a fixed size (here 3-by-3). Because the type already encodes the size of the matrix, it is not necessary to specify the size in the constructor; compare <tt>MatrixXd m(3,3)</tt> with <tt>Matrix3d m</tt>. Similarly, we have \c VectorXd on the left (arbitrary size) versus \c Vector3d on the right (fixed size). Note that here the coefficients of vector \c v are directly set in the constructor, though the same syntax of the left example could be used too.
|
| 85 |
+
|
| 86 |
+
The use of fixed-size matrices and vectors has two advantages. The compiler emits better (faster) code because it knows the size of the matrices and vectors. Specifying the size in the type also allows for more rigorous checking at compile-time. For instance, the compiler will complain if you try to multiply a \c Matrix4d (a 4-by-4 matrix) with a \c Vector3d (a vector of size 3). However, the use of many types increases compilation time and the size of the executable. The size of the matrix may also not be known at compile-time. A rule of thumb is to use fixed-size matrices for size 4-by-4 and smaller.
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
\section GettingStartedConclusion Where to go from here?
|
| 90 |
+
|
| 91 |
+
It's worth taking the time to read the \ref TutorialMatrixClass "long tutorial".
|
| 92 |
+
|
| 93 |
+
However if you think you don't need it, you can directly use the classes documentation and our \ref QuickRefPage.
|
| 94 |
+
|
| 95 |
+
\li \b Next: \ref TutorialMatrixClass
|
| 96 |
+
|
| 97 |
+
*/
|
| 98 |
+
|
| 99 |
+
}
|
| 100 |
+
|
include/eigen/doc/SparseQuickReference.dox
ADDED
|
@@ -0,0 +1,272 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
namespace Eigen {
|
| 2 |
+
/** \eigenManualPage SparseQuickRefPage Quick reference guide for sparse matrices
|
| 3 |
+
\eigenAutoToc
|
| 4 |
+
|
| 5 |
+
<hr>
|
| 6 |
+
|
| 7 |
+
In this page, we give a quick summary of the main operations available for sparse matrices in the class SparseMatrix. First, it is recommended to read the introductory tutorial at \ref TutorialSparse. The important point to have in mind when working on sparse matrices is how they are stored:
|
| 8 |
+
i.e. either row major or column major. The default is column major. Most arithmetic operations on sparse matrices will assert that they have the same storage order.
|
| 9 |
+
|
| 10 |
+
\section SparseMatrixInit Sparse Matrix Initialization
|
| 11 |
+
<table class="manual">
|
| 12 |
+
<tr><th> Category </th> <th> Operations</th> <th>Notes</th></tr>
|
| 13 |
+
<tr><td>Constructor</td>
|
| 14 |
+
<td>
|
| 15 |
+
\code
|
| 16 |
+
SparseMatrix<double> sm1(1000,1000);
|
| 17 |
+
SparseMatrix<std::complex<double>,RowMajor> sm2;
|
| 18 |
+
\endcode
|
| 19 |
+
</td> <td> Default is ColMajor</td> </tr>
|
| 20 |
+
<tr class="alt">
|
| 21 |
+
<td> Resize/Reserve</td>
|
| 22 |
+
<td>
|
| 23 |
+
\code
|
| 24 |
+
sm1.resize(m,n); // Change sm1 to a m x n matrix.
|
| 25 |
+
sm1.reserve(nnz); // Allocate room for nnz nonzeros elements.
|
| 26 |
+
\endcode
|
| 27 |
+
</td>
|
| 28 |
+
<td> Note that when calling reserve(), it is not required that nnz is the exact number of nonzero elements in the final matrix. However, an exact estimation will avoid multiple reallocations during the insertion phase. </td>
|
| 29 |
+
</tr>
|
| 30 |
+
<tr>
|
| 31 |
+
<td> Assignment </td>
|
| 32 |
+
<td>
|
| 33 |
+
\code
|
| 34 |
+
SparseMatrix<double,ColMajor> sm1;
|
| 35 |
+
// Initialize sm2 with sm1.
|
| 36 |
+
SparseMatrix<double,RowMajor> sm2(sm1), sm3;
|
| 37 |
+
// Assignment and evaluations modify the storage order.
|
| 38 |
+
sm3 = sm1;
|
| 39 |
+
\endcode
|
| 40 |
+
</td>
|
| 41 |
+
<td> The copy constructor can be used to convert from a storage order to another</td>
|
| 42 |
+
</tr>
|
| 43 |
+
<tr class="alt">
|
| 44 |
+
<td> Element-wise Insertion</td>
|
| 45 |
+
<td>
|
| 46 |
+
\code
|
| 47 |
+
// Insert a new element;
|
| 48 |
+
sm1.insert(i, j) = v_ij;
|
| 49 |
+
|
| 50 |
+
// Update the value v_ij
|
| 51 |
+
sm1.coeffRef(i,j) = v_ij;
|
| 52 |
+
sm1.coeffRef(i,j) += v_ij;
|
| 53 |
+
sm1.coeffRef(i,j) -= v_ij;
|
| 54 |
+
\endcode
|
| 55 |
+
</td>
|
| 56 |
+
<td> insert() assumes that the element does not already exist; otherwise, use coeffRef()</td>
|
| 57 |
+
</tr>
|
| 58 |
+
<tr>
|
| 59 |
+
<td> Batch insertion</td>
|
| 60 |
+
<td>
|
| 61 |
+
\code
|
| 62 |
+
std::vector< Eigen::Triplet<double> > tripletList;
|
| 63 |
+
tripletList.reserve(estimation_of_entries);
|
| 64 |
+
// -- Fill tripletList with nonzero elements...
|
| 65 |
+
sm1.setFromTriplets(tripletList.begin(), tripletList.end());
|
| 66 |
+
\endcode
|
| 67 |
+
</td>
|
| 68 |
+
<td>A complete example is available at \link TutorialSparseFilling Triplet Insertion \endlink.</td>
|
| 69 |
+
</tr>
|
| 70 |
+
<tr class="alt">
|
| 71 |
+
<td> Constant or Random Insertion</td>
|
| 72 |
+
<td>
|
| 73 |
+
\code
|
| 74 |
+
sm1.setZero();
|
| 75 |
+
\endcode
|
| 76 |
+
</td>
|
| 77 |
+
<td>Remove all non-zero coefficients</td>
|
| 78 |
+
</tr>
|
| 79 |
+
</table>
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
\section SparseBasicInfos Matrix properties
|
| 83 |
+
Beyond the basic functions rows() and cols(), there are some useful functions that are available to easily get some information from the matrix.
|
| 84 |
+
<table class="manual">
|
| 85 |
+
<tr>
|
| 86 |
+
<td> \code
|
| 87 |
+
sm1.rows(); // Number of rows
|
| 88 |
+
sm1.cols(); // Number of columns
|
| 89 |
+
sm1.nonZeros(); // Number of non zero values
|
| 90 |
+
sm1.outerSize(); // Number of columns (resp. rows) for a column major (resp. row major )
|
| 91 |
+
sm1.innerSize(); // Number of rows (resp. columns) for a row major (resp. column major)
|
| 92 |
+
sm1.norm(); // Euclidean norm of the matrix
|
| 93 |
+
sm1.squaredNorm(); // Squared norm of the matrix
|
| 94 |
+
sm1.blueNorm();
|
| 95 |
+
sm1.isVector(); // Check if sm1 is a sparse vector or a sparse matrix
|
| 96 |
+
sm1.isCompressed(); // Check if sm1 is in compressed form
|
| 97 |
+
...
|
| 98 |
+
\endcode </td>
|
| 99 |
+
</tr>
|
| 100 |
+
</table>
|
| 101 |
+
|
| 102 |
+
\section SparseBasicOps Arithmetic operations
|
| 103 |
+
It is easy to perform arithmetic operations on sparse matrices provided that the dimensions are adequate and that the matrices have the same storage order. Note that the evaluation can always be done in a matrix with a different storage order. In the following, \b sm denotes a sparse matrix, \b dm a dense matrix and \b dv a dense vector.
|
| 104 |
+
<table class="manual">
|
| 105 |
+
<tr><th> Operations </th> <th> Code </th> <th> Notes </th></tr>
|
| 106 |
+
|
| 107 |
+
<tr>
|
| 108 |
+
<td> add subtract </td>
|
| 109 |
+
<td> \code
|
| 110 |
+
sm3 = sm1 + sm2;
|
| 111 |
+
sm3 = sm1 - sm2;
|
| 112 |
+
sm2 += sm1;
|
| 113 |
+
sm2 -= sm1; \endcode
|
| 114 |
+
</td>
|
| 115 |
+
<td>
|
| 116 |
+
sm1 and sm2 should have the same storage order
|
| 117 |
+
</td>
|
| 118 |
+
</tr>
|
| 119 |
+
|
| 120 |
+
<tr class="alt"><td>
|
| 121 |
+
scalar product</td><td>\code
|
| 122 |
+
sm3 = sm1 * s1; sm3 *= s1;
|
| 123 |
+
sm3 = s1 * sm1 + s2 * sm2; sm3 /= s1;\endcode
|
| 124 |
+
</td>
|
| 125 |
+
<td>
|
| 126 |
+
Many combinations are possible if the dimensions and the storage order agree.
|
| 127 |
+
</tr>
|
| 128 |
+
|
| 129 |
+
<tr>
|
| 130 |
+
<td> %Sparse %Product </td>
|
| 131 |
+
<td> \code
|
| 132 |
+
sm3 = sm1 * sm2;
|
| 133 |
+
dm2 = sm1 * dm1;
|
| 134 |
+
dv2 = sm1 * dv1;
|
| 135 |
+
\endcode </td>
|
| 136 |
+
<td>
|
| 137 |
+
</td>
|
| 138 |
+
</tr>
|
| 139 |
+
|
| 140 |
+
<tr class='alt'>
|
| 141 |
+
<td> transposition, adjoint</td>
|
| 142 |
+
<td> \code
|
| 143 |
+
sm2 = sm1.transpose();
|
| 144 |
+
sm2 = sm1.adjoint();
|
| 145 |
+
\endcode </td>
|
| 146 |
+
<td>
|
| 147 |
+
Note that the transposition changes the storage order. There is no support for transposeInPlace().
|
| 148 |
+
</td>
|
| 149 |
+
</tr>
|
| 150 |
+
<tr>
|
| 151 |
+
<td> Permutation </td>
|
| 152 |
+
<td>
|
| 153 |
+
\code
|
| 154 |
+
perm.indices(); // Reference to the vector of indices
|
| 155 |
+
sm1.twistedBy(perm); // Permute rows and columns
|
| 156 |
+
sm2 = sm1 * perm; // Permute the rows
|
| 157 |
+
sm2 = perm * sm1; // Permute the columns
|
| 158 |
+
\endcode
|
| 159 |
+
</td>
|
| 160 |
+
<td>
|
| 161 |
+
|
| 162 |
+
</td>
|
| 163 |
+
</tr>
|
| 164 |
+
<tr>
|
| 165 |
+
<td>
|
| 166 |
+
Component-wise ops
|
| 167 |
+
</td>
|
| 168 |
+
<td>\code
|
| 169 |
+
sm1.cwiseProduct(sm2);
|
| 170 |
+
sm1.cwiseQuotient(sm2);
|
| 171 |
+
sm1.cwiseMin(sm2);
|
| 172 |
+
sm1.cwiseMax(sm2);
|
| 173 |
+
sm1.cwiseAbs();
|
| 174 |
+
sm1.cwiseSqrt();
|
| 175 |
+
\endcode</td>
|
| 176 |
+
<td>
|
| 177 |
+
sm1 and sm2 should have the same storage order
|
| 178 |
+
</td>
|
| 179 |
+
</tr>
|
| 180 |
+
</table>
|
| 181 |
+
|
| 182 |
+
\section sparseotherops Other supported operations
|
| 183 |
+
<table class="manual">
|
| 184 |
+
<tr><th style="min-width:initial"> Code </th> <th> Notes</th> </tr>
|
| 185 |
+
<tr><td colspan="2">Sub-matrices</td></tr>
|
| 186 |
+
<tr>
|
| 187 |
+
<td>
|
| 188 |
+
\code
|
| 189 |
+
sm1.block(startRow, startCol, rows, cols);
|
| 190 |
+
sm1.block(startRow, startCol);
|
| 191 |
+
sm1.topLeftCorner(rows, cols);
|
| 192 |
+
sm1.topRightCorner(rows, cols);
|
| 193 |
+
sm1.bottomLeftCorner( rows, cols);
|
| 194 |
+
sm1.bottomRightCorner( rows, cols);
|
| 195 |
+
\endcode
|
| 196 |
+
</td><td>
|
| 197 |
+
Contrary to dense matrices, here <strong>all these methods are read-only</strong>.\n
|
| 198 |
+
See \ref TutorialSparse_SubMatrices and below for read-write sub-matrices.
|
| 199 |
+
</td>
|
| 200 |
+
</tr>
|
| 201 |
+
<tr class="alt"><td colspan="2"> Range </td></tr>
|
| 202 |
+
<tr class="alt">
|
| 203 |
+
<td>
|
| 204 |
+
\code
|
| 205 |
+
sm1.innerVector(outer); // RW
|
| 206 |
+
sm1.innerVectors(start, size); // RW
|
| 207 |
+
sm1.leftCols(size); // RW
|
| 208 |
+
sm2.rightCols(size); // RO because sm2 is row-major
|
| 209 |
+
sm1.middleRows(start, numRows); // RO because sm1 is column-major
|
| 210 |
+
sm1.middleCols(start, numCols); // RW
|
| 211 |
+
sm1.col(j); // RW
|
| 212 |
+
\endcode
|
| 213 |
+
</td>
|
| 214 |
+
<td>
|
| 215 |
+
An inner vector is either a row (for row-major) or a column (for column-major).\n
|
| 216 |
+
As stated earlier, for a read-write sub-matrix (RW), the evaluation can be done in a matrix with different storage order.
|
| 217 |
+
</td>
|
| 218 |
+
</tr>
|
| 219 |
+
<tr><td colspan="2"> Triangular and selfadjoint views</td></tr>
|
| 220 |
+
<tr>
|
| 221 |
+
<td>
|
| 222 |
+
\code
|
| 223 |
+
sm2 = sm1.triangularView<Lower>();
|
| 224 |
+
sm2 = sm1.selfadjointView<Lower>();
|
| 225 |
+
\endcode
|
| 226 |
+
</td>
|
| 227 |
+
<td> Several combination between triangular views and blocks views are possible
|
| 228 |
+
\code
|
| 229 |
+
\endcode </td>
|
| 230 |
+
</tr>
|
| 231 |
+
<tr class="alt"><td colspan="2">Triangular solve </td></tr>
|
| 232 |
+
<tr class="alt">
|
| 233 |
+
<td>
|
| 234 |
+
\code
|
| 235 |
+
dv2 = sm1.triangularView<Upper>().solve(dv1);
|
| 236 |
+
dv2 = sm1.topLeftCorner(size, size)
|
| 237 |
+
.triangularView<Lower>().solve(dv1);
|
| 238 |
+
\endcode
|
| 239 |
+
</td>
|
| 240 |
+
<td> For general sparse solve, Use any suitable module described at \ref TopicSparseSystems </td>
|
| 241 |
+
</tr>
|
| 242 |
+
<tr><td colspan="2"> Low-level API</td></tr>
|
| 243 |
+
<tr>
|
| 244 |
+
<td>
|
| 245 |
+
\code
|
| 246 |
+
sm1.valuePtr(); // Pointer to the values
|
| 247 |
+
sm1.innerIndexPtr(); // Pointer to the indices.
|
| 248 |
+
sm1.outerIndexPtr(); // Pointer to the beginning of each inner vector
|
| 249 |
+
\endcode
|
| 250 |
+
</td>
|
| 251 |
+
<td>
|
| 252 |
+
If the matrix is not in compressed form, `makeCompressed()` should be called before.\n
|
| 253 |
+
Note that these functions are mostly provided for interoperability purposes with external libraries.\n
|
| 254 |
+
A better access to the values of the matrix is done by using the InnerIterator class as described in \link TutorialSparse the Tutorial Sparse \endlink section</td>
|
| 255 |
+
</tr>
|
| 256 |
+
<tr class="alt"><td colspan="2">Mapping external buffers</td></tr>
|
| 257 |
+
<tr class="alt">
|
| 258 |
+
<td>
|
| 259 |
+
\code
|
| 260 |
+
int outerIndexPtr[cols+1];
|
| 261 |
+
int innerIndices[nnz];
|
| 262 |
+
double values[nnz];
|
| 263 |
+
Map<SparseMatrix<double> > sm1(rows,cols,nnz,outerIndexPtr, // read-write
|
| 264 |
+
innerIndices,values);
|
| 265 |
+
Map<const SparseMatrix<double> > sm2(...); // read-only
|
| 266 |
+
\endcode
|
| 267 |
+
</td>
|
| 268 |
+
<td>As for dense matrices, class Map<SparseMatrixType> can be used to see external buffers as an %Eigen's SparseMatrix object. </td>
|
| 269 |
+
</tr>
|
| 270 |
+
</table>
|
| 271 |
+
*/
|
| 272 |
+
}
|
include/eigen/doc/StlContainers.dox
ADDED
|
@@ -0,0 +1,73 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
namespace Eigen {
|
| 2 |
+
|
| 3 |
+
/** \eigenManualPage TopicStlContainers Using STL Containers with Eigen
|
| 4 |
+
|
| 5 |
+
\eigenAutoToc
|
| 6 |
+
|
| 7 |
+
\section StlContainers_summary Executive summary
|
| 8 |
+
|
| 9 |
+
If you're compiling in \cpp17 mode only with a sufficiently recent compiler (e.g., GCC>=7, clang>=5, MSVC>=19.12), then everything is taken care by the compiler and you can stop reading.
|
| 10 |
+
|
| 11 |
+
Otherwise, using STL containers on \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen types", or classes having members of such types, requires the use of an over-aligned allocator.
|
| 12 |
+
That is, an allocator capable of allocating buffers with 16, 32, or even 64 bytes alignment.
|
| 13 |
+
%Eigen does provide one ready for use: aligned_allocator.
|
| 14 |
+
|
| 15 |
+
Prior to \cpp11, if you want to use the `std::vector` container, then you also have to <code> \#include <Eigen/StdVector> </code>.
|
| 16 |
+
|
| 17 |
+
These issues arise only with \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen types" and \ref TopicStructHavingEigenMembers "structures having such Eigen objects as member".
|
| 18 |
+
For other %Eigen types, such as Vector3f or MatrixXd, no special care is needed when using STL containers.
|
| 19 |
+
|
| 20 |
+
\section allocator Using an aligned allocator
|
| 21 |
+
|
| 22 |
+
STL containers take an optional template parameter, the allocator type. When using STL containers on \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen types", you need to tell the container to use an allocator that will always allocate memory at 16-byte-aligned (or more) locations. Fortunately, %Eigen does provide such an allocator: Eigen::aligned_allocator.
|
| 23 |
+
|
| 24 |
+
For example, instead of
|
| 25 |
+
\code
|
| 26 |
+
std::map<int, Eigen::Vector4d>
|
| 27 |
+
\endcode
|
| 28 |
+
you need to use
|
| 29 |
+
\code
|
| 30 |
+
std::map<int, Eigen::Vector4d, std::less<int>,
|
| 31 |
+
Eigen::aligned_allocator<std::pair<const int, Eigen::Vector4d> > >
|
| 32 |
+
\endcode
|
| 33 |
+
Note that the third parameter `std::less<int>` is just the default value, but we have to include it because we want to specify the fourth parameter, which is the allocator type.
|
| 34 |
+
|
| 35 |
+
\section StlContainers_vector The case of std::vector
|
| 36 |
+
|
| 37 |
+
This section is for c++98/03 users only. \cpp11 (or above) users can stop reading here.
|
| 38 |
+
|
| 39 |
+
So in c++98/03, the situation with `std::vector` is more complicated because of a bug in the standard (explanation below).
|
| 40 |
+
To workaround the issue, we had to specialize it for the Eigen::aligned_allocator type.
|
| 41 |
+
In practice you \b must use the Eigen::aligned_allocator (not another aligned allocator), \b and \#include <Eigen/StdVector>.
|
| 42 |
+
|
| 43 |
+
Here is an example:
|
| 44 |
+
\code
|
| 45 |
+
#include<Eigen/StdVector>
|
| 46 |
+
/* ... */
|
| 47 |
+
std::vector<Eigen::Vector4f,Eigen::aligned_allocator<Eigen::Vector4f> >
|
| 48 |
+
\endcode
|
| 49 |
+
|
| 50 |
+
<span class="note">\b Explanation: The `resize()` method of `std::vector` takes a `value_type` argument (defaulting to `value_type()`). So with `std::vector<Eigen::Vector4d>`, some Eigen::Vector4d objects will be passed by value, which discards any alignment modifiers, so a Eigen::Vector4d can be created at an unaligned location.
|
| 51 |
+
In order to avoid that, the only solution we saw was to specialize `std::vector` to make it work on a slight modification of, here, Eigen::Vector4d, that is able to deal properly with this situation.
|
| 52 |
+
</span>
|
| 53 |
+
|
| 54 |
+
\subsection vector_spec An alternative - specializing std::vector for Eigen types
|
| 55 |
+
|
| 56 |
+
As an alternative to the recommended approach described above, you have the option to specialize std::vector for Eigen types requiring alignment.
|
| 57 |
+
The advantage is that you won't need to declare std::vector all over with Eigen::aligned_allocator. One drawback on the other hand side is that
|
| 58 |
+
the specialization needs to be defined before all code pieces in which e.g. `std::vector<Vector2d>` is used. Otherwise, without knowing the specialization
|
| 59 |
+
the compiler will compile that particular instance with the default `std::allocator` and your program will most likely crash.
|
| 60 |
+
|
| 61 |
+
Here is an example:
|
| 62 |
+
\code
|
| 63 |
+
#include<Eigen/StdVector>
|
| 64 |
+
/* ... */
|
| 65 |
+
EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION(Vector2d)
|
| 66 |
+
std::vector<Eigen::Vector2d>
|
| 67 |
+
\endcode
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
*/
|
| 72 |
+
|
| 73 |
+
}
|
include/eigen/doc/StorageOrders.dox
ADDED
|
@@ -0,0 +1,86 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
namespace Eigen {
|
| 2 |
+
|
| 3 |
+
/** \eigenManualPage TopicStorageOrders Storage orders
|
| 4 |
+
|
| 5 |
+
There are two different storage orders for matrices and two-dimensional arrays: column-major and row-major.
|
| 6 |
+
This page explains these storage orders and how to specify which one should be used.
|
| 7 |
+
|
| 8 |
+
\eigenAutoToc
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
\section TopicStorageOrdersIntro Column-major and row-major storage
|
| 12 |
+
|
| 13 |
+
The entries of a matrix form a two-dimensional grid. However, when the matrix is stored in memory, the entries
|
| 14 |
+
have to somehow be laid out linearly. There are two main ways to do this, by row and by column.
|
| 15 |
+
|
| 16 |
+
We say that a matrix is stored in \b row-major order if it is stored row by row. The entire first row is
|
| 17 |
+
stored first, followed by the entire second row, and so on. Consider for example the matrix
|
| 18 |
+
|
| 19 |
+
\f[
|
| 20 |
+
A = \begin{bmatrix}
|
| 21 |
+
8 & 2 & 2 & 9 \\
|
| 22 |
+
9 & 1 & 4 & 4 \\
|
| 23 |
+
3 & 5 & 4 & 5
|
| 24 |
+
\end{bmatrix}.
|
| 25 |
+
\f]
|
| 26 |
+
|
| 27 |
+
If this matrix is stored in row-major order, then the entries are laid out in memory as follows:
|
| 28 |
+
|
| 29 |
+
\code 8 2 2 9 9 1 4 4 3 5 4 5 \endcode
|
| 30 |
+
|
| 31 |
+
On the other hand, a matrix is stored in \b column-major order if it is stored column by column, starting with
|
| 32 |
+
the entire first column, followed by the entire second column, and so on. If the above matrix is stored in
|
| 33 |
+
column-major order, it is laid out as follows:
|
| 34 |
+
|
| 35 |
+
\code 8 9 3 2 1 5 2 4 4 9 4 5 \endcode
|
| 36 |
+
|
| 37 |
+
This example is illustrated by the following Eigen code. It uses the PlainObjectBase::data() function, which
|
| 38 |
+
returns a pointer to the memory location of the first entry of the matrix.
|
| 39 |
+
|
| 40 |
+
<table class="example">
|
| 41 |
+
<tr><th>Example</th><th>Output</th></tr>
|
| 42 |
+
<tr><td>
|
| 43 |
+
\include TopicStorageOrders_example.cpp
|
| 44 |
+
</td>
|
| 45 |
+
<td>
|
| 46 |
+
\verbinclude TopicStorageOrders_example.out
|
| 47 |
+
</td></tr></table>
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
\section TopicStorageOrdersInEigen Storage orders in Eigen
|
| 51 |
+
|
| 52 |
+
The storage order of a matrix or a two-dimensional array can be set by specifying the \c Options template
|
| 53 |
+
parameter for Matrix or Array. As \ref TutorialMatrixClass explains, the %Matrix class template has six
|
| 54 |
+
template parameters, of which three are compulsory (\c Scalar, \c RowsAtCompileTime and \c ColsAtCompileTime)
|
| 55 |
+
and three are optional (\c Options, \c MaxRowsAtCompileTime and \c MaxColsAtCompileTime). If the \c Options
|
| 56 |
+
parameter is set to \c RowMajor, then the matrix or array is stored in row-major order; if it is set to
|
| 57 |
+
\c ColMajor, then it is stored in column-major order. This mechanism is used in the above Eigen program to
|
| 58 |
+
specify the storage order.
|
| 59 |
+
|
| 60 |
+
If the storage order is not specified, then Eigen defaults to storing the entry in column-major. This is also
|
| 61 |
+
the case if one of the convenience typedefs (\c Matrix3f, \c ArrayXXd, etc.) is used.
|
| 62 |
+
|
| 63 |
+
Matrices and arrays using one storage order can be assigned to matrices and arrays using the other storage
|
| 64 |
+
order, as happens in the above program when \c Arowmajor is initialized using \c Acolmajor. Eigen will reorder
|
| 65 |
+
the entries automatically. More generally, row-major and column-major matrices can be mixed in an expression
|
| 66 |
+
as we want.
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
\section TopicStorageOrdersWhich Which storage order to choose?
|
| 70 |
+
|
| 71 |
+
So, which storage order should you use in your program? There is no simple answer to this question; it depends
|
| 72 |
+
on your application. Here are some points to keep in mind:
|
| 73 |
+
|
| 74 |
+
- Your users may expect you to use a specific storage order. Alternatively, you may use other libraries than
|
| 75 |
+
Eigen, and these other libraries may expect a certain storage order. In these cases it may be easiest and
|
| 76 |
+
fastest to use this storage order in your whole program.
|
| 77 |
+
- Algorithms that traverse a matrix row by row will go faster when the matrix is stored in row-major order
|
| 78 |
+
because of better data locality. Similarly, column-by-column traversal is faster for column-major
|
| 79 |
+
matrices. It may be worthwhile to experiment a bit to find out what is faster for your particular
|
| 80 |
+
application.
|
| 81 |
+
- The default in Eigen is column-major. Naturally, most of the development and testing of the Eigen library
|
| 82 |
+
is thus done with column-major matrices. This means that, even though we aim to support column-major and
|
| 83 |
+
row-major storage orders transparently, the Eigen library may well work best with column-major matrices.
|
| 84 |
+
|
| 85 |
+
*/
|
| 86 |
+
}
|
include/eigen/doc/TemplateKeyword.dox
ADDED
|
@@ -0,0 +1,133 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
namespace Eigen {
|
| 2 |
+
|
| 3 |
+
/** \page TopicTemplateKeyword The template and typename keywords in C++
|
| 4 |
+
|
| 5 |
+
There are two uses for the \c template and \c typename keywords in C++. One of them is fairly well known
|
| 6 |
+
amongst programmers: to define templates. The other use is more obscure: to specify that an expression refers
|
| 7 |
+
to a template function or a type. This regularly trips up programmers that use the %Eigen library, often
|
| 8 |
+
leading to error messages from the compiler that are difficult to understand, such as "expected expression" or
|
| 9 |
+
"no match for operator<".
|
| 10 |
+
|
| 11 |
+
\eigenAutoToc
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
\section TopicTemplateKeywordToDefineTemplates Using the template and typename keywords to define templates
|
| 15 |
+
|
| 16 |
+
The \c template and \c typename keywords are routinely used to define templates. This is not the topic of this
|
| 17 |
+
page as we assume that the reader is aware of this (otherwise consult a C++ book). The following example
|
| 18 |
+
should illustrate this use of the \c template keyword.
|
| 19 |
+
|
| 20 |
+
\code
|
| 21 |
+
template <typename T>
|
| 22 |
+
bool isPositive(T x)
|
| 23 |
+
{
|
| 24 |
+
return x > 0;
|
| 25 |
+
}
|
| 26 |
+
\endcode
|
| 27 |
+
|
| 28 |
+
We could just as well have written <tt>template <class T></tt>; the keywords \c typename and \c class have the
|
| 29 |
+
same meaning in this context.
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
\section TopicTemplateKeywordExample An example showing the second use of the template keyword
|
| 33 |
+
|
| 34 |
+
Let us illustrate the second use of the \c template keyword with an example. Suppose we want to write a
|
| 35 |
+
function which copies all entries in the upper triangular part of a matrix into another matrix, while keeping
|
| 36 |
+
the lower triangular part unchanged. A straightforward implementation would be as follows:
|
| 37 |
+
|
| 38 |
+
<table class="example">
|
| 39 |
+
<tr><th>Example:</th><th>Output:</th></tr>
|
| 40 |
+
<tr><td>
|
| 41 |
+
\include TemplateKeyword_simple.cpp
|
| 42 |
+
</td>
|
| 43 |
+
<td>
|
| 44 |
+
\verbinclude TemplateKeyword_simple.out
|
| 45 |
+
</td></tr></table>
|
| 46 |
+
|
| 47 |
+
That works fine, but it is not very flexible. First, it only works with dynamic-size matrices of
|
| 48 |
+
single-precision floats; the function \c copyUpperTriangularPart() does not accept static-size matrices or
|
| 49 |
+
matrices with double-precision numbers. Second, if you use an expression such as
|
| 50 |
+
<tt>mat.topLeftCorner(3,3)</tt> as the parameter \c src, then this is copied into a temporary variable of type
|
| 51 |
+
MatrixXf; this copy can be avoided.
|
| 52 |
+
|
| 53 |
+
As explained in \ref TopicFunctionTakingEigenTypes, both issues can be resolved by making
|
| 54 |
+
\c copyUpperTriangularPart() accept any object of type MatrixBase. This leads to the following code:
|
| 55 |
+
|
| 56 |
+
<table class="example">
|
| 57 |
+
<tr><th>Example:</th><th>Output:</th></tr>
|
| 58 |
+
<tr><td>
|
| 59 |
+
\include TemplateKeyword_flexible.cpp
|
| 60 |
+
</td>
|
| 61 |
+
<td>
|
| 62 |
+
\verbinclude TemplateKeyword_flexible.out
|
| 63 |
+
</td></tr></table>
|
| 64 |
+
|
| 65 |
+
The one line in the body of the function \c copyUpperTriangularPart() shows the second, more obscure use of
|
| 66 |
+
the \c template keyword in C++. Even though it may look strange, the \c template keywords are necessary
|
| 67 |
+
according to the standard. Without it, the compiler may reject the code with an error message like "no match
|
| 68 |
+
for operator<".
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
\section TopicTemplateKeywordExplanation Explanation
|
| 72 |
+
|
| 73 |
+
The reason that the \c template keyword is necessary in the last example has to do with the rules for how
|
| 74 |
+
templates are supposed to be compiled in C++. The compiler has to check the code for correct syntax at the
|
| 75 |
+
point where the template is defined, without knowing the actual value of the template arguments (\c Derived1
|
| 76 |
+
and \c Derived2 in the example). That means that the compiler cannot know that <tt>dst.triangularView</tt> is
|
| 77 |
+
a member template and that the following < symbol is part of the delimiter for the template
|
| 78 |
+
parameter. Another possibility would be that <tt>dst.triangularView</tt> is a member variable with the <
|
| 79 |
+
symbol referring to the <tt>operator<()</tt> function. In fact, the compiler should choose the second
|
| 80 |
+
possibility, according to the standard. If <tt>dst.triangularView</tt> is a member template (as in our case),
|
| 81 |
+
the programmer should specify this explicitly with the \c template keyword and write <tt>dst.template
|
| 82 |
+
triangularView</tt>.
|
| 83 |
+
|
| 84 |
+
The precise rules are rather complicated, but ignoring some subtleties we can summarize them as follows:
|
| 85 |
+
- A <em>dependent name</em> is name that depends (directly or indirectly) on a template parameter. In the
|
| 86 |
+
example, \c dst is a dependent name because it is of type <tt>MatrixBase<Derived1></tt> which depends
|
| 87 |
+
on the template parameter \c Derived1.
|
| 88 |
+
- If the code contains either one of the constructs <tt>xxx.yyy</tt> or <tt>xxx->yyy</tt> and \c xxx is a
|
| 89 |
+
dependent name and \c yyy refers to a member template, then the \c template keyword must be used before
|
| 90 |
+
\c yyy, leading to <tt>xxx.template yyy</tt> or <tt>xxx->template yyy</tt>.
|
| 91 |
+
- If the code contains the construct <tt>xxx::yyy</tt> and \c xxx is a dependent name and \c yyy refers to a
|
| 92 |
+
member typedef, then the \c typename keyword must be used before the whole construct, leading to
|
| 93 |
+
<tt>typename xxx::yyy</tt>.
|
| 94 |
+
|
| 95 |
+
As an example where the \c typename keyword is required, consider the following code in \ref TutorialSparse
|
| 96 |
+
for iterating over the non-zero entries of a sparse matrix type:
|
| 97 |
+
|
| 98 |
+
\code
|
| 99 |
+
SparseMatrixType mat(rows,cols);
|
| 100 |
+
for (int k=0; k<mat.outerSize(); ++k)
|
| 101 |
+
for (SparseMatrixType::InnerIterator it(mat,k); it; ++it)
|
| 102 |
+
{
|
| 103 |
+
/* ... */
|
| 104 |
+
}
|
| 105 |
+
\endcode
|
| 106 |
+
|
| 107 |
+
If \c SparseMatrixType depends on a template parameter, then the \c typename keyword is required:
|
| 108 |
+
|
| 109 |
+
\code
|
| 110 |
+
template <typename T>
|
| 111 |
+
void iterateOverSparseMatrix(const SparseMatrix<T>& mat)
|
| 112 |
+
{
|
| 113 |
+
for (int k=0; k<mat.outerSize(); ++k)
|
| 114 |
+
for (typename SparseMatrix<T>::InnerIterator it(mat,k); it; ++it)
|
| 115 |
+
{
|
| 116 |
+
/* ... */
|
| 117 |
+
}
|
| 118 |
+
}
|
| 119 |
+
\endcode
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
\section TopicTemplateKeywordResources Resources for further reading
|
| 123 |
+
|
| 124 |
+
For more information and a fuller explanation of this topic, the reader may consult the following sources:
|
| 125 |
+
- The book "C++ Template Metaprogramming" by David Abrahams and Aleksey Gurtovoy contains a very good
|
| 126 |
+
explanation in Appendix B ("The typename and template Keywords") which formed the basis for this page.
|
| 127 |
+
- http://pages.cs.wisc.edu/~driscoll/typename.html
|
| 128 |
+
- http://www.parashift.com/c++-faq-lite/templates.html#faq-35.18
|
| 129 |
+
- http://www.comeaucomputing.com/techtalk/templates/#templateprefix
|
| 130 |
+
- http://www.comeaucomputing.com/techtalk/templates/#typename
|
| 131 |
+
|
| 132 |
+
*/
|
| 133 |
+
}
|
include/eigen/doc/TopicAliasing.dox
ADDED
|
@@ -0,0 +1,237 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
namespace Eigen {
|
| 2 |
+
|
| 3 |
+
/** \eigenManualPage TopicAliasing Aliasing
|
| 4 |
+
|
| 5 |
+
In %Eigen, aliasing refers to assignment statement in which the same matrix (or array or vector) appears on the
|
| 6 |
+
left and on the right of the assignment operators. Statements like <tt>mat = 2 * mat;</tt> or <tt>mat =
|
| 7 |
+
mat.transpose();</tt> exhibit aliasing. The aliasing in the first example is harmless, but the aliasing in the
|
| 8 |
+
second example leads to unexpected results. This page explains what aliasing is, when it is harmful, and what
|
| 9 |
+
to do about it.
|
| 10 |
+
|
| 11 |
+
\eigenAutoToc
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
\section TopicAliasingExamples Examples
|
| 15 |
+
|
| 16 |
+
Here is a simple example exhibiting aliasing:
|
| 17 |
+
|
| 18 |
+
<table class="example">
|
| 19 |
+
<tr><th>Example</th><th>Output</th></tr>
|
| 20 |
+
<tr><td>
|
| 21 |
+
\include TopicAliasing_block.cpp
|
| 22 |
+
</td>
|
| 23 |
+
<td>
|
| 24 |
+
\verbinclude TopicAliasing_block.out
|
| 25 |
+
</td></tr></table>
|
| 26 |
+
|
| 27 |
+
The output is not what one would expect. The problem is the assignment
|
| 28 |
+
\code
|
| 29 |
+
mat.bottomRightCorner(2,2) = mat.topLeftCorner(2,2);
|
| 30 |
+
\endcode
|
| 31 |
+
This assignment exhibits aliasing: the coefficient \c mat(1,1) appears both in the block
|
| 32 |
+
<tt>mat.bottomRightCorner(2,2)</tt> on the left-hand side of the assignment and the block
|
| 33 |
+
<tt>mat.topLeftCorner(2,2)</tt> on the right-hand side. After the assignment, the (2,2) entry in the bottom
|
| 34 |
+
right corner should have the value of \c mat(1,1) before the assignment, which is 5. However, the output shows
|
| 35 |
+
that \c mat(2,2) is actually 1. The problem is that %Eigen uses lazy evaluation (see
|
| 36 |
+
\ref TopicEigenExpressionTemplates) for <tt>mat.topLeftCorner(2,2)</tt>. The result is similar to
|
| 37 |
+
\code
|
| 38 |
+
mat(1,1) = mat(0,0);
|
| 39 |
+
mat(1,2) = mat(0,1);
|
| 40 |
+
mat(2,1) = mat(1,0);
|
| 41 |
+
mat(2,2) = mat(1,1);
|
| 42 |
+
\endcode
|
| 43 |
+
Thus, \c mat(2,2) is assigned the \e new value of \c mat(1,1) instead of the old value. The next section
|
| 44 |
+
explains how to solve this problem by calling \link DenseBase::eval() eval()\endlink.
|
| 45 |
+
|
| 46 |
+
Aliasing occurs more naturally when trying to shrink a matrix. For example, the expressions <tt>vec =
|
| 47 |
+
vec.head(n)</tt> and <tt>mat = mat.block(i,j,r,c)</tt> exhibit aliasing.
|
| 48 |
+
|
| 49 |
+
In general, aliasing cannot be detected at compile time: if \c mat in the first example were a bit bigger,
|
| 50 |
+
then the blocks would not overlap, and there would be no aliasing problem. However, %Eigen does detect some
|
| 51 |
+
instances of aliasing, albeit at run time. The following example exhibiting aliasing was mentioned in \ref
|
| 52 |
+
TutorialMatrixArithmetic :
|
| 53 |
+
|
| 54 |
+
<table class="example">
|
| 55 |
+
<tr><th>Example</th><th>Output</th></tr>
|
| 56 |
+
<tr><td>
|
| 57 |
+
\include tut_arithmetic_transpose_aliasing.cpp
|
| 58 |
+
</td>
|
| 59 |
+
<td>
|
| 60 |
+
\verbinclude tut_arithmetic_transpose_aliasing.out
|
| 61 |
+
</td></tr></table>
|
| 62 |
+
|
| 63 |
+
Again, the output shows the aliasing issue. However, by default %Eigen uses a run-time assertion to detect this
|
| 64 |
+
and exits with a message like
|
| 65 |
+
|
| 66 |
+
\verbatim
|
| 67 |
+
void Eigen::DenseBase<Derived>::checkTransposeAliasing(const OtherDerived&) const
|
| 68 |
+
[with OtherDerived = Eigen::Transpose<Eigen::Matrix<int, 2, 2, 0, 2, 2> >, Derived = Eigen::Matrix<int, 2, 2, 0, 2, 2>]:
|
| 69 |
+
Assertion `(!internal::check_transpose_aliasing_selector<Scalar,internal::blas_traits<Derived>::IsTransposed,OtherDerived>::run(internal::extract_data(derived()), other))
|
| 70 |
+
&& "aliasing detected during transposition, use transposeInPlace() or evaluate the rhs into a temporary using .eval()"' failed.
|
| 71 |
+
\endverbatim
|
| 72 |
+
|
| 73 |
+
The user can turn %Eigen's run-time assertions like the one to detect this aliasing problem off by defining the
|
| 74 |
+
EIGEN_NO_DEBUG macro, and the above program was compiled with this macro turned off in order to illustrate the
|
| 75 |
+
aliasing problem. See \ref TopicAssertions for more information about %Eigen's run-time assertions.
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
\section TopicAliasingSolution Resolving aliasing issues
|
| 79 |
+
|
| 80 |
+
If you understand the cause of the aliasing issue, then it is obvious what must happen to solve it: %Eigen has
|
| 81 |
+
to evaluate the right-hand side fully into a temporary matrix/array and then assign it to the left-hand
|
| 82 |
+
side. The function \link DenseBase::eval() eval() \endlink does precisely that.
|
| 83 |
+
|
| 84 |
+
For example, here is the corrected version of the first example above:
|
| 85 |
+
|
| 86 |
+
<table class="example">
|
| 87 |
+
<tr><th>Example</th><th>Output</th></tr>
|
| 88 |
+
<tr><td>
|
| 89 |
+
\include TopicAliasing_block_correct.cpp
|
| 90 |
+
</td>
|
| 91 |
+
<td>
|
| 92 |
+
\verbinclude TopicAliasing_block_correct.out
|
| 93 |
+
</td></tr></table>
|
| 94 |
+
|
| 95 |
+
Now, \c mat(2,2) equals 5 after the assignment, as it should be.
|
| 96 |
+
|
| 97 |
+
The same solution also works for the second example, with the transpose: simply replace the line
|
| 98 |
+
<tt>a = a.transpose();</tt> with <tt>a = a.transpose().eval();</tt>. However, in this common case there is a
|
| 99 |
+
better solution. %Eigen provides the special-purpose function
|
| 100 |
+
\link DenseBase::transposeInPlace() transposeInPlace() \endlink which replaces a matrix by its transpose.
|
| 101 |
+
This is shown below:
|
| 102 |
+
|
| 103 |
+
<table class="example">
|
| 104 |
+
<tr><th>Example</th><th>Output</th></tr>
|
| 105 |
+
<tr><td>
|
| 106 |
+
\include tut_arithmetic_transpose_inplace.cpp
|
| 107 |
+
</td>
|
| 108 |
+
<td>
|
| 109 |
+
\verbinclude tut_arithmetic_transpose_inplace.out
|
| 110 |
+
</td></tr></table>
|
| 111 |
+
|
| 112 |
+
If an xxxInPlace() function is available, then it is best to use it, because it indicates more clearly what you
|
| 113 |
+
are doing. This may also allow %Eigen to optimize more aggressively. These are some of the xxxInPlace()
|
| 114 |
+
functions provided:
|
| 115 |
+
|
| 116 |
+
<table class="manual">
|
| 117 |
+
<tr><th>Original function</th><th>In-place function</th></tr>
|
| 118 |
+
<tr> <td> MatrixBase::adjoint() </td> <td> MatrixBase::adjointInPlace() </td> </tr>
|
| 119 |
+
<tr class="alt"> <td> DenseBase::reverse() </td> <td> DenseBase::reverseInPlace() </td> </tr>
|
| 120 |
+
<tr> <td> LDLT::solve() </td> <td> LDLT::solveInPlace() </td> </tr>
|
| 121 |
+
<tr class="alt"> <td> LLT::solve() </td> <td> LLT::solveInPlace() </td> </tr>
|
| 122 |
+
<tr> <td> TriangularView::solve() </td> <td> TriangularView::solveInPlace() </td> </tr>
|
| 123 |
+
<tr class="alt"> <td> DenseBase::transpose() </td> <td> DenseBase::transposeInPlace() </td> </tr>
|
| 124 |
+
</table>
|
| 125 |
+
|
| 126 |
+
In the special case where a matrix or vector is shrunk using an expression like <tt>vec = vec.head(n)</tt>,
|
| 127 |
+
you can use \link PlainObjectBase::conservativeResize() conservativeResize() \endlink.
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
\section TopicAliasingCwise Aliasing and component-wise operations
|
| 131 |
+
|
| 132 |
+
As explained above, it may be dangerous if the same matrix or array occurs on both the left-hand side and the
|
| 133 |
+
right-hand side of an assignment operator, and it is then often necessary to evaluate the right-hand side
|
| 134 |
+
explicitly. However, applying component-wise operations (such as matrix addition, scalar multiplication and
|
| 135 |
+
array multiplication) is safe.
|
| 136 |
+
|
| 137 |
+
The following example has only component-wise operations. Thus, there is no need for \link DenseBase::eval()
|
| 138 |
+
eval() \endlink even though the same matrix appears on both sides of the assignments.
|
| 139 |
+
|
| 140 |
+
<table class="example">
|
| 141 |
+
<tr><th>Example</th><th>Output</th></tr>
|
| 142 |
+
<tr><td>
|
| 143 |
+
\include TopicAliasing_cwise.cpp
|
| 144 |
+
</td>
|
| 145 |
+
<td>
|
| 146 |
+
\verbinclude TopicAliasing_cwise.out
|
| 147 |
+
</td></tr></table>
|
| 148 |
+
|
| 149 |
+
In general, an assignment is safe if the (i,j) entry of the expression on the right-hand side depends only on
|
| 150 |
+
the (i,j) entry of the matrix or array on the left-hand side and not on any other entries. In that case it is
|
| 151 |
+
not necessary to evaluate the right-hand side explicitly.
|
| 152 |
+
|
| 153 |
+
|
| 154 |
+
\section TopicAliasingMatrixMult Aliasing and matrix multiplication
|
| 155 |
+
|
| 156 |
+
Matrix multiplication is the only operation in %Eigen that assumes aliasing by default, <strong>under the
|
| 157 |
+
condition that the destination matrix is not resized</strong>.
|
| 158 |
+
Thus, if \c matA is a \b square matrix, then the statement <tt>matA = matA * matA;</tt> is safe.
|
| 159 |
+
All other operations in %Eigen assume that there are no aliasing problems,
|
| 160 |
+
either because the result is assigned to a different matrix or because it is a component-wise operation.
|
| 161 |
+
|
| 162 |
+
<table class="example">
|
| 163 |
+
<tr><th>Example</th><th>Output</th></tr>
|
| 164 |
+
<tr><td>
|
| 165 |
+
\include TopicAliasing_mult1.cpp
|
| 166 |
+
</td>
|
| 167 |
+
<td>
|
| 168 |
+
\verbinclude TopicAliasing_mult1.out
|
| 169 |
+
</td></tr></table>
|
| 170 |
+
|
| 171 |
+
However, this comes at a price. When executing the expression <tt>matA = matA * matA</tt>, %Eigen evaluates the
|
| 172 |
+
product in a temporary matrix which is assigned to \c matA after the computation. This is fine. But %Eigen does
|
| 173 |
+
the same when the product is assigned to a different matrix (e.g., <tt>matB = matA * matA</tt>). In that case,
|
| 174 |
+
it is more efficient to evaluate the product directly into \c matB instead of evaluating it first into a
|
| 175 |
+
temporary matrix and copying that matrix to \c matB.
|
| 176 |
+
|
| 177 |
+
The user can indicate with the \link MatrixBase::noalias() noalias()\endlink function that there is no
|
| 178 |
+
aliasing, as follows: <tt>matB.noalias() = matA * matA</tt>. This allows %Eigen to evaluate the matrix product
|
| 179 |
+
<tt>matA * matA</tt> directly into \c matB.
|
| 180 |
+
|
| 181 |
+
<table class="example">
|
| 182 |
+
<tr><th>Example</th><th>Output</th></tr>
|
| 183 |
+
<tr><td>
|
| 184 |
+
\include TopicAliasing_mult2.cpp
|
| 185 |
+
</td>
|
| 186 |
+
<td>
|
| 187 |
+
\verbinclude TopicAliasing_mult2.out
|
| 188 |
+
</td></tr></table>
|
| 189 |
+
|
| 190 |
+
Of course, you should not use \c noalias() when there is in fact aliasing taking place. If you do, then you
|
| 191 |
+
may get wrong results:
|
| 192 |
+
|
| 193 |
+
<table class="example">
|
| 194 |
+
<tr><th>Example</th><th>Output</th></tr>
|
| 195 |
+
<tr><td>
|
| 196 |
+
\include TopicAliasing_mult3.cpp
|
| 197 |
+
</td>
|
| 198 |
+
<td>
|
| 199 |
+
\verbinclude TopicAliasing_mult3.out
|
| 200 |
+
</td></tr></table>
|
| 201 |
+
|
| 202 |
+
Moreover, starting in Eigen 3.3, aliasing is \b not assumed if the destination matrix is resized and the product is not directly assigned to the destination.
|
| 203 |
+
Therefore, the following example is also wrong:
|
| 204 |
+
|
| 205 |
+
<table class="example">
|
| 206 |
+
<tr><th>Example</th><th>Output</th></tr>
|
| 207 |
+
<tr><td>
|
| 208 |
+
\include TopicAliasing_mult4.cpp
|
| 209 |
+
</td>
|
| 210 |
+
<td>
|
| 211 |
+
\verbinclude TopicAliasing_mult4.out
|
| 212 |
+
</td></tr></table>
|
| 213 |
+
|
| 214 |
+
As for any aliasing issue, you can resolve it by explicitly evaluating the expression prior to assignment:
|
| 215 |
+
<table class="example">
|
| 216 |
+
<tr><th>Example</th><th>Output</th></tr>
|
| 217 |
+
<tr><td>
|
| 218 |
+
\include TopicAliasing_mult5.cpp
|
| 219 |
+
</td>
|
| 220 |
+
<td>
|
| 221 |
+
\verbinclude TopicAliasing_mult5.out
|
| 222 |
+
</td></tr></table>
|
| 223 |
+
|
| 224 |
+
\section TopicAliasingSummary Summary
|
| 225 |
+
|
| 226 |
+
Aliasing occurs when the same matrix or array coefficients appear both on the left- and the right-hand side of
|
| 227 |
+
an assignment operator.
|
| 228 |
+
- Aliasing is harmless with coefficient-wise computations; this includes scalar multiplication and matrix or
|
| 229 |
+
array addition.
|
| 230 |
+
- When you multiply two matrices, %Eigen assumes that aliasing occurs. If you know that there is no aliasing,
|
| 231 |
+
then you can use \link MatrixBase::noalias() noalias()\endlink.
|
| 232 |
+
- In all other situations, %Eigen assumes that there is no aliasing issue and thus gives the wrong result if
|
| 233 |
+
aliasing does in fact occur. To prevent this, you have to use \link DenseBase::eval() eval() \endlink or
|
| 234 |
+
one of the xxxInPlace() functions.
|
| 235 |
+
|
| 236 |
+
*/
|
| 237 |
+
}
|
include/eigen/doc/TopicCMakeGuide.dox
ADDED
|
@@ -0,0 +1,65 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
namespace Eigen {
|
| 2 |
+
|
| 3 |
+
/**
|
| 4 |
+
|
| 5 |
+
\page TopicCMakeGuide Using %Eigen in CMake Projects
|
| 6 |
+
|
| 7 |
+
%Eigen provides native CMake support which allows the library to be easily
|
| 8 |
+
used in CMake projects.
|
| 9 |
+
|
| 10 |
+
\note %CMake 3.5 (or later) is required to enable this functionality.
|
| 11 |
+
|
| 12 |
+
%Eigen exports a CMake target called `Eigen3::Eigen` which can be imported
|
| 13 |
+
using the `find_package` CMake command and used by calling
|
| 14 |
+
`target_link_libraries` as in the following example:
|
| 15 |
+
\code{.cmake}
|
| 16 |
+
cmake_minimum_required (VERSION 3.5)
|
| 17 |
+
project (myproject)
|
| 18 |
+
|
| 19 |
+
find_package (Eigen3 REQUIRED NO_MODULE)
|
| 20 |
+
|
| 21 |
+
add_executable (example example.cpp)
|
| 22 |
+
target_link_libraries (example Eigen3::Eigen)
|
| 23 |
+
\endcode
|
| 24 |
+
|
| 25 |
+
The above code snippet must be placed in a file called `CMakeLists.txt` alongside
|
| 26 |
+
`example.cpp`. After running
|
| 27 |
+
\code{.sh}
|
| 28 |
+
$ cmake path-to-example-directory
|
| 29 |
+
\endcode
|
| 30 |
+
CMake will produce project files that generate an executable called `example`.
|
| 31 |
+
Here, `path-to-example-directory` is the path to the directory that contains
|
| 32 |
+
both `CMakeLists.txt` and `example.cpp`. Note that if you have multiple
|
| 33 |
+
instances of %Eigen installed, `find_package` will use the first one
|
| 34 |
+
encountered. To request a specific version of %Eigen, use the `<version>`
|
| 35 |
+
option in `find_package`:
|
| 36 |
+
```
|
| 37 |
+
find_package(Eigen3 3.4 REQUIRED NO_MODULE)
|
| 38 |
+
```
|
| 39 |
+
or to support a range of versions:
|
| 40 |
+
```
|
| 41 |
+
find_package(Eigen3 3.3...5 REQUIRED NO_MODULE) # Any version >=3.3.0 but <6.0.0.
|
| 42 |
+
```
|
| 43 |
+
|
| 44 |
+
Do not forget to set the <a href="https://cmake.org/cmake/help/v3.7/variable/CMAKE_PREFIX_PATH.html">\c CMAKE_PREFIX_PATH </a> variable if Eigen is not installed in a default location or if you want to pick a specific version. For instance:
|
| 45 |
+
\code{.sh}
|
| 46 |
+
$ cmake path-to-example-directory -DCMAKE_PREFIX_PATH=$HOME/mypackages
|
| 47 |
+
\endcode
|
| 48 |
+
An alternative is to set the \c Eigen3_DIR cmake's variable to the respective path containing the \c Eigen3*.cmake files. For instance:
|
| 49 |
+
\code{.sh}
|
| 50 |
+
$ cmake path-to-example-directory -DEigen3_DIR=$HOME/mypackages/share/eigen3/cmake/
|
| 51 |
+
\endcode
|
| 52 |
+
|
| 53 |
+
If the `REQUIRED` option is omitted when locating %Eigen using
|
| 54 |
+
`find_package`, one can check whether the package was found as follows:
|
| 55 |
+
\code{.cmake}
|
| 56 |
+
find_package (Eigen3 NO_MODULE)
|
| 57 |
+
|
| 58 |
+
if (TARGET Eigen3::Eigen)
|
| 59 |
+
# Use the imported target
|
| 60 |
+
endif (TARGET Eigen3::Eigen)
|
| 61 |
+
\endcode
|
| 62 |
+
|
| 63 |
+
*/
|
| 64 |
+
|
| 65 |
+
}
|
include/eigen/doc/TopicVectorization.dox
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
namespace Eigen {
|
| 2 |
+
|
| 3 |
+
/** \page TopicVectorization Vectorization
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
TODO: write this dox page!
|
| 7 |
+
|
| 8 |
+
*/
|
| 9 |
+
}
|
include/eigen/doc/TutorialAdvancedInitialization.dox
ADDED
|
@@ -0,0 +1,162 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
namespace Eigen {
|
| 2 |
+
|
| 3 |
+
/** \eigenManualPage TutorialAdvancedInitialization Advanced initialization
|
| 4 |
+
|
| 5 |
+
This page discusses several advanced methods for initializing matrices. It gives more details on the
|
| 6 |
+
comma-initializer, which was introduced before. It also explains how to get special matrices such as the
|
| 7 |
+
identity matrix and the zero matrix.
|
| 8 |
+
|
| 9 |
+
\eigenAutoToc
|
| 10 |
+
|
| 11 |
+
\section TutorialAdvancedInitializationCommaInitializer The comma initializer
|
| 12 |
+
|
| 13 |
+
Eigen offers a comma initializer syntax which allows the user to easily set all the coefficients of a matrix,
|
| 14 |
+
vector or array. Simply list the coefficients, starting at the top-left corner and moving from left to right
|
| 15 |
+
and from the top to the bottom. The size of the object needs to be specified beforehand. If you list too few
|
| 16 |
+
or too many coefficients, Eigen will complain.
|
| 17 |
+
|
| 18 |
+
<table class="example">
|
| 19 |
+
<tr><th>Example:</th><th>Output:</th></tr>
|
| 20 |
+
<tr><td>
|
| 21 |
+
\include Tutorial_commainit_01.cpp
|
| 22 |
+
</td>
|
| 23 |
+
<td>
|
| 24 |
+
\verbinclude Tutorial_commainit_01.out
|
| 25 |
+
</td></tr></table>
|
| 26 |
+
|
| 27 |
+
Moreover, the elements of the initialization list may themselves be vectors or matrices. A common use is
|
| 28 |
+
to join vectors or matrices together. For example, here is how to join two row vectors together. Remember
|
| 29 |
+
that you have to set the size before you can use the comma initializer.
|
| 30 |
+
|
| 31 |
+
<table class="example">
|
| 32 |
+
<tr><th>Example:</th><th>Output:</th></tr>
|
| 33 |
+
<tr><td>
|
| 34 |
+
\include Tutorial_AdvancedInitialization_Join.cpp
|
| 35 |
+
</td>
|
| 36 |
+
<td>
|
| 37 |
+
\verbinclude Tutorial_AdvancedInitialization_Join.out
|
| 38 |
+
</td></tr></table>
|
| 39 |
+
|
| 40 |
+
We can use the same technique to initialize matrices with a block structure.
|
| 41 |
+
|
| 42 |
+
<table class="example">
|
| 43 |
+
<tr><th>Example:</th><th>Output:</th></tr>
|
| 44 |
+
<tr><td>
|
| 45 |
+
\include Tutorial_AdvancedInitialization_Block.cpp
|
| 46 |
+
</td>
|
| 47 |
+
<td>
|
| 48 |
+
\verbinclude Tutorial_AdvancedInitialization_Block.out
|
| 49 |
+
</td></tr></table>
|
| 50 |
+
|
| 51 |
+
The comma initializer can also be used to fill block expressions such as <tt>m.row(i)</tt>. Here is a more
|
| 52 |
+
complicated way to get the same result as in the first example above:
|
| 53 |
+
|
| 54 |
+
<table class="example">
|
| 55 |
+
<tr><th>Example:</th><th>Output:</th></tr>
|
| 56 |
+
<tr><td>
|
| 57 |
+
\include Tutorial_commainit_01b.cpp
|
| 58 |
+
</td>
|
| 59 |
+
<td>
|
| 60 |
+
\verbinclude Tutorial_commainit_01b.out
|
| 61 |
+
</td></tr></table>
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
\section TutorialAdvancedInitializationSpecialMatrices Special matrices and arrays
|
| 65 |
+
|
| 66 |
+
The Matrix and Array classes have static methods like \link DenseBase::Zero() Zero()\endlink, which can be
|
| 67 |
+
used to initialize all coefficients to zero. There are three variants. The first variant takes no arguments
|
| 68 |
+
and can only be used for fixed-size objects. If you want to initialize a dynamic-size object to zero, you need
|
| 69 |
+
to specify the size. Thus, the second variant requires one argument and can be used for one-dimensional
|
| 70 |
+
dynamic-size objects, while the third variant requires two arguments and can be used for two-dimensional
|
| 71 |
+
objects. All three variants are illustrated in the following example:
|
| 72 |
+
|
| 73 |
+
<table class="example">
|
| 74 |
+
<tr><th>Example:</th><th>Output:</th></tr>
|
| 75 |
+
<tr><td>
|
| 76 |
+
\include Tutorial_AdvancedInitialization_Zero.cpp
|
| 77 |
+
</td>
|
| 78 |
+
<td>
|
| 79 |
+
\verbinclude Tutorial_AdvancedInitialization_Zero.out
|
| 80 |
+
</td></tr></table>
|
| 81 |
+
|
| 82 |
+
Similarly, the static method \link DenseBase::Constant() Constant\endlink(value) sets all coefficients to \c value.
|
| 83 |
+
If the size of the object needs to be specified, the additional arguments go before the \c value
|
| 84 |
+
argument, as in <tt>MatrixXd::Constant(rows, cols, value)</tt>. The method \link DenseBase::Random() Random()
|
| 85 |
+
\endlink fills the matrix or array with random coefficients. The identity matrix can be obtained by calling
|
| 86 |
+
\link MatrixBase::Identity() Identity()\endlink; this method is only available for Matrix, not for Array,
|
| 87 |
+
because "identity matrix" is a linear algebra concept. The method
|
| 88 |
+
\link DenseBase::LinSpaced LinSpaced\endlink(size, low, high) is only available for vectors and
|
| 89 |
+
one-dimensional arrays; it yields a vector of the specified size whose coefficients are equally spaced between
|
| 90 |
+
\c low and \c high. The method \c LinSpaced() is illustrated in the following example, which prints a table
|
| 91 |
+
with angles in degrees, the corresponding angle in radians, and their sine and cosine.
|
| 92 |
+
|
| 93 |
+
<table class="example">
|
| 94 |
+
<tr><th>Example:</th><th>Output:</th></tr>
|
| 95 |
+
<tr><td>
|
| 96 |
+
\include Tutorial_AdvancedInitialization_LinSpaced.cpp
|
| 97 |
+
</td>
|
| 98 |
+
<td>
|
| 99 |
+
\verbinclude Tutorial_AdvancedInitialization_LinSpaced.out
|
| 100 |
+
</td></tr></table>
|
| 101 |
+
|
| 102 |
+
This example shows that objects like the ones returned by LinSpaced() can be assigned to variables (and
|
| 103 |
+
expressions). Eigen defines utility functions like \link DenseBase::setZero() setZero()\endlink,
|
| 104 |
+
\link MatrixBase::setIdentity() \endlink and \link DenseBase::setLinSpaced() \endlink to do this
|
| 105 |
+
conveniently. The following example contrasts three ways to construct the matrix
|
| 106 |
+
\f$ J = \bigl[ \begin{smallmatrix} O & I \\ I & O \end{smallmatrix} \bigr] \f$: using static methods and
|
| 107 |
+
assignment, using static methods and the comma-initializer, or using the setXxx() methods.
|
| 108 |
+
|
| 109 |
+
<table class="example">
|
| 110 |
+
<tr><th>Example:</th><th>Output:</th></tr>
|
| 111 |
+
<tr><td>
|
| 112 |
+
\include Tutorial_AdvancedInitialization_ThreeWays.cpp
|
| 113 |
+
</td>
|
| 114 |
+
<td>
|
| 115 |
+
\verbinclude Tutorial_AdvancedInitialization_ThreeWays.out
|
| 116 |
+
</td></tr></table>
|
| 117 |
+
|
| 118 |
+
A summary of all pre-defined matrix, vector and array objects can be found in the \ref QuickRefPage.
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
\section TutorialAdvancedInitializationTemporaryObjects Usage as temporary objects
|
| 122 |
+
|
| 123 |
+
As shown above, static methods such as Zero() and Constant() can be used to initialize variables at the time of
|
| 124 |
+
declaration or at the right-hand side of an assignment operator. You can think of these methods as returning a
|
| 125 |
+
matrix or array; in fact, they return so-called \ref TopicEigenExpressionTemplates "expression objects" which
|
| 126 |
+
evaluate to a matrix or array when needed, so that this syntax does not incur any overhead.
|
| 127 |
+
|
| 128 |
+
These expressions can also be used as a temporary object. The second example in
|
| 129 |
+
the \ref GettingStarted guide, which we reproduce here, already illustrates this.
|
| 130 |
+
|
| 131 |
+
<table class="example">
|
| 132 |
+
<tr><th>Example:</th><th>Output:</th></tr>
|
| 133 |
+
<tr><td>
|
| 134 |
+
\include QuickStart_example2_dynamic.cpp
|
| 135 |
+
</td>
|
| 136 |
+
<td>
|
| 137 |
+
\verbinclude QuickStart_example2_dynamic.out
|
| 138 |
+
</td></tr></table>
|
| 139 |
+
|
| 140 |
+
The expression <tt>m + MatrixXf::Constant(3,3,1.2)</tt> constructs the 3-by-3 matrix expression with all its coefficients
|
| 141 |
+
equal to 1.2 plus the corresponding coefficient of \a m.
|
| 142 |
+
|
| 143 |
+
The comma-initializer can also be used to construct temporary objects. The following example constructs a random
|
| 144 |
+
matrix of size 2-by-3, and then multiplies this matrix on the left with
|
| 145 |
+
\f$ \bigl[ \begin{smallmatrix} 0 & 1 \\ 1 & 0 \end{smallmatrix} \bigr] \f$.
|
| 146 |
+
|
| 147 |
+
<table class="example">
|
| 148 |
+
<tr><th>Example:</th><th>Output:</th></tr>
|
| 149 |
+
<tr><td>
|
| 150 |
+
\include Tutorial_AdvancedInitialization_CommaTemporary.cpp
|
| 151 |
+
</td>
|
| 152 |
+
<td>
|
| 153 |
+
\verbinclude Tutorial_AdvancedInitialization_CommaTemporary.out
|
| 154 |
+
</td></tr></table>
|
| 155 |
+
|
| 156 |
+
The \link CommaInitializer::finished() finished() \endlink method is necessary here to get the actual matrix
|
| 157 |
+
object once the comma initialization of our temporary submatrix is done.
|
| 158 |
+
|
| 159 |
+
|
| 160 |
+
*/
|
| 161 |
+
|
| 162 |
+
}
|
include/eigen/doc/TutorialBlockOperations.dox
ADDED
|
@@ -0,0 +1,242 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
namespace Eigen {
|
| 2 |
+
|
| 3 |
+
/** \eigenManualPage TutorialBlockOperations Block operations
|
| 4 |
+
|
| 5 |
+
This page explains the essentials of block operations.
|
| 6 |
+
A block is a rectangular part of a matrix or array. Block expressions can be used both
|
| 7 |
+
as rvalues and as lvalues. As usual with Eigen expressions, this abstraction has zero runtime cost
|
| 8 |
+
provided that you let your compiler optimize.
|
| 9 |
+
|
| 10 |
+
\eigenAutoToc
|
| 11 |
+
|
| 12 |
+
\section TutorialBlockOperationsUsing Using block operations
|
| 13 |
+
|
| 14 |
+
The most general block operation in Eigen is called \link DenseBase::block() .block() \endlink.
|
| 15 |
+
There are two versions, whose syntax is as follows:
|
| 16 |
+
|
| 17 |
+
<table class="manual">
|
| 18 |
+
<tr><th>\b %Block \b operation</td>
|
| 19 |
+
<th>Version constructing a \n dynamic-size block expression</th>
|
| 20 |
+
<th>Version constructing a \n fixed-size block expression</th></tr>
|
| 21 |
+
<tr><td>%Block of size <tt>(p,q)</tt>, starting at <tt>(i,j)</tt></td>
|
| 22 |
+
<td>\code
|
| 23 |
+
matrix.block(i,j,p,q);\endcode </td>
|
| 24 |
+
<td>\code
|
| 25 |
+
matrix.block<p,q>(i,j);\endcode </td>
|
| 26 |
+
</tr>
|
| 27 |
+
</table>
|
| 28 |
+
|
| 29 |
+
As always in Eigen, indices start at 0.
|
| 30 |
+
|
| 31 |
+
Both versions can be used on fixed-size and dynamic-size matrices and arrays.
|
| 32 |
+
These two expressions are semantically equivalent.
|
| 33 |
+
The only difference is that the fixed-size version will typically give you faster code if the block size is small,
|
| 34 |
+
but requires this size to be known at compile time.
|
| 35 |
+
|
| 36 |
+
The following program uses the dynamic-size and fixed-size versions to print the values of several blocks inside a
|
| 37 |
+
matrix.
|
| 38 |
+
|
| 39 |
+
<table class="example">
|
| 40 |
+
<tr><th>Example:</th><th>Output:</th></tr>
|
| 41 |
+
<tr><td>
|
| 42 |
+
\include Tutorial_BlockOperations_print_block.cpp
|
| 43 |
+
</td>
|
| 44 |
+
<td>
|
| 45 |
+
\verbinclude Tutorial_BlockOperations_print_block.out
|
| 46 |
+
</td></tr></table>
|
| 47 |
+
|
| 48 |
+
In the above example the \link DenseBase::block() .block() \endlink function was employed as an \em rvalue, i.e.
|
| 49 |
+
it was only read from. However, blocks can also be used as \em lvalues, meaning that you can assign to a block.
|
| 50 |
+
|
| 51 |
+
This is illustrated in the following example. This example also demonstrates blocks in arrays, which works exactly like the above-demonstrated blocks in matrices.
|
| 52 |
+
|
| 53 |
+
<table class="example">
|
| 54 |
+
<tr><th>Example:</th><th>Output:</th></tr>
|
| 55 |
+
<tr><td>
|
| 56 |
+
\include Tutorial_BlockOperations_block_assignment.cpp
|
| 57 |
+
</td>
|
| 58 |
+
<td>
|
| 59 |
+
\verbinclude Tutorial_BlockOperations_block_assignment.out
|
| 60 |
+
</td></tr></table>
|
| 61 |
+
|
| 62 |
+
While the \link DenseBase::block() .block() \endlink method can be used for any block operation, there are
|
| 63 |
+
other methods for special cases, providing more specialized API and/or better performance. On the topic of performance, all that
|
| 64 |
+
matters is that you give Eigen as much information as possible at compile time. For example, if your block is a single whole column in a matrix,
|
| 65 |
+
using the specialized \link DenseBase::col() .col() \endlink function described below lets Eigen know that, which can give it optimization opportunities.
|
| 66 |
+
|
| 67 |
+
The rest of this page describes these specialized methods.
|
| 68 |
+
|
| 69 |
+
\section TutorialBlockOperationsSyntaxColumnRows Columns and rows
|
| 70 |
+
|
| 71 |
+
Individual columns and rows are special cases of blocks. Eigen provides methods to easily address them:
|
| 72 |
+
\link DenseBase::col() .col() \endlink and \link DenseBase::row() .row()\endlink.
|
| 73 |
+
|
| 74 |
+
<table class="manual">
|
| 75 |
+
<tr><th>%Block operation</th>
|
| 76 |
+
<th>Method</th>
|
| 77 |
+
<tr><td>i<sup>th</sup> row
|
| 78 |
+
\link DenseBase::row() * \endlink</td>
|
| 79 |
+
<td>\code
|
| 80 |
+
matrix.row(i);\endcode </td>
|
| 81 |
+
</tr>
|
| 82 |
+
<tr><td>j<sup>th</sup> column
|
| 83 |
+
\link DenseBase::col() * \endlink</td>
|
| 84 |
+
<td>\code
|
| 85 |
+
matrix.col(j);\endcode </td>
|
| 86 |
+
</tr>
|
| 87 |
+
</table>
|
| 88 |
+
|
| 89 |
+
The argument for \p col() and \p row() is the index of the column or row to be accessed. As always in Eigen, indices start at 0.
|
| 90 |
+
|
| 91 |
+
<table class="example">
|
| 92 |
+
<tr><th>Example:</th><th>Output:</th></tr>
|
| 93 |
+
<tr><td>
|
| 94 |
+
\include Tutorial_BlockOperations_colrow.cpp
|
| 95 |
+
</td>
|
| 96 |
+
<td>
|
| 97 |
+
\verbinclude Tutorial_BlockOperations_colrow.out
|
| 98 |
+
</td></tr></table>
|
| 99 |
+
|
| 100 |
+
That example also demonstrates that block expressions (here columns) can be used in arithmetic like any other expression.
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
\section TutorialBlockOperationsSyntaxCorners Corner-related operations
|
| 104 |
+
|
| 105 |
+
Eigen also provides special methods for blocks that are flushed against one of the corners or sides of a
|
| 106 |
+
matrix or array. For instance, \link DenseBase::topLeftCorner() .topLeftCorner() \endlink can be used to refer
|
| 107 |
+
to a block in the top-left corner of a matrix.
|
| 108 |
+
|
| 109 |
+
The different possibilities are summarized in the following table:
|
| 110 |
+
|
| 111 |
+
<table class="manual">
|
| 112 |
+
<tr><th>%Block \b operation</td>
|
| 113 |
+
<th>Version constructing a \n dynamic-size block expression</th>
|
| 114 |
+
<th>Version constructing a \n fixed-size block expression</th></tr>
|
| 115 |
+
<tr><td>Top-left p by q block \link DenseBase::topLeftCorner() * \endlink</td>
|
| 116 |
+
<td>\code
|
| 117 |
+
matrix.topLeftCorner(p,q);\endcode </td>
|
| 118 |
+
<td>\code
|
| 119 |
+
matrix.topLeftCorner<p,q>();\endcode </td>
|
| 120 |
+
</tr>
|
| 121 |
+
<tr><td>Bottom-left p by q block
|
| 122 |
+
\link DenseBase::bottomLeftCorner() * \endlink</td>
|
| 123 |
+
<td>\code
|
| 124 |
+
matrix.bottomLeftCorner(p,q);\endcode </td>
|
| 125 |
+
<td>\code
|
| 126 |
+
matrix.bottomLeftCorner<p,q>();\endcode </td>
|
| 127 |
+
</tr>
|
| 128 |
+
<tr><td>Top-right p by q block
|
| 129 |
+
\link DenseBase::topRightCorner() * \endlink</td>
|
| 130 |
+
<td>\code
|
| 131 |
+
matrix.topRightCorner(p,q);\endcode </td>
|
| 132 |
+
<td>\code
|
| 133 |
+
matrix.topRightCorner<p,q>();\endcode </td>
|
| 134 |
+
</tr>
|
| 135 |
+
<tr><td>Bottom-right p by q block
|
| 136 |
+
\link DenseBase::bottomRightCorner() * \endlink</td>
|
| 137 |
+
<td>\code
|
| 138 |
+
matrix.bottomRightCorner(p,q);\endcode </td>
|
| 139 |
+
<td>\code
|
| 140 |
+
matrix.bottomRightCorner<p,q>();\endcode </td>
|
| 141 |
+
</tr>
|
| 142 |
+
<tr><td>%Block containing the first q rows
|
| 143 |
+
\link DenseBase::topRows() * \endlink</td>
|
| 144 |
+
<td>\code
|
| 145 |
+
matrix.topRows(q);\endcode </td>
|
| 146 |
+
<td>\code
|
| 147 |
+
matrix.topRows<q>();\endcode </td>
|
| 148 |
+
</tr>
|
| 149 |
+
<tr><td>%Block containing the last q rows
|
| 150 |
+
\link DenseBase::bottomRows() * \endlink</td>
|
| 151 |
+
<td>\code
|
| 152 |
+
matrix.bottomRows(q);\endcode </td>
|
| 153 |
+
<td>\code
|
| 154 |
+
matrix.bottomRows<q>();\endcode </td>
|
| 155 |
+
</tr>
|
| 156 |
+
<tr><td>%Block containing the first p columns
|
| 157 |
+
\link DenseBase::leftCols() * \endlink</td>
|
| 158 |
+
<td>\code
|
| 159 |
+
matrix.leftCols(p);\endcode </td>
|
| 160 |
+
<td>\code
|
| 161 |
+
matrix.leftCols<p>();\endcode </td>
|
| 162 |
+
</tr>
|
| 163 |
+
<tr><td>%Block containing the last q columns
|
| 164 |
+
\link DenseBase::rightCols() * \endlink</td>
|
| 165 |
+
<td>\code
|
| 166 |
+
matrix.rightCols(q);\endcode </td>
|
| 167 |
+
<td>\code
|
| 168 |
+
matrix.rightCols<q>();\endcode </td>
|
| 169 |
+
</tr>
|
| 170 |
+
<tr><td>%Block containing the q columns starting from i
|
| 171 |
+
\link DenseBase::middleCols() * \endlink</td>
|
| 172 |
+
<td>\code
|
| 173 |
+
matrix.middleCols(i,q);\endcode </td>
|
| 174 |
+
<td>\code
|
| 175 |
+
matrix.middleCols<q>(i);\endcode </td>
|
| 176 |
+
</tr>
|
| 177 |
+
<tr><td>%Block containing the q rows starting from i
|
| 178 |
+
\link DenseBase::middleRows() * \endlink</td>
|
| 179 |
+
<td>\code
|
| 180 |
+
matrix.middleRows(i,q);\endcode </td>
|
| 181 |
+
<td>\code
|
| 182 |
+
matrix.middleRows<q>(i);\endcode </td>
|
| 183 |
+
</tr>
|
| 184 |
+
</table>
|
| 185 |
+
|
| 186 |
+
Here is a simple example illustrating the use of the operations presented above:
|
| 187 |
+
|
| 188 |
+
<table class="example">
|
| 189 |
+
<tr><th>Example:</th><th>Output:</th></tr>
|
| 190 |
+
<tr><td>
|
| 191 |
+
\include Tutorial_BlockOperations_corner.cpp
|
| 192 |
+
</td>
|
| 193 |
+
<td>
|
| 194 |
+
\verbinclude Tutorial_BlockOperations_corner.out
|
| 195 |
+
</td></tr></table>
|
| 196 |
+
|
| 197 |
+
|
| 198 |
+
\section TutorialBlockOperationsSyntaxVectors Block operations for vectors
|
| 199 |
+
|
| 200 |
+
Eigen also provides a set of block operations designed specifically for the special case of vectors and one-dimensional arrays:
|
| 201 |
+
|
| 202 |
+
<table class="manual">
|
| 203 |
+
<tr><th> %Block operation</th>
|
| 204 |
+
<th>Version constructing a \n dynamic-size block expression</th>
|
| 205 |
+
<th>Version constructing a \n fixed-size block expression</th></tr>
|
| 206 |
+
<tr><td>%Block containing the first \p n elements
|
| 207 |
+
\link DenseBase::head() * \endlink</td>
|
| 208 |
+
<td>\code
|
| 209 |
+
vector.head(n);\endcode </td>
|
| 210 |
+
<td>\code
|
| 211 |
+
vector.head<n>();\endcode </td>
|
| 212 |
+
</tr>
|
| 213 |
+
<tr><td>%Block containing the last \p n elements
|
| 214 |
+
\link DenseBase::tail() * \endlink</td>
|
| 215 |
+
<td>\code
|
| 216 |
+
vector.tail(n);\endcode </td>
|
| 217 |
+
<td>\code
|
| 218 |
+
vector.tail<n>();\endcode </td>
|
| 219 |
+
</tr>
|
| 220 |
+
<tr><td>%Block containing \p n elements, starting at position \p i
|
| 221 |
+
\link DenseBase::segment() * \endlink</td>
|
| 222 |
+
<td>\code
|
| 223 |
+
vector.segment(i,n);\endcode </td>
|
| 224 |
+
<td>\code
|
| 225 |
+
vector.segment<n>(i);\endcode </td>
|
| 226 |
+
</tr>
|
| 227 |
+
</table>
|
| 228 |
+
|
| 229 |
+
|
| 230 |
+
An example is presented below:
|
| 231 |
+
<table class="example">
|
| 232 |
+
<tr><th>Example:</th><th>Output:</th></tr>
|
| 233 |
+
<tr><td>
|
| 234 |
+
\include Tutorial_BlockOperations_vector.cpp
|
| 235 |
+
</td>
|
| 236 |
+
<td>
|
| 237 |
+
\verbinclude Tutorial_BlockOperations_vector.out
|
| 238 |
+
</td></tr></table>
|
| 239 |
+
|
| 240 |
+
*/
|
| 241 |
+
|
| 242 |
+
}
|
include/eigen/doc/TutorialGeometry.dox
ADDED
|
@@ -0,0 +1,242 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
namespace Eigen {
|
| 2 |
+
|
| 3 |
+
/** \eigenManualPage TutorialGeometry Space transformations
|
| 4 |
+
|
| 5 |
+
In this page, we will introduce the many possibilities offered by the \ref Geometry_Module "geometry module" to deal with 2D and 3D rotations and projective or affine transformations.
|
| 6 |
+
|
| 7 |
+
\eigenAutoToc
|
| 8 |
+
|
| 9 |
+
Eigen's Geometry module provides two different kinds of geometric transformations:
|
| 10 |
+
- Abstract transformations, such as rotations (represented by \ref AngleAxis "angle and axis" or by a \ref Quaternion "quaternion"), \ref Translation "translations", \ref Scaling() "scalings". These transformations are NOT represented as matrices, but you can nevertheless mix them with matrices and vectors in expressions, and convert them to matrices if you wish.
|
| 11 |
+
- Projective or affine transformation matrices: see the Transform class. These are really matrices.
|
| 12 |
+
|
| 13 |
+
\note If you are working with OpenGL 4x4 matrices then Affine3f and Affine3d are what you want. Since Eigen defaults to column-major storage, you can directly use the Transform::data() method to pass your transformation matrix to OpenGL.
|
| 14 |
+
|
| 15 |
+
You can construct a Transform from an abstract transformation, like this:
|
| 16 |
+
\code
|
| 17 |
+
Transform t(AngleAxis(angle,axis));
|
| 18 |
+
\endcode
|
| 19 |
+
or like this:
|
| 20 |
+
\code
|
| 21 |
+
Transform t;
|
| 22 |
+
t = AngleAxis(angle,axis);
|
| 23 |
+
\endcode
|
| 24 |
+
But note that unfortunately, because of how C++ works, you can \b not do this:
|
| 25 |
+
\code
|
| 26 |
+
Transform t = AngleAxis(angle,axis);
|
| 27 |
+
\endcode
|
| 28 |
+
<span class="note">\b Explanation: In the C++ language, this would require Transform to have a non-explicit conversion constructor from AngleAxis, but we really don't want to allow implicit casting here.
|
| 29 |
+
</span>
|
| 30 |
+
|
| 31 |
+
\section TutorialGeoElementaryTransformations Transformation types
|
| 32 |
+
|
| 33 |
+
<table class="manual">
|
| 34 |
+
<tr><th>Transformation type</th><th>Typical initialization code</th></tr>
|
| 35 |
+
<tr><td>
|
| 36 |
+
\ref Rotation2D "2D rotation" from an angle</td><td>\code
|
| 37 |
+
Rotation2D<float> rot2(angle_in_radian);\endcode</td></tr>
|
| 38 |
+
<tr class="alt"><td>
|
| 39 |
+
3D rotation as an \ref AngleAxis "angle + axis"</td><td>\code
|
| 40 |
+
AngleAxis<float> aa(angle_in_radian, Vector3f(ax,ay,az));\endcode
|
| 41 |
+
<span class="note">The axis vector must be normalized.</span></td></tr>
|
| 42 |
+
<tr><td>
|
| 43 |
+
3D rotation as a \ref Quaternion "quaternion"</td><td>\code
|
| 44 |
+
Quaternion<float> q; q = AngleAxis<float>(angle_in_radian, axis);\endcode</td></tr>
|
| 45 |
+
<tr class="alt"><td>
|
| 46 |
+
N-D Scaling</td><td>\code
|
| 47 |
+
Scaling(sx, sy)
|
| 48 |
+
Scaling(sx, sy, sz)
|
| 49 |
+
Scaling(s)
|
| 50 |
+
Scaling(vecN)\endcode</td></tr>
|
| 51 |
+
<tr><td>
|
| 52 |
+
N-D Translation</td><td>\code
|
| 53 |
+
Translation<float,2>(tx, ty)
|
| 54 |
+
Translation<float,3>(tx, ty, tz)
|
| 55 |
+
Translation<float,N>(s)
|
| 56 |
+
Translation<float,N>(vecN)\endcode</td></tr>
|
| 57 |
+
<tr class="alt"><td>
|
| 58 |
+
N-D \ref TutorialGeoTransform "Affine transformation"</td><td>\code
|
| 59 |
+
Transform<float,N,Affine> t = concatenation_of_any_transformations;
|
| 60 |
+
Transform<float,3,Affine> t = Translation3f(p) * AngleAxisf(a,axis) * Scaling(s);\endcode</td></tr>
|
| 61 |
+
<tr><td>
|
| 62 |
+
N-D Linear transformations \n
|
| 63 |
+
<em class=note>(pure rotations, \n scaling, etc.)</em></td><td>\code
|
| 64 |
+
Matrix<float,N> t = concatenation_of_rotations_and_scalings;
|
| 65 |
+
Matrix<float,2> t = Rotation2Df(a) * Scaling(s);
|
| 66 |
+
Matrix<float,3> t = AngleAxisf(a,axis) * Scaling(s);\endcode</td></tr>
|
| 67 |
+
</table>
|
| 68 |
+
|
| 69 |
+
<strong>Notes on rotations</strong>\n To transform more than a single vector the preferred
|
| 70 |
+
representations are rotation matrices, while for other usages Quaternion is the
|
| 71 |
+
representation of choice as they are compact, fast and stable. Finally Rotation2D and
|
| 72 |
+
AngleAxis are mainly convenient types to create other rotation objects.
|
| 73 |
+
|
| 74 |
+
<strong>Notes on Translation and Scaling</strong>\n Like AngleAxis, these classes were
|
| 75 |
+
designed to simplify the creation/initialization of linear (Matrix) and affine (Transform)
|
| 76 |
+
transformations. Nevertheless, unlike AngleAxis which is inefficient to use, these classes
|
| 77 |
+
might still be interesting to write generic and efficient algorithms taking as input any
|
| 78 |
+
kind of transformations.
|
| 79 |
+
|
| 80 |
+
Any of the above transformation types can be converted to any other types of the same nature,
|
| 81 |
+
or to a more generic type. Here are some additional examples:
|
| 82 |
+
<table class="manual">
|
| 83 |
+
<tr><td>\code
|
| 84 |
+
Rotation2Df r; r = Matrix2f(..); // assumes a pure rotation matrix
|
| 85 |
+
AngleAxisf aa; aa = Quaternionf(..);
|
| 86 |
+
AngleAxisf aa; aa = Matrix3f(..); // assumes a pure rotation matrix
|
| 87 |
+
Matrix2f m; m = Rotation2Df(..);
|
| 88 |
+
Matrix3f m; m = Quaternionf(..); Matrix3f m; m = Scaling(..);
|
| 89 |
+
Affine3f m; m = AngleAxis3f(..); Affine3f m; m = Scaling(..);
|
| 90 |
+
Affine3f m; m = Translation3f(..); Affine3f m; m = Matrix3f(..);
|
| 91 |
+
\endcode</td></tr>
|
| 92 |
+
</table>
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
<a href="#" class="top">top</a>\section TutorialGeoCommontransformationAPI Common API across transformation types
|
| 96 |
+
|
| 97 |
+
To some extent, Eigen's \ref Geometry_Module "geometry module" allows you to write
|
| 98 |
+
generic algorithms working on any kind of transformation representations:
|
| 99 |
+
<table class="manual">
|
| 100 |
+
<tr><td>
|
| 101 |
+
Concatenation of two transformations</td><td>\code
|
| 102 |
+
gen1 * gen2;\endcode</td></tr>
|
| 103 |
+
<tr class="alt"><td>Apply the transformation to a vector</td><td>\code
|
| 104 |
+
vec2 = gen1 * vec1;\endcode</td></tr>
|
| 105 |
+
<tr><td>Get the inverse of the transformation</td><td>\code
|
| 106 |
+
gen2 = gen1.inverse();\endcode</td></tr>
|
| 107 |
+
<tr class="alt"><td>Spherical interpolation \n (Rotation2D and Quaternion only)</td><td>\code
|
| 108 |
+
rot3 = rot1.slerp(alpha,rot2);\endcode</td></tr>
|
| 109 |
+
</table>
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
<a href="#" class="top">top</a>\section TutorialGeoTransform Affine transformations
|
| 114 |
+
Generic affine transformations are represented by the Transform class which internally
|
| 115 |
+
is a (Dim+1)^2 matrix. In Eigen we have chosen to not distinguish between points and
|
| 116 |
+
vectors such that all points are actually represented by displacement vectors from the
|
| 117 |
+
origin ( \f$ \mathbf{p} \equiv \mathbf{p}-0 \f$ ). With that in mind, real points and
|
| 118 |
+
vectors are only distinguished when the transformation is applied.
|
| 119 |
+
<table class="manual">
|
| 120 |
+
<tr><td>
|
| 121 |
+
Apply the transformation to a \b point </td><td>\code
|
| 122 |
+
VectorNf p1, p2;
|
| 123 |
+
p2 = t * p1;\endcode</td></tr>
|
| 124 |
+
<tr class="alt"><td>
|
| 125 |
+
Apply the transformation to a \b vector </td><td>\code
|
| 126 |
+
VectorNf vec1, vec2;
|
| 127 |
+
vec2 = t.linear() * vec1;\endcode</td></tr>
|
| 128 |
+
<tr><td>
|
| 129 |
+
Apply a \em general transformation \n to a \b normal \b vector \n
|
| 130 |
+
</td><td>\code
|
| 131 |
+
VectorNf n1, n2;
|
| 132 |
+
MatrixNf normalMatrix = t.linear().inverse().transpose();
|
| 133 |
+
n2 = (normalMatrix * n1).normalized();\endcode</td></tr>
|
| 134 |
+
<tr><td colspan="2">(See subject 5.27 of this <a href="http://www.faqs.org/faqs/graphics/algorithms-faq">faq</a> for the explanations)</td></tr>
|
| 135 |
+
<tr class="alt"><td>
|
| 136 |
+
Apply a transformation with \em pure \em rotation \n to a \b normal \b vector
|
| 137 |
+
(no scaling, no shear)</td><td>\code
|
| 138 |
+
n2 = t.linear() * n1;\endcode</td></tr>
|
| 139 |
+
<tr><td>
|
| 140 |
+
OpenGL compatibility \b 3D </td><td>\code
|
| 141 |
+
glLoadMatrixf(t.data());\endcode</td></tr>
|
| 142 |
+
<tr class="alt"><td>
|
| 143 |
+
OpenGL compatibility \b 2D </td><td>\code
|
| 144 |
+
Affine3f aux(Affine3f::Identity());
|
| 145 |
+
aux.linear().topLeftCorner<2,2>() = t.linear();
|
| 146 |
+
aux.translation().head<2>() = t.translation();
|
| 147 |
+
glLoadMatrixf(aux.data());\endcode</td></tr>
|
| 148 |
+
</table>
|
| 149 |
+
|
| 150 |
+
\b Component \b accessors
|
| 151 |
+
<table class="manual">
|
| 152 |
+
<tr><td>
|
| 153 |
+
full read-write access to the internal matrix</td><td>\code
|
| 154 |
+
t.matrix() = matN1xN1; // N1 means N+1
|
| 155 |
+
matN1xN1 = t.matrix();
|
| 156 |
+
\endcode</td></tr>
|
| 157 |
+
<tr class="alt"><td>
|
| 158 |
+
coefficient accessors</td><td>\code
|
| 159 |
+
t(i,j) = scalar; <=> t.matrix()(i,j) = scalar;
|
| 160 |
+
scalar = t(i,j); <=> scalar = t.matrix()(i,j);
|
| 161 |
+
\endcode</td></tr>
|
| 162 |
+
<tr><td>
|
| 163 |
+
translation part</td><td>\code
|
| 164 |
+
t.translation() = vecN;
|
| 165 |
+
vecN = t.translation();
|
| 166 |
+
\endcode</td></tr>
|
| 167 |
+
<tr class="alt"><td>
|
| 168 |
+
linear part</td><td>\code
|
| 169 |
+
t.linear() = matNxN;
|
| 170 |
+
matNxN = t.linear();
|
| 171 |
+
\endcode</td></tr>
|
| 172 |
+
<tr><td>
|
| 173 |
+
extract the rotation matrix</td><td>\code
|
| 174 |
+
matNxN = t.rotation();
|
| 175 |
+
\endcode</td></tr>
|
| 176 |
+
</table>
|
| 177 |
+
|
| 178 |
+
|
| 179 |
+
\b Transformation \b creation \n
|
| 180 |
+
While transformation objects can be created and updated concatenating elementary transformations,
|
| 181 |
+
the Transform class also features a procedural API:
|
| 182 |
+
<table class="manual">
|
| 183 |
+
<tr><th></th><th>procedural API</th><th>equivalent natural API </th></tr>
|
| 184 |
+
<tr><td>Translation</td><td>\code
|
| 185 |
+
t.translate(Vector_(tx,ty,..));
|
| 186 |
+
t.pretranslate(Vector_(tx,ty,..));
|
| 187 |
+
\endcode</td><td>\code
|
| 188 |
+
t *= Translation_(tx,ty,..);
|
| 189 |
+
t = Translation_(tx,ty,..) * t;
|
| 190 |
+
\endcode</td></tr>
|
| 191 |
+
<tr class="alt"><td>\b Rotation \n <em class="note">In 2D and for the procedural API, any_rotation can also \n be an angle in radian</em></td><td>\code
|
| 192 |
+
t.rotate(any_rotation);
|
| 193 |
+
t.prerotate(any_rotation);
|
| 194 |
+
\endcode</td><td>\code
|
| 195 |
+
t *= any_rotation;
|
| 196 |
+
t = any_rotation * t;
|
| 197 |
+
\endcode</td></tr>
|
| 198 |
+
<tr><td>Scaling</td><td>\code
|
| 199 |
+
t.scale(Vector_(sx,sy,..));
|
| 200 |
+
t.scale(s);
|
| 201 |
+
t.prescale(Vector_(sx,sy,..));
|
| 202 |
+
t.prescale(s);
|
| 203 |
+
\endcode</td><td>\code
|
| 204 |
+
t *= Scaling(sx,sy,..);
|
| 205 |
+
t *= Scaling(s);
|
| 206 |
+
t = Scaling(sx,sy,..) * t;
|
| 207 |
+
t = Scaling(s) * t;
|
| 208 |
+
\endcode</td></tr>
|
| 209 |
+
<tr class="alt"><td>Shear transformation \n ( \b 2D \b only ! )</td><td>\code
|
| 210 |
+
t.shear(sx,sy);
|
| 211 |
+
t.preshear(sx,sy);
|
| 212 |
+
\endcode</td><td></td></tr>
|
| 213 |
+
</table>
|
| 214 |
+
|
| 215 |
+
Note that in both APIs, many transformations can be concatenated in a single expression, as shown in the two following equivalent examples:
|
| 216 |
+
<table class="manual">
|
| 217 |
+
<tr><td>\code
|
| 218 |
+
t.pretranslate(..).rotate(..).translate(..).scale(..);
|
| 219 |
+
\endcode</td></tr>
|
| 220 |
+
<tr><td>\code
|
| 221 |
+
t = Translation_(..) * t * RotationType(..) * Translation_(..) * Scaling(..);
|
| 222 |
+
\endcode</td></tr>
|
| 223 |
+
</table>
|
| 224 |
+
|
| 225 |
+
|
| 226 |
+
|
| 227 |
+
<a href="#" class="top">top</a>\section TutorialGeoEulerAngles Euler angles
|
| 228 |
+
<table class="manual">
|
| 229 |
+
<tr><td style="max-width:30em;">
|
| 230 |
+
Euler angles might be convenient to create rotation objects.
|
| 231 |
+
On the other hand, since there exist 24 different conventions, they are pretty confusing to use. This example shows how
|
| 232 |
+
to create a rotation matrix according to the 2-1-2 convention.</td><td>\code
|
| 233 |
+
Matrix3f m;
|
| 234 |
+
m = AngleAxisf(angle1, Vector3f::UnitZ())
|
| 235 |
+
  * AngleAxisf(angle2, Vector3f::UnitY())
|
| 236 |
+
  * AngleAxisf(angle3, Vector3f::UnitZ());
|
| 237 |
+
\endcode</td></tr>
|
| 238 |
+
</table>
|
| 239 |
+
|
| 240 |
+
*/
|
| 241 |
+
|
| 242 |
+
}
|
include/eigen/doc/TutorialLinearAlgebra.dox
ADDED
|
@@ -0,0 +1,299 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
namespace Eigen {
|
| 2 |
+
|
| 3 |
+
/** \eigenManualPage TutorialLinearAlgebra Linear algebra and decompositions
|
| 4 |
+
|
| 5 |
+
This page explains how to solve linear systems, compute various decompositions such as LU,
|
| 6 |
+
QR, %SVD, eigendecompositions... After reading this page, don't miss our
|
| 7 |
+
\link TopicLinearAlgebraDecompositions catalogue \endlink of dense matrix decompositions.
|
| 8 |
+
|
| 9 |
+
\eigenAutoToc
|
| 10 |
+
|
| 11 |
+
\section TutorialLinAlgBasicSolve Basic linear solving
|
| 12 |
+
|
| 13 |
+
\b The \b problem: You have a system of equations, that you have written as a single matrix equation
|
| 14 |
+
\f[ Ax \: = \: b \f]
|
| 15 |
+
Where \a A and \a b are matrices (\a b could be a vector, as a special case). You want to find a solution \a x.
|
| 16 |
+
|
| 17 |
+
\b The \b solution: You can choose between various decompositions, depending on the properties of your matrix \a A,
|
| 18 |
+
and depending on whether you favor speed or accuracy. However, let's start with an example that works in all cases,
|
| 19 |
+
and is a good compromise:
|
| 20 |
+
<table class="example">
|
| 21 |
+
<tr><th>Example:</th><th>Output:</th></tr>
|
| 22 |
+
<tr>
|
| 23 |
+
<td>\include TutorialLinAlgExSolveColPivHouseholderQR.cpp </td>
|
| 24 |
+
<td>\verbinclude TutorialLinAlgExSolveColPivHouseholderQR.out </td>
|
| 25 |
+
</tr>
|
| 26 |
+
</table>
|
| 27 |
+
|
| 28 |
+
In this example, the colPivHouseholderQr() method returns an object of class ColPivHouseholderQR. Since here the
|
| 29 |
+
matrix is of type Matrix3f, this line could have been replaced by:
|
| 30 |
+
\code
|
| 31 |
+
ColPivHouseholderQR<Matrix3f> dec(A);
|
| 32 |
+
Vector3f x = dec.solve(b);
|
| 33 |
+
\endcode
|
| 34 |
+
|
| 35 |
+
Here, ColPivHouseholderQR is a QR decomposition with column pivoting. It's a good compromise for this tutorial, as it
|
| 36 |
+
works for all matrices while being quite fast. Here is a table of some other decompositions that you can choose from,
|
| 37 |
+
depending on your matrix, the problem you are trying to solve, and the trade-off you want to make:
|
| 38 |
+
|
| 39 |
+
<table class="manual">
|
| 40 |
+
<tr>
|
| 41 |
+
<th>Decomposition</th>
|
| 42 |
+
<th>Method</th>
|
| 43 |
+
<th>Requirements<br/>on the matrix</th>
|
| 44 |
+
<th>Speed<br/> (small-to-medium)</th>
|
| 45 |
+
<th>Speed<br/> (large)</th>
|
| 46 |
+
<th>Accuracy</th>
|
| 47 |
+
</tr>
|
| 48 |
+
<tr>
|
| 49 |
+
<td>PartialPivLU</td>
|
| 50 |
+
<td>partialPivLu()</td>
|
| 51 |
+
<td>Invertible</td>
|
| 52 |
+
<td>++</td>
|
| 53 |
+
<td>++</td>
|
| 54 |
+
<td>+</td>
|
| 55 |
+
</tr>
|
| 56 |
+
<tr class="alt">
|
| 57 |
+
<td>FullPivLU</td>
|
| 58 |
+
<td>fullPivLu()</td>
|
| 59 |
+
<td>None</td>
|
| 60 |
+
<td>-</td>
|
| 61 |
+
<td>- -</td>
|
| 62 |
+
<td>+++</td>
|
| 63 |
+
</tr>
|
| 64 |
+
<tr>
|
| 65 |
+
<td>HouseholderQR</td>
|
| 66 |
+
<td>householderQr()</td>
|
| 67 |
+
<td>None</td>
|
| 68 |
+
<td>++</td>
|
| 69 |
+
<td>++</td>
|
| 70 |
+
<td>+</td>
|
| 71 |
+
</tr>
|
| 72 |
+
<tr class="alt">
|
| 73 |
+
<td>ColPivHouseholderQR</td>
|
| 74 |
+
<td>colPivHouseholderQr()</td>
|
| 75 |
+
<td>None</td>
|
| 76 |
+
<td>+</td>
|
| 77 |
+
<td>-</td>
|
| 78 |
+
<td>+++</td>
|
| 79 |
+
</tr>
|
| 80 |
+
<tr>
|
| 81 |
+
<td>FullPivHouseholderQR</td>
|
| 82 |
+
<td>fullPivHouseholderQr()</td>
|
| 83 |
+
<td>None</td>
|
| 84 |
+
<td>-</td>
|
| 85 |
+
<td>- -</td>
|
| 86 |
+
<td>+++</td>
|
| 87 |
+
</tr>
|
| 88 |
+
<tr class="alt">
|
| 89 |
+
<td>CompleteOrthogonalDecomposition</td>
|
| 90 |
+
<td>completeOrthogonalDecomposition()</td>
|
| 91 |
+
<td>None</td>
|
| 92 |
+
<td>+</td>
|
| 93 |
+
<td>-</td>
|
| 94 |
+
<td>+++</td>
|
| 95 |
+
</tr>
|
| 96 |
+
<tr class="alt">
|
| 97 |
+
<td>LLT</td>
|
| 98 |
+
<td>llt()</td>
|
| 99 |
+
<td>Positive definite</td>
|
| 100 |
+
<td>+++</td>
|
| 101 |
+
<td>+++</td>
|
| 102 |
+
<td>+</td>
|
| 103 |
+
</tr>
|
| 104 |
+
<tr>
|
| 105 |
+
<td>LDLT</td>
|
| 106 |
+
<td>ldlt()</td>
|
| 107 |
+
<td>Positive or negative<br/> semidefinite</td>
|
| 108 |
+
<td>+++</td>
|
| 109 |
+
<td>+</td>
|
| 110 |
+
<td>++</td>
|
| 111 |
+
</tr>
|
| 112 |
+
<tr class="alt">
|
| 113 |
+
<td>BDCSVD</td>
|
| 114 |
+
<td>bdcSvd()</td>
|
| 115 |
+
<td>None</td>
|
| 116 |
+
<td>-</td>
|
| 117 |
+
<td>-</td>
|
| 118 |
+
<td>+++</td>
|
| 119 |
+
</tr>
|
| 120 |
+
<tr class="alt">
|
| 121 |
+
<td>JacobiSVD</td>
|
| 122 |
+
<td>jacobiSvd()</td>
|
| 123 |
+
<td>None</td>
|
| 124 |
+
<td>-</td>
|
| 125 |
+
<td>- - -</td>
|
| 126 |
+
<td>+++</td>
|
| 127 |
+
</tr>
|
| 128 |
+
</table>
|
| 129 |
+
To get an overview of the true relative speed of the different decompositions, check this \link DenseDecompositionBenchmark benchmark \endlink.
|
| 130 |
+
|
| 131 |
+
All of these decompositions offer a solve() method that works as in the above example.
|
| 132 |
+
|
| 133 |
+
If you know more about the properties of your matrix, you can use the above table to select the best method.
|
| 134 |
+
For example, a good choice for solving linear systems with a non-symmetric matrix of full rank is PartialPivLU.
|
| 135 |
+
If you know that your matrix is also symmetric and positive definite, the above table says that
|
| 136 |
+
a very good choice is the LLT or LDLT decomposition. Here's an example, also demonstrating that using a general
|
| 137 |
+
matrix (not a vector) as right hand side is possible:
|
| 138 |
+
|
| 139 |
+
<table class="example">
|
| 140 |
+
<tr><th>Example:</th><th>Output:</th></tr>
|
| 141 |
+
<tr>
|
| 142 |
+
<td>\include TutorialLinAlgExSolveLDLT.cpp </td>
|
| 143 |
+
<td>\verbinclude TutorialLinAlgExSolveLDLT.out </td>
|
| 144 |
+
</tr>
|
| 145 |
+
</table>
|
| 146 |
+
|
| 147 |
+
For a \ref TopicLinearAlgebraDecompositions "much more complete table" comparing all decompositions supported by Eigen (notice that Eigen
|
| 148 |
+
supports many other decompositions), see our special page on
|
| 149 |
+
\ref TopicLinearAlgebraDecompositions "this topic".
|
| 150 |
+
|
| 151 |
+
|
| 152 |
+
\section TutorialLinAlgLeastsquares Least squares solving
|
| 153 |
+
|
| 154 |
+
The most general and accurate method to solve under- or over-determined linear systems
|
| 155 |
+
in the least squares sense, is the SVD decomposition. Eigen provides two implementations.
|
| 156 |
+
The recommended one is the BDCSVD class, which scales well for large problems
|
| 157 |
+
and automatically falls back to the JacobiSVD class for smaller problems.
|
| 158 |
+
For both classes, their solve() method solves the linear system in the least-squares
|
| 159 |
+
sense.
|
| 160 |
+
|
| 161 |
+
Here is an example:
|
| 162 |
+
<table class="example">
|
| 163 |
+
<tr><th>Example:</th><th>Output:</th></tr>
|
| 164 |
+
<tr>
|
| 165 |
+
<td>\include TutorialLinAlgSVDSolve.cpp </td>
|
| 166 |
+
<td>\verbinclude TutorialLinAlgSVDSolve.out </td>
|
| 167 |
+
</tr>
|
| 168 |
+
</table>
|
| 169 |
+
|
| 170 |
+
An alternative to the SVD, which is usually faster and about as accurate, is CompleteOrthogonalDecomposition.
|
| 171 |
+
|
| 172 |
+
Again, if you know more about the problem, the table above contains methods that are potentially faster.
|
| 173 |
+
If your matrix is full rank, HouseholderQR is the method of choice. If your matrix is full rank and well conditioned,
|
| 174 |
+
using the Cholesky decomposition (LLT) on the matrix of the normal equations can be faster still.
|
| 175 |
+
Our page on \link LeastSquares least squares solving \endlink has more details.
|
| 176 |
+
|
| 177 |
+
|
| 178 |
+
\section TutorialLinAlgSolutionExists Checking if a matrix is singular
|
| 179 |
+
|
| 180 |
+
Only you know what error margin you want to allow for a solution to be considered valid.
|
| 181 |
+
So Eigen lets you do this computation for yourself, if you want to, as in this example:
|
| 182 |
+
|
| 183 |
+
<table class="example">
|
| 184 |
+
<tr><th>Example:</th><th>Output:</th></tr>
|
| 185 |
+
<tr>
|
| 186 |
+
<td>\include TutorialLinAlgExComputeSolveError.cpp </td>
|
| 187 |
+
<td>\verbinclude TutorialLinAlgExComputeSolveError.out </td>
|
| 188 |
+
</tr>
|
| 189 |
+
</table>
|
| 190 |
+
|
| 191 |
+
\section TutorialLinAlgEigensolving Computing eigenvalues and eigenvectors
|
| 192 |
+
|
| 193 |
+
You need an eigendecomposition here, see available such decompositions on \ref TopicLinearAlgebraDecompositions "this page".
|
| 194 |
+
Make sure to check if your matrix is self-adjoint, as is often the case in these problems. Here's an example using
|
| 195 |
+
SelfAdjointEigenSolver, it could easily be adapted to general matrices using EigenSolver or ComplexEigenSolver.
|
| 196 |
+
|
| 197 |
+
The computation of eigenvalues and eigenvectors does not necessarily converge, but such failure to converge is
|
| 198 |
+
very rare. The call to info() is to check for this possibility.
|
| 199 |
+
|
| 200 |
+
<table class="example">
|
| 201 |
+
<tr><th>Example:</th><th>Output:</th></tr>
|
| 202 |
+
<tr>
|
| 203 |
+
<td>\include TutorialLinAlgSelfAdjointEigenSolver.cpp </td>
|
| 204 |
+
<td>\verbinclude TutorialLinAlgSelfAdjointEigenSolver.out </td>
|
| 205 |
+
</tr>
|
| 206 |
+
</table>
|
| 207 |
+
|
| 208 |
+
\section TutorialLinAlgInverse Computing inverse and determinant
|
| 209 |
+
|
| 210 |
+
First of all, make sure that you really want this. While inverse and determinant are fundamental mathematical concepts,
|
| 211 |
+
in \em numerical linear algebra they are not as useful as in pure mathematics. Inverse computations are often
|
| 212 |
+
advantageously replaced by solve() operations, and the determinant is often \em not a good way of checking if a matrix
|
| 213 |
+
is invertible.
|
| 214 |
+
|
| 215 |
+
However, for \em very \em small matrices, the above may not be true, and inverse and determinant can be very useful.
|
| 216 |
+
|
| 217 |
+
While certain decompositions, such as PartialPivLU and FullPivLU, offer inverse() and determinant() methods, you can also
|
| 218 |
+
call inverse() and determinant() directly on a matrix. If your matrix is of a very small fixed size (at most 4x4) this
|
| 219 |
+
allows Eigen to avoid performing a LU decomposition, and instead use formulas that are more efficient on such small matrices.
|
| 220 |
+
|
| 221 |
+
Here is an example:
|
| 222 |
+
<table class="example">
|
| 223 |
+
<tr><th>Example:</th><th>Output:</th></tr>
|
| 224 |
+
<tr>
|
| 225 |
+
<td>\include TutorialLinAlgInverseDeterminant.cpp </td>
|
| 226 |
+
<td>\verbinclude TutorialLinAlgInverseDeterminant.out </td>
|
| 227 |
+
</tr>
|
| 228 |
+
</table>
|
| 229 |
+
|
| 230 |
+
\section TutorialLinAlgSeparateComputation Separating the computation from the construction
|
| 231 |
+
|
| 232 |
+
In the above examples, the decomposition was computed at the same time that the decomposition object was constructed.
|
| 233 |
+
There are however situations where you might want to separate these two things, for example if you don't know,
|
| 234 |
+
at the time of the construction, the matrix that you will want to decompose; or if you want to reuse an existing
|
| 235 |
+
decomposition object.
|
| 236 |
+
|
| 237 |
+
What makes this possible is that:
|
| 238 |
+
\li all decompositions have a default constructor,
|
| 239 |
+
\li all decompositions have a compute(matrix) method that does the computation, and that may be called again
|
| 240 |
+
on an already-computed decomposition, reinitializing it.
|
| 241 |
+
|
| 242 |
+
For example:
|
| 243 |
+
|
| 244 |
+
<table class="example">
|
| 245 |
+
<tr><th>Example:</th><th>Output:</th></tr>
|
| 246 |
+
<tr>
|
| 247 |
+
<td>\include TutorialLinAlgComputeTwice.cpp </td>
|
| 248 |
+
<td>\verbinclude TutorialLinAlgComputeTwice.out </td>
|
| 249 |
+
</tr>
|
| 250 |
+
</table>
|
| 251 |
+
|
| 252 |
+
Finally, you can tell the decomposition constructor to preallocate storage for decomposing matrices of a given size,
|
| 253 |
+
so that when you subsequently decompose such matrices, no dynamic memory allocation is performed (of course, if you
|
| 254 |
+
are using fixed-size matrices, no dynamic memory allocation happens at all). This is done by just
|
| 255 |
+
passing the size to the decomposition constructor, as in this example:
|
| 256 |
+
\code
|
| 257 |
+
HouseholderQR<MatrixXf> qr(50,50);
|
| 258 |
+
MatrixXf A = MatrixXf::Random(50,50);
|
| 259 |
+
qr.compute(A); // no dynamic memory allocation
|
| 260 |
+
\endcode
|
| 261 |
+
|
| 262 |
+
\section TutorialLinAlgRankRevealing Rank-revealing decompositions
|
| 263 |
+
|
| 264 |
+
Certain decompositions are rank-revealing, i.e. are able to compute the rank of a matrix. These are typically
|
| 265 |
+
also the decompositions that behave best in the face of a non-full-rank matrix (which in the square case means a
|
| 266 |
+
singular matrix). On \ref TopicLinearAlgebraDecompositions "this table" you can see for all our decompositions
|
| 267 |
+
whether they are rank-revealing or not.
|
| 268 |
+
|
| 269 |
+
Rank-revealing decompositions offer at least a rank() method. They can also offer convenience methods such as isInvertible(),
|
| 270 |
+
and some are also providing methods to compute the kernel (null-space) and image (column-space) of the matrix, as is the
|
| 271 |
+
case with FullPivLU:
|
| 272 |
+
|
| 273 |
+
<table class="example">
|
| 274 |
+
<tr><th>Example:</th><th>Output:</th></tr>
|
| 275 |
+
<tr>
|
| 276 |
+
<td>\include TutorialLinAlgRankRevealing.cpp </td>
|
| 277 |
+
<td>\verbinclude TutorialLinAlgRankRevealing.out </td>
|
| 278 |
+
</tr>
|
| 279 |
+
</table>
|
| 280 |
+
|
| 281 |
+
Of course, any rank computation depends on the choice of an arbitrary threshold, since practically no
|
| 282 |
+
floating-point matrix is \em exactly rank-deficient. Eigen picks a sensible default threshold, which depends
|
| 283 |
+
on the decomposition but is typically the diagonal size times machine epsilon. While this is the best default we
|
| 284 |
+
could pick, only you know what is the right threshold for your application. You can set this by calling setThreshold()
|
| 285 |
+
on your decomposition object before calling rank() or any other method that needs to use such a threshold.
|
| 286 |
+
The decomposition itself, i.e. the compute() method, is independent of the threshold. You don't need to recompute the
|
| 287 |
+
decomposition after you've changed the threshold.
|
| 288 |
+
|
| 289 |
+
<table class="example">
|
| 290 |
+
<tr><th>Example:</th><th>Output:</th></tr>
|
| 291 |
+
<tr>
|
| 292 |
+
<td>\include TutorialLinAlgSetThreshold.cpp </td>
|
| 293 |
+
<td>\verbinclude TutorialLinAlgSetThreshold.out </td>
|
| 294 |
+
</tr>
|
| 295 |
+
</table>
|
| 296 |
+
|
| 297 |
+
*/
|
| 298 |
+
|
| 299 |
+
}
|
include/eigen/doc/TutorialMatrixArithmetic.dox
ADDED
|
@@ -0,0 +1,214 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
namespace Eigen {
|
| 2 |
+
|
| 3 |
+
/** \eigenManualPage TutorialMatrixArithmetic Matrix and vector arithmetic
|
| 4 |
+
|
| 5 |
+
This page aims to provide an overview and some details on how to perform arithmetic
|
| 6 |
+
between matrices, vectors and scalars with Eigen.
|
| 7 |
+
|
| 8 |
+
\eigenAutoToc
|
| 9 |
+
|
| 10 |
+
\section TutorialArithmeticIntroduction Introduction
|
| 11 |
+
|
| 12 |
+
Eigen offers matrix/vector arithmetic operations either through overloads of common C++ arithmetic operators such as +, -, *,
|
| 13 |
+
or through special methods such as \link MatrixBase::dot() dot()\endlink, \link MatrixBase::cross() cross()\endlink, etc.
|
| 14 |
+
For the Matrix class (matrices and vectors), operators are only overloaded to support
|
| 15 |
+
linear-algebraic operations. For example, \c matrix1 \c * \c matrix2 means matrix-matrix product,
|
| 16 |
+
and \c vector \c + \c scalar is just not allowed. If you want to perform all kinds of array operations,
|
| 17 |
+
not linear algebra, see the \ref TutorialArrayClass "next page".
|
| 18 |
+
|
| 19 |
+
\section TutorialArithmeticAddSub Addition and subtraction
|
| 20 |
+
|
| 21 |
+
The left hand side and right hand side must, of course, have the same numbers of rows and of columns. They must
|
| 22 |
+
also have the same \c Scalar type, as Eigen doesn't do automatic type promotion. The operators at hand here are:
|
| 23 |
+
\li binary operator + as in \c a+b
|
| 24 |
+
\li binary operator - as in \c a-b
|
| 25 |
+
\li unary operator - as in \c -a
|
| 26 |
+
\li compound operator += as in \c a+=b
|
| 27 |
+
\li compound operator -= as in \c a-=b
|
| 28 |
+
|
| 29 |
+
<table class="example">
|
| 30 |
+
<tr><th>Example:</th><th>Output:</th></tr>
|
| 31 |
+
<tr><td>
|
| 32 |
+
\include tut_arithmetic_add_sub.cpp
|
| 33 |
+
</td>
|
| 34 |
+
<td>
|
| 35 |
+
\verbinclude tut_arithmetic_add_sub.out
|
| 36 |
+
</td></tr></table>
|
| 37 |
+
|
| 38 |
+
\section TutorialArithmeticScalarMulDiv Scalar multiplication and division
|
| 39 |
+
|
| 40 |
+
Multiplication and division by a scalar is very simple too. The operators at hand here are:
|
| 41 |
+
\li binary operator * as in \c matrix*scalar
|
| 42 |
+
\li binary operator * as in \c scalar*matrix
|
| 43 |
+
\li binary operator / as in \c matrix/scalar
|
| 44 |
+
\li compound operator *= as in \c matrix*=scalar
|
| 45 |
+
\li compound operator /= as in \c matrix/=scalar
|
| 46 |
+
|
| 47 |
+
<table class="example">
|
| 48 |
+
<tr><th>Example:</th><th>Output:</th></tr>
|
| 49 |
+
<tr><td>
|
| 50 |
+
\include tut_arithmetic_scalar_mul_div.cpp
|
| 51 |
+
</td>
|
| 52 |
+
<td>
|
| 53 |
+
\verbinclude tut_arithmetic_scalar_mul_div.out
|
| 54 |
+
</td></tr></table>
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
\section TutorialArithmeticMentionXprTemplates A note about expression templates
|
| 58 |
+
|
| 59 |
+
This is an advanced topic that we explain on \ref TopicEigenExpressionTemplates "this page",
|
| 60 |
+
but it is useful to just mention it now. In Eigen, arithmetic operators such as \c operator+ don't
|
| 61 |
+
perform any computation by themselves, they just return an "expression object" describing the computation to be
|
| 62 |
+
performed. The actual computation happens later, when the whole expression is evaluated, typically in \c operator=.
|
| 63 |
+
While this might sound heavy, any modern optimizing compiler is able to optimize away that abstraction and
|
| 64 |
+
the result is perfectly optimized code. For example, when you do:
|
| 65 |
+
\code
|
| 66 |
+
VectorXf a(50), b(50), c(50), d(50);
|
| 67 |
+
...
|
| 68 |
+
a = 3*b + 4*c + 5*d;
|
| 69 |
+
\endcode
|
| 70 |
+
Eigen compiles it to just one for loop, so that the arrays are traversed only once. Simplifying (e.g. ignoring
|
| 71 |
+
SIMD optimizations), this loop looks like this:
|
| 72 |
+
\code
|
| 73 |
+
for(int i = 0; i < 50; ++i)
|
| 74 |
+
a[i] = 3*b[i] + 4*c[i] + 5*d[i];
|
| 75 |
+
\endcode
|
| 76 |
+
Thus, you should not be afraid of using relatively large arithmetic expressions with Eigen: it only gives Eigen
|
| 77 |
+
more opportunities for optimization.
|
| 78 |
+
|
| 79 |
+
\section TutorialArithmeticTranspose Transposition and conjugation
|
| 80 |
+
|
| 81 |
+
The transpose \f$ a^T \f$, conjugate \f$ \bar{a} \f$, and adjoint (i.e., conjugate transpose) \f$ a^* \f$ of a matrix or vector \f$ a \f$ are obtained by the member functions \link DenseBase::transpose() transpose()\endlink, \link MatrixBase::conjugate() conjugate()\endlink, and \link MatrixBase::adjoint() adjoint()\endlink, respectively.
|
| 82 |
+
|
| 83 |
+
<table class="example">
|
| 84 |
+
<tr><th>Example:</th><th>Output:</th></tr>
|
| 85 |
+
<tr><td>
|
| 86 |
+
\include tut_arithmetic_transpose_conjugate.cpp
|
| 87 |
+
</td>
|
| 88 |
+
<td>
|
| 89 |
+
\verbinclude tut_arithmetic_transpose_conjugate.out
|
| 90 |
+
</td></tr></table>
|
| 91 |
+
|
| 92 |
+
For real matrices, \c conjugate() is a no-operation, and so \c adjoint() is equivalent to \c transpose().
|
| 93 |
+
|
| 94 |
+
As for basic arithmetic operators, \c transpose() and \c adjoint() simply return a proxy object without doing the actual transposition. If you do <tt>b = a.transpose()</tt>, then the transpose is evaluated at the same time as the result is written into \c b. However, there is a complication here. If you do <tt>a = a.transpose()</tt>, then Eigen starts writing the result into \c a before the evaluation of the transpose is finished. Therefore, the instruction <tt>a = a.transpose()</tt> does not replace \c a with its transpose, as one would expect:
|
| 95 |
+
<table class="example">
|
| 96 |
+
<tr><th>Example:</th><th>Output:</th></tr>
|
| 97 |
+
<tr><td>
|
| 98 |
+
\include tut_arithmetic_transpose_aliasing.cpp
|
| 99 |
+
</td>
|
| 100 |
+
<td>
|
| 101 |
+
\verbinclude tut_arithmetic_transpose_aliasing.out
|
| 102 |
+
</td></tr></table>
|
| 103 |
+
This is the so-called \ref TopicAliasing "aliasing issue". In "debug mode", i.e., when \ref TopicAssertions "assertions" have not been disabled, such common pitfalls are automatically detected.
|
| 104 |
+
|
| 105 |
+
For \em in-place transposition, as for instance in <tt>a = a.transpose()</tt>, simply use the \link DenseBase::transposeInPlace() transposeInPlace()\endlink function:
|
| 106 |
+
<table class="example">
|
| 107 |
+
<tr><th>Example:</th><th>Output:</th></tr>
|
| 108 |
+
<tr><td>
|
| 109 |
+
\include tut_arithmetic_transpose_inplace.cpp
|
| 110 |
+
</td>
|
| 111 |
+
<td>
|
| 112 |
+
\verbinclude tut_arithmetic_transpose_inplace.out
|
| 113 |
+
</td></tr></table>
|
| 114 |
+
There is also the \link MatrixBase::adjointInPlace() adjointInPlace()\endlink function for complex matrices.
|
| 115 |
+
|
| 116 |
+
\section TutorialArithmeticMatrixMul Matrix-matrix and matrix-vector multiplication
|
| 117 |
+
|
| 118 |
+
Matrix-matrix multiplication is again done with \c operator*. Since vectors are a special
|
| 119 |
+
case of matrices, they are implicitly handled there too, so matrix-vector product is really just a special
|
| 120 |
+
case of matrix-matrix product, and so is vector-vector outer product. Thus, all these cases are handled by just
|
| 121 |
+
two operators:
|
| 122 |
+
\li binary operator * as in \c a*b
|
| 123 |
+
\li compound operator *= as in \c a*=b (this multiplies on the right: \c a*=b is equivalent to <tt>a = a*b</tt>)
|
| 124 |
+
|
| 125 |
+
<table class="example">
|
| 126 |
+
<tr><th>Example:</th><th>Output:</th></tr>
|
| 127 |
+
<tr><td>
|
| 128 |
+
\include tut_arithmetic_matrix_mul.cpp
|
| 129 |
+
</td>
|
| 130 |
+
<td>
|
| 131 |
+
\verbinclude tut_arithmetic_matrix_mul.out
|
| 132 |
+
</td></tr></table>
|
| 133 |
+
|
| 134 |
+
Note: if you read the above paragraph on expression templates and are worried that doing \c m=m*m might cause
|
| 135 |
+
aliasing issues, be reassured for now: Eigen treats matrix multiplication as a special case and takes care of
|
| 136 |
+
introducing a temporary here, so it will compile \c m=m*m as:
|
| 137 |
+
\code
|
| 138 |
+
tmp = m*m;
|
| 139 |
+
m = tmp;
|
| 140 |
+
\endcode
|
| 141 |
+
If you know your matrix product can be safely evaluated into the destination matrix without aliasing issue, then you can use the \link MatrixBase::noalias() noalias()\endlink function to avoid the temporary, e.g.:
|
| 142 |
+
\code
|
| 143 |
+
c.noalias() += a * b;
|
| 144 |
+
\endcode
|
| 145 |
+
For more details on this topic, see the page on \ref TopicAliasing "aliasing".
|
| 146 |
+
|
| 147 |
+
\b Note: for BLAS users worried about performance, expressions such as <tt>c.noalias() -= 2 * a.adjoint() * b;</tt> are fully optimized and trigger a single gemm-like function call.
|
| 148 |
+
|
| 149 |
+
\section TutorialArithmeticDotAndCross Dot product and cross product
|
| 150 |
+
|
| 151 |
+
For dot product and cross product, you need the \link MatrixBase::dot() dot()\endlink and \link MatrixBase::cross() cross()\endlink methods. Of course, the dot product can also be obtained as a 1x1 matrix as u.adjoint()*v.
|
| 152 |
+
<table class="example">
|
| 153 |
+
<tr><th>Example:</th><th>Output:</th></tr>
|
| 154 |
+
<tr><td>
|
| 155 |
+
\include tut_arithmetic_dot_cross.cpp
|
| 156 |
+
</td>
|
| 157 |
+
<td>
|
| 158 |
+
\verbinclude tut_arithmetic_dot_cross.out
|
| 159 |
+
</td></tr></table>
|
| 160 |
+
|
| 161 |
+
Remember that cross product is only for vectors of size 3. Dot product is for vectors of any sizes.
|
| 162 |
+
When using complex numbers, Eigen's dot product is conjugate-linear in the first variable and linear in the
|
| 163 |
+
second variable.
|
| 164 |
+
|
| 165 |
+
\section TutorialArithmeticRedux Basic arithmetic reduction operations
|
| 166 |
+
Eigen also provides some reduction operations to reduce a given matrix or vector to a single value such as the sum (computed by \link DenseBase::sum() sum()\endlink), product (\link DenseBase::prod() prod()\endlink), or the maximum (\link DenseBase::maxCoeff() maxCoeff()\endlink) and minimum (\link DenseBase::minCoeff() minCoeff()\endlink) of all its coefficients.
|
| 167 |
+
|
| 168 |
+
<table class="example">
|
| 169 |
+
<tr><th>Example:</th><th>Output:</th></tr>
|
| 170 |
+
<tr><td>
|
| 171 |
+
\include tut_arithmetic_redux_basic.cpp
|
| 172 |
+
</td>
|
| 173 |
+
<td>
|
| 174 |
+
\verbinclude tut_arithmetic_redux_basic.out
|
| 175 |
+
</td></tr></table>
|
| 176 |
+
|
| 177 |
+
The \em trace of a matrix, as returned by the function \link MatrixBase::trace() trace()\endlink, is the sum of the diagonal coefficients and can also be computed as efficiently using <tt>a.diagonal().sum()</tt>, as we will see later on.
|
| 178 |
+
|
| 179 |
+
There also exist variants of the \c minCoeff and \c maxCoeff functions returning the coordinates of the respective coefficient via the arguments:
|
| 180 |
+
|
| 181 |
+
<table class="example">
|
| 182 |
+
<tr><th>Example:</th><th>Output:</th></tr>
|
| 183 |
+
<tr><td>
|
| 184 |
+
\include tut_arithmetic_redux_minmax.cpp
|
| 185 |
+
</td>
|
| 186 |
+
<td>
|
| 187 |
+
\verbinclude tut_arithmetic_redux_minmax.out
|
| 188 |
+
</td></tr></table>
|
| 189 |
+
|
| 190 |
+
|
| 191 |
+
\section TutorialArithmeticValidity Validity of operations
|
| 192 |
+
Eigen checks the validity of the operations that you perform. When possible,
|
| 193 |
+
it checks them at compile time, producing compilation errors. These error messages can be long and ugly,
|
| 194 |
+
but Eigen writes the important message in UPPERCASE_LETTERS_SO_IT_STANDS_OUT. For example:
|
| 195 |
+
\code
|
| 196 |
+
Matrix3f m;
|
| 197 |
+
Vector4f v;
|
| 198 |
+
v = m*v; // Compile-time error: YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES
|
| 199 |
+
\endcode
|
| 200 |
+
|
| 201 |
+
Of course, in many cases, for example when checking dynamic sizes, the check cannot be performed at compile time.
|
| 202 |
+
Eigen then uses runtime assertions. This means that the program will abort with an error message when executing an illegal operation if it is run in "debug mode", and it will probably crash if assertions are turned off.
|
| 203 |
+
|
| 204 |
+
\code
|
| 205 |
+
MatrixXf m(3,3);
|
| 206 |
+
VectorXf v(4);
|
| 207 |
+
v = m * v; // Run-time assertion failure here: "invalid matrix product"
|
| 208 |
+
\endcode
|
| 209 |
+
|
| 210 |
+
For more details on this topic, see \ref TopicAssertions "this page".
|
| 211 |
+
|
| 212 |
+
*/
|
| 213 |
+
|
| 214 |
+
}
|
include/eigen/doc/TutorialReductionsVisitorsBroadcasting.dox
ADDED
|
@@ -0,0 +1,266 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
namespace Eigen {
|
| 2 |
+
|
| 3 |
+
/** \eigenManualPage TutorialReductionsVisitorsBroadcasting Reductions, visitors and broadcasting
|
| 4 |
+
|
| 5 |
+
This page explains Eigen's reductions, visitors and broadcasting and how they are used with
|
| 6 |
+
\link MatrixBase matrices \endlink and \link ArrayBase arrays \endlink.
|
| 7 |
+
|
| 8 |
+
\eigenAutoToc
|
| 9 |
+
|
| 10 |
+
\section TutorialReductionsVisitorsBroadcastingReductions Reductions
|
| 11 |
+
In Eigen, a reduction is a function taking a matrix or array, and returning a single
|
| 12 |
+
scalar value. One of the most used reductions is \link DenseBase::sum() .sum() \endlink,
|
| 13 |
+
returning the sum of all the coefficients inside a given matrix or array.
|
| 14 |
+
|
| 15 |
+
<table class="example">
|
| 16 |
+
<tr><th>Example:</th><th>Output:</th></tr>
|
| 17 |
+
<tr><td>
|
| 18 |
+
\include tut_arithmetic_redux_basic.cpp
|
| 19 |
+
</td>
|
| 20 |
+
<td>
|
| 21 |
+
\verbinclude tut_arithmetic_redux_basic.out
|
| 22 |
+
</td></tr></table>
|
| 23 |
+
|
| 24 |
+
The \em trace of a matrix, as returned by the function \c trace(), is the sum of the diagonal coefficients and can equivalently be computed as <tt>a.diagonal().sum()</tt>.
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
\subsection TutorialReductionsVisitorsBroadcastingReductionsNorm Norm computations
|
| 28 |
+
|
| 29 |
+
The (Euclidean a.k.a. \f$\ell^2\f$) squared norm of a vector can be obtained with \link MatrixBase::squaredNorm() squaredNorm() \endlink. It is equal to the dot product of the vector by itself, and equivalently to the sum of squared absolute values of its coefficients.
|
| 30 |
+
|
| 31 |
+
Eigen also provides the \link MatrixBase::norm() norm() \endlink method, which returns the square root of \link MatrixBase::squaredNorm() squaredNorm() \endlink.
|
| 32 |
+
|
| 33 |
+
These operations can also operate on matrices; in that case, an n-by-p matrix is seen as a vector of size (n*p), so for example the \link MatrixBase::norm() norm() \endlink method returns the "Frobenius" or "Hilbert-Schmidt" norm. We refrain from speaking of the \f$\ell^2\f$ norm of a matrix because that can mean different things.
|
| 34 |
+
|
| 35 |
+
If you want other coefficient-wise \f$\ell^p\f$ norms, use the \link MatrixBase::lpNorm lpNorm<p>() \endlink method. The template parameter \a p can take the special value \a Infinity if you want the \f$\ell^\infty\f$ norm, which is the maximum of the absolute values of the coefficients.
|
| 36 |
+
|
| 37 |
+
The following example demonstrates these methods.
|
| 38 |
+
|
| 39 |
+
<table class="example">
|
| 40 |
+
<tr><th>Example:</th><th>Output:</th></tr>
|
| 41 |
+
<tr><td>
|
| 42 |
+
\include Tutorial_ReductionsVisitorsBroadcasting_reductions_norm.cpp
|
| 43 |
+
</td>
|
| 44 |
+
<td>
|
| 45 |
+
\verbinclude Tutorial_ReductionsVisitorsBroadcasting_reductions_norm.out
|
| 46 |
+
</td></tr></table>
|
| 47 |
+
|
| 48 |
+
\b Operator \b norm: The 1-norm and \f$\infty\f$-norm <a href="https://en.wikipedia.org/wiki/Operator_norm">matrix operator norms</a> can easily be computed as follows:
|
| 49 |
+
<table class="example">
|
| 50 |
+
<tr><th>Example:</th><th>Output:</th></tr>
|
| 51 |
+
<tr><td>
|
| 52 |
+
\include Tutorial_ReductionsVisitorsBroadcasting_reductions_operatornorm.cpp
|
| 53 |
+
</td>
|
| 54 |
+
<td>
|
| 55 |
+
\verbinclude Tutorial_ReductionsVisitorsBroadcasting_reductions_operatornorm.out
|
| 56 |
+
</td></tr></table>
|
| 57 |
+
See below for more explanations on the syntax of these expressions.
|
| 58 |
+
|
| 59 |
+
\subsection TutorialReductionsVisitorsBroadcastingReductionsBool Boolean reductions
|
| 60 |
+
|
| 61 |
+
The following reductions operate on boolean values:
|
| 62 |
+
- \link DenseBase::all() all() \endlink returns \b true if all of the coefficients in a given Matrix or Array evaluate to \b true .
|
| 63 |
+
- \link DenseBase::any() any() \endlink returns \b true if at least one of the coefficients in a given Matrix or Array evaluates to \b true .
|
| 64 |
+
- \link DenseBase::count() count() \endlink returns the number of coefficients in a given Matrix or Array that evaluate to \b true.
|
| 65 |
+
|
| 66 |
+
These are typically used in conjunction with the coefficient-wise comparison and equality operators provided by Array. For instance, <tt>array > 0</tt> is an %Array of the same size as \c array , with \b true at those positions where the corresponding coefficient of \c array is positive. Thus, <tt>(array > 0).all()</tt> tests whether all coefficients of \c array are positive. This can be seen in the following example:
|
| 67 |
+
|
| 68 |
+
<table class="example">
|
| 69 |
+
<tr><th>Example:</th><th>Output:</th></tr>
|
| 70 |
+
<tr><td>
|
| 71 |
+
\include Tutorial_ReductionsVisitorsBroadcasting_reductions_bool.cpp
|
| 72 |
+
</td>
|
| 73 |
+
<td>
|
| 74 |
+
\verbinclude Tutorial_ReductionsVisitorsBroadcasting_reductions_bool.out
|
| 75 |
+
</td></tr></table>
|
| 76 |
+
|
| 77 |
+
\subsection TutorialReductionsVisitorsBroadcastingReductionsUserdefined User defined reductions
|
| 78 |
+
|
| 79 |
+
TODO
|
| 80 |
+
|
| 81 |
+
In the meantime you can have a look at the DenseBase::redux() function.
|
| 82 |
+
|
| 83 |
+
\section TutorialReductionsVisitorsBroadcastingVisitors Visitors
|
| 84 |
+
Visitors are useful when one wants to obtain the location of a coefficient inside
|
| 85 |
+
a Matrix or Array. The simplest examples are
|
| 86 |
+
\link MatrixBase::maxCoeff() maxCoeff(&x,&y) \endlink and
|
| 87 |
+
\link MatrixBase::minCoeff() minCoeff(&x,&y)\endlink, which can be used to find
|
| 88 |
+
the location of the greatest or smallest coefficient in a Matrix or
|
| 89 |
+
Array.
|
| 90 |
+
|
| 91 |
+
The arguments passed to a visitor are pointers to the variables where the
|
| 92 |
+
row and column position are to be stored. These variables should be of type
|
| 93 |
+
\link Eigen::Index Index \endlink, as shown below:
|
| 94 |
+
|
| 95 |
+
<table class="example">
|
| 96 |
+
<tr><th>Example:</th><th>Output:</th></tr>
|
| 97 |
+
<tr><td>
|
| 98 |
+
\include Tutorial_ReductionsVisitorsBroadcasting_visitors.cpp
|
| 99 |
+
</td>
|
| 100 |
+
<td>
|
| 101 |
+
\verbinclude Tutorial_ReductionsVisitorsBroadcasting_visitors.out
|
| 102 |
+
</td></tr></table>
|
| 103 |
+
|
| 104 |
+
Both functions also return the value of the minimum or maximum coefficient.
|
| 105 |
+
|
| 106 |
+
\section TutorialReductionsVisitorsBroadcastingPartialReductions Partial reductions
|
| 107 |
+
Partial reductions are reductions that can operate column- or row-wise on a Matrix or
|
| 108 |
+
Array, applying the reduction operation on each column or row and
|
| 109 |
+
returning a column or row vector with the corresponding values. Partial reductions are applied
|
| 110 |
+
with \link DenseBase::colwise() colwise() \endlink or \link DenseBase::rowwise() rowwise() \endlink.
|
| 111 |
+
|
| 112 |
+
A simple example is obtaining the maximum of the elements
|
| 113 |
+
in each column in a given matrix, storing the result in a row vector:
|
| 114 |
+
|
| 115 |
+
<table class="example">
|
| 116 |
+
<tr><th>Example:</th><th>Output:</th></tr>
|
| 117 |
+
<tr><td>
|
| 118 |
+
\include Tutorial_ReductionsVisitorsBroadcasting_colwise.cpp
|
| 119 |
+
</td>
|
| 120 |
+
<td>
|
| 121 |
+
\verbinclude Tutorial_ReductionsVisitorsBroadcasting_colwise.out
|
| 122 |
+
</td></tr></table>
|
| 123 |
+
|
| 124 |
+
The same operation can be performed row-wise:
|
| 125 |
+
|
| 126 |
+
<table class="example">
|
| 127 |
+
<tr><th>Example:</th><th>Output:</th></tr>
|
| 128 |
+
<tr><td>
|
| 129 |
+
\include Tutorial_ReductionsVisitorsBroadcasting_rowwise.cpp
|
| 130 |
+
</td>
|
| 131 |
+
<td>
|
| 132 |
+
\verbinclude Tutorial_ReductionsVisitorsBroadcasting_rowwise.out
|
| 133 |
+
</td></tr></table>
|
| 134 |
+
|
| 135 |
+
<b>Note that column-wise operations return a row vector, while row-wise operations return a column vector.</b>
|
| 136 |
+
|
| 137 |
+
\subsection TutorialReductionsVisitorsBroadcastingPartialReductionsCombined Combining partial reductions with other operations
|
| 138 |
+
It is also possible to use the result of a partial reduction to do further processing.
|
| 139 |
+
Here is another example that finds the column whose sum of elements is the maximum
|
| 140 |
+
within a matrix. With column-wise partial reductions this can be coded as:
|
| 141 |
+
|
| 142 |
+
<table class="example">
|
| 143 |
+
<tr><th>Example:</th><th>Output:</th></tr>
|
| 144 |
+
<tr><td>
|
| 145 |
+
\include Tutorial_ReductionsVisitorsBroadcasting_maxnorm.cpp
|
| 146 |
+
</td>
|
| 147 |
+
<td>
|
| 148 |
+
\verbinclude Tutorial_ReductionsVisitorsBroadcasting_maxnorm.out
|
| 149 |
+
</td></tr></table>
|
| 150 |
+
|
| 151 |
+
The previous example applies the \link DenseBase::sum() sum() \endlink reduction on each column
|
| 152 |
+
through the \link DenseBase::colwise() colwise() \endlink visitor, obtaining a new matrix whose
|
| 153 |
+
size is 1x4.
|
| 154 |
+
|
| 155 |
+
Therefore, if
|
| 156 |
+
\f[
|
| 157 |
+
\mbox{m} = \begin{bmatrix} 1 & 2 & 6 & 9 \\
|
| 158 |
+
3 & 1 & 7 & 2 \end{bmatrix}
|
| 159 |
+
\f]
|
| 160 |
+
|
| 161 |
+
then
|
| 162 |
+
|
| 163 |
+
\f[
|
| 164 |
+
\mbox{m.colwise().sum()} = \begin{bmatrix} 4 & 3 & 13 & 11 \end{bmatrix}
|
| 165 |
+
\f]
|
| 166 |
+
|
| 167 |
+
The \link DenseBase::maxCoeff() maxCoeff() \endlink reduction is finally applied
|
| 168 |
+
to obtain the column index where the maximum sum is found,
|
| 169 |
+
which is the column index 2 (third column) in this case.
|
| 170 |
+
|
| 171 |
+
|
| 172 |
+
\section TutorialReductionsVisitorsBroadcastingBroadcasting Broadcasting
|
| 173 |
+
The concept behind broadcasting is similar to partial reductions, with the difference that broadcasting
|
| 174 |
+
constructs an expression where a vector (column or row) is interpreted as a matrix by replicating it in
|
| 175 |
+
one direction.
|
| 176 |
+
|
| 177 |
+
A simple example is to add a certain column vector to each column in a matrix.
|
| 178 |
+
This can be accomplished with:
|
| 179 |
+
|
| 180 |
+
<table class="example">
|
| 181 |
+
<tr><th>Example:</th><th>Output:</th></tr>
|
| 182 |
+
<tr><td>
|
| 183 |
+
\include Tutorial_ReductionsVisitorsBroadcasting_broadcast_simple.cpp
|
| 184 |
+
</td>
|
| 185 |
+
<td>
|
| 186 |
+
\verbinclude Tutorial_ReductionsVisitorsBroadcasting_broadcast_simple.out
|
| 187 |
+
</td></tr></table>
|
| 188 |
+
|
| 189 |
+
We can interpret the instruction <tt>mat.colwise() += v</tt> in two equivalent ways. It adds the vector \c v
|
| 190 |
+
to every column of the matrix. Alternatively, it can be interpreted as repeating the vector \c v four times to
|
| 191 |
+
form a two-by-four matrix which is then added to \c mat:
|
| 192 |
+
\f[
|
| 193 |
+
\begin{bmatrix} 1 & 2 & 6 & 9 \\ 3 & 1 & 7 & 2 \end{bmatrix}
|
| 194 |
+
+ \begin{bmatrix} 0 & 0 & 0 & 0 \\ 1 & 1 & 1 & 1 \end{bmatrix}
|
| 195 |
+
= \begin{bmatrix} 1 & 2 & 6 & 9 \\ 4 & 2 & 8 & 3 \end{bmatrix}.
|
| 196 |
+
\f]
|
| 197 |
+
The operators <tt>-=</tt>, <tt>+</tt> and <tt>-</tt> can also be used column-wise and row-wise. On arrays, we
|
| 198 |
+
can also use the operators <tt>*=</tt>, <tt>/=</tt>, <tt>*</tt> and <tt>/</tt> to perform coefficient-wise
|
| 199 |
+
multiplication and division column-wise or row-wise. These operators are not available on matrices because it
|
| 200 |
+
is not clear what they would do. If you want to multiply column 0 of a matrix \c mat with \c v(0), column 1 with
|
| 201 |
+
\c v(1), and so on, then use <tt>mat = mat * v.asDiagonal()</tt>.
|
| 202 |
+
|
| 203 |
+
It is important to point out that the vector to be added column-wise or row-wise must be of type Vector,
|
| 204 |
+
and cannot be a Matrix. If this is not met then you will get a compile-time error. This also means that
|
| 205 |
+
broadcasting operations can only be applied with an object of type Vector, when operating with Matrix.
|
| 206 |
+
The same applies for the Array class, where the equivalent for VectorXf is ArrayXf. As always, you should
|
| 207 |
+
not mix arrays and matrices in the same expression.
|
| 208 |
+
|
| 209 |
+
To perform the same operation row-wise we can do:
|
| 210 |
+
|
| 211 |
+
<table class="example">
|
| 212 |
+
<tr><th>Example:</th><th>Output:</th></tr>
|
| 213 |
+
<tr><td>
|
| 214 |
+
\include Tutorial_ReductionsVisitorsBroadcasting_broadcast_simple_rowwise.cpp
|
| 215 |
+
</td>
|
| 216 |
+
<td>
|
| 217 |
+
\verbinclude Tutorial_ReductionsVisitorsBroadcasting_broadcast_simple_rowwise.out
|
| 218 |
+
</td></tr></table>
|
| 219 |
+
|
| 220 |
+
\subsection TutorialReductionsVisitorsBroadcastingBroadcastingCombined Combining broadcasting with other operations
|
| 221 |
+
Broadcasting can also be combined with other operations, such as Matrix or Array operations,
|
| 222 |
+
reductions and partial reductions.
|
| 223 |
+
|
| 224 |
+
Now that broadcasting, reductions and partial reductions have been introduced, we can dive into a more advanced example that finds
|
| 225 |
+
the nearest neighbour of a vector <tt>v</tt> within the columns of matrix <tt>m</tt>. The Euclidean distance will be used in this example,
|
| 226 |
+
computing the squared Euclidean distance with the partial reduction named \link MatrixBase::squaredNorm() squaredNorm() \endlink:
|
| 227 |
+
|
| 228 |
+
<table class="example">
|
| 229 |
+
<tr><th>Example:</th><th>Output:</th></tr>
|
| 230 |
+
<tr><td>
|
| 231 |
+
\include Tutorial_ReductionsVisitorsBroadcasting_broadcast_1nn.cpp
|
| 232 |
+
</td>
|
| 233 |
+
<td>
|
| 234 |
+
\verbinclude Tutorial_ReductionsVisitorsBroadcasting_broadcast_1nn.out
|
| 235 |
+
</td></tr></table>
|
| 236 |
+
|
| 237 |
+
The line that does the job is
|
| 238 |
+
\code
|
| 239 |
+
(m.colwise() - v).colwise().squaredNorm().minCoeff(&index);
|
| 240 |
+
\endcode
|
| 241 |
+
|
| 242 |
+
We will go step by step to understand what is happening:
|
| 243 |
+
|
| 244 |
+
- <tt>m.colwise() - v</tt> is a broadcasting operation, subtracting <tt>v</tt> from each column in <tt>m</tt>. The result of this operation
|
| 245 |
+
is a new matrix whose size is the same as matrix <tt>m</tt>: \f[
|
| 246 |
+
\mbox{m.colwise() - v} =
|
| 247 |
+
\begin{bmatrix}
|
| 248 |
+
-1 & 21 & 4 & 7 \\
|
| 249 |
+
0 & 8 & 4 & -1
|
| 250 |
+
\end{bmatrix}
|
| 251 |
+
\f]
|
| 252 |
+
|
| 253 |
+
- <tt>(m.colwise() - v).colwise().squaredNorm()</tt> is a partial reduction, computing the squared norm column-wise. The result of
|
| 254 |
+
this operation is a row vector where each coefficient is the squared Euclidean distance between each column in <tt>m</tt> and <tt>v</tt>: \f[
|
| 255 |
+
\mbox{(m.colwise() - v).colwise().squaredNorm()} =
|
| 256 |
+
\begin{bmatrix}
|
| 257 |
+
1 & 505 & 32 & 50
|
| 258 |
+
\end{bmatrix}
|
| 259 |
+
\f]
|
| 260 |
+
|
| 261 |
+
- Finally, <tt>minCoeff(&index)</tt> is used to obtain the index of the column in <tt>m</tt> that is closest to <tt>v</tt> in terms of Euclidean
|
| 262 |
+
distance.
|
| 263 |
+
|
| 264 |
+
*/
|
| 265 |
+
|
| 266 |
+
}
|
include/eigen/doc/TutorialReshape.dox
ADDED
|
@@ -0,0 +1,82 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
namespace Eigen {
|
| 2 |
+
|
| 3 |
+
/** \eigenManualPage TutorialReshape Reshape
|
| 4 |
+
|
| 5 |
+
Since version 3.4, %Eigen exposes convenient methods to reshape a matrix into another matrix of a different size, or into a vector.
|
| 6 |
+
All cases are handled via the `DenseBase::reshaped(NRowsType,NColsType)` and `DenseBase::reshaped()` functions.
|
| 7 |
+
Those functions do not perform in-place reshaping, but instead return a <i> view </i> on the input expression.
|
| 8 |
+
|
| 9 |
+
\eigenAutoToc
|
| 10 |
+
|
| 11 |
+
\section TutorialReshapeMat2Mat Reshaped 2D views
|
| 12 |
+
|
| 13 |
+
The more general reshaping transformation is handled via: `reshaped(nrows,ncols)`.
|
| 14 |
+
Here is an example reshaping a 4x4 matrix to a 2x8 one:
|
| 15 |
+
|
| 16 |
+
<table class="example">
|
| 17 |
+
<tr><th>Example:</th><th>Output:</th></tr>
|
| 18 |
+
<tr><td>
|
| 19 |
+
\include MatrixBase_reshaped_int_int.cpp
|
| 20 |
+
</td>
|
| 21 |
+
<td>
|
| 22 |
+
\verbinclude MatrixBase_reshaped_int_int.out
|
| 23 |
+
</td></tr></table>
|
| 24 |
+
|
| 25 |
+
By default, the input coefficients are always interpreted in column-major order regardless of the storage order of the input expression.
|
| 26 |
+
For more control on ordering, compile-time sizes, and automatic size deduction, please see the documentation of `DenseBase::reshaped(NRowsType,NColsType)` that contains all the details with many examples.
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
\section TutorialReshapeMat2Vec 1D linear views
|
| 30 |
+
|
| 31 |
+
A very common usage of reshaping is to create a 1D linear view over a given 2D matrix or expression.
|
| 32 |
+
In this case, sizes can be deduced and thus omitted as in the following example:
|
| 33 |
+
|
| 34 |
+
<table class="example">
|
| 35 |
+
<tr><th>Example:</th></tr>
|
| 36 |
+
<tr><td>
|
| 37 |
+
\include MatrixBase_reshaped_to_vector.cpp
|
| 38 |
+
</td></tr>
|
| 39 |
+
<tr><th>Output:</th></tr>
|
| 40 |
+
<tr><td>
|
| 41 |
+
\verbinclude MatrixBase_reshaped_to_vector.out
|
| 42 |
+
</td></tr></table>
|
| 43 |
+
|
| 44 |
+
This shortcut always returns a column vector and by default input coefficients are always interpreted in column-major order.
|
| 45 |
+
Again, see the documentation of DenseBase::reshaped() for more control on the ordering.
|
| 46 |
+
|
| 47 |
+
\section TutorialReshapeInPlace Reshaping in place
|
| 48 |
+
|
| 49 |
+
The above examples create reshaped views, but what about reshaping inplace a given matrix?
|
| 50 |
+
Of course, this task is only conceivable for matrices and arrays having runtime dimensions.
|
| 51 |
+
In many cases, this can be accomplished via PlainObjectBase::resize(Index,Index):
|
| 52 |
+
|
| 53 |
+
<table class="example">
|
| 54 |
+
<tr><th>Example:</th></tr>
|
| 55 |
+
<tr><td>
|
| 56 |
+
\include Tutorial_reshaped_vs_resize_1.cpp
|
| 57 |
+
</td></tr>
|
| 58 |
+
<tr><th>Output:</th></tr>
|
| 59 |
+
<tr><td>
|
| 60 |
+
\verbinclude Tutorial_reshaped_vs_resize_1.out
|
| 61 |
+
</td></tr></table>
|
| 62 |
+
|
| 63 |
+
However beware that unlike \c reshaped, the result of \c resize depends on the input storage order.
|
| 64 |
+
It thus behaves similarly to `reshaped<AutoOrder>`:
|
| 65 |
+
|
| 66 |
+
<table class="example">
|
| 67 |
+
<tr><th>Example:</th></tr>
|
| 68 |
+
<tr><td>
|
| 69 |
+
\include Tutorial_reshaped_vs_resize_2.cpp
|
| 70 |
+
</td></tr>
|
| 71 |
+
<tr><th>Output:</th></tr>
|
| 72 |
+
<tr><td>
|
| 73 |
+
\verbinclude Tutorial_reshaped_vs_resize_2.out
|
| 74 |
+
</td></tr></table>
|
| 75 |
+
|
| 76 |
+
Finally, assigning a reshaped matrix to itself is currently not supported and will result in undefined behavior because of \link TopicAliasing aliasing \endlink.
|
| 77 |
+
The following is forbidden: \code A = A.reshaped(2,8); \endcode
|
| 78 |
+
This is OK: \code A = A.reshaped(2,8).eval(); \endcode
|
| 79 |
+
|
| 80 |
+
*/
|
| 81 |
+
|
| 82 |
+
}
|
include/eigen/doc/TutorialSTL.dox
ADDED
|
@@ -0,0 +1,66 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
namespace Eigen {
|
| 2 |
+
|
| 3 |
+
/** \eigenManualPage TutorialSTL STL iterators and algorithms
|
| 4 |
+
|
| 5 |
+
Since the version 3.4, %Eigen's dense matrices and arrays provide STL compatible iterators.
|
| 6 |
+
As demonstrated below, this makes them naturally compatible with range-for-loops and STL's algorithms.
|
| 7 |
+
|
| 8 |
+
\eigenAutoToc
|
| 9 |
+
|
| 10 |
+
\section TutorialSTLVectors Iterating over 1D arrays and vectors
|
| 11 |
+
|
| 12 |
+
Any dense 1D expression exposes the pair of `begin()/end()` methods to iterate over it.
|
| 13 |
+
|
| 14 |
+
This directly enables c++11 range for loops:
|
| 15 |
+
<table class="example">
|
| 16 |
+
<tr><th>Example:</th><th>Output:</th></tr>
|
| 17 |
+
<tr><td>
|
| 18 |
+
\include Tutorial_range_for_loop_1d_cxx11.cpp
|
| 19 |
+
</td>
|
| 20 |
+
<td>
|
| 21 |
+
\verbinclude Tutorial_range_for_loop_1d_cxx11.out
|
| 22 |
+
</td></tr></table>
|
| 23 |
+
|
| 24 |
+
One dimensional expressions can also easily be passed to STL algorithms:
|
| 25 |
+
<table class="example">
|
| 26 |
+
<tr><th>Example:</th><th>Output:</th></tr>
|
| 27 |
+
<tr><td>
|
| 28 |
+
\include Tutorial_std_sort.cpp
|
| 29 |
+
</td>
|
| 30 |
+
<td>
|
| 31 |
+
\verbinclude Tutorial_std_sort.out
|
| 32 |
+
</td></tr></table>
|
| 33 |
+
|
| 34 |
+
Similar to `std::vector`, 1D expressions also expose the pair of `cbegin()/cend()` methods to conveniently get const iterators on non-const objects.
|
| 35 |
+
|
| 36 |
+
\section TutorialSTLMatrices Iterating over coefficients of 2D arrays and matrices
|
| 37 |
+
|
| 38 |
+
STL iterators are intrinsically designed to iterate over 1D structures.
|
| 39 |
+
This is why `begin()/end()` methods are disabled for 2D expressions.
|
| 40 |
+
Iterating over all coefficients of a 2D expression is still easily accomplished by creating a 1D linear view through `reshaped()`:
|
| 41 |
+
<table class="example">
|
| 42 |
+
<tr><th>Example:</th><th>Output:</th></tr>
|
| 43 |
+
<tr><td>
|
| 44 |
+
\include Tutorial_range_for_loop_2d_cxx11.cpp
|
| 45 |
+
</td>
|
| 46 |
+
<td>
|
| 47 |
+
\verbinclude Tutorial_range_for_loop_2d_cxx11.out
|
| 48 |
+
</td></tr></table>
|
| 49 |
+
|
| 50 |
+
\section TutorialSTLRowsColumns Iterating over rows or columns of 2D arrays and matrices
|
| 51 |
+
|
| 52 |
+
It is also possible to get iterators over rows or columns of 2D expressions.
|
| 53 |
+
Those are available through the `rowwise()` and `colwise()` proxies.
|
| 54 |
+
Here is an example sorting each row of a matrix:
|
| 55 |
+
<table class="example">
|
| 56 |
+
<tr><th>Example:</th><th>Output:</th></tr>
|
| 57 |
+
<tr><td>
|
| 58 |
+
\include Tutorial_std_sort_rows_cxx11.cpp
|
| 59 |
+
</td>
|
| 60 |
+
<td>
|
| 61 |
+
\verbinclude Tutorial_std_sort_rows_cxx11.out
|
| 62 |
+
</td></tr></table>
|
| 63 |
+
|
| 64 |
+
*/
|
| 65 |
+
|
| 66 |
+
}
|
include/eigen/doc/TutorialSlicingIndexing.dox
ADDED
|
@@ -0,0 +1,245 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
namespace Eigen {
|
| 2 |
+
|
| 3 |
+
/** \eigenManualPage TutorialSlicingIndexing Slicing and Indexing
|
| 4 |
+
|
| 5 |
+
This page presents the numerous possibilities offered by `operator()` to index sub-set of rows and columns.
|
| 6 |
+
This API has been introduced in %Eigen 3.4.
|
| 7 |
+
It supports all the features proposed by the \link TutorialBlockOperations block API \endlink, and much more.
|
| 8 |
+
In particular, it supports \b slicing that consists in taking a set of rows, columns, or elements, uniformly spaced within a matrix or indexed from an array of indices.
|
| 9 |
+
|
| 10 |
+
\eigenAutoToc
|
| 11 |
+
|
| 12 |
+
\section TutorialSlicingOverview Overview
|
| 13 |
+
|
| 14 |
+
All the aforementioned operations are handled through the generic DenseBase::operator()(const RowIndices&, const ColIndices&) method.
|
| 15 |
+
Each argument can be:
|
| 16 |
+
- An integer indexing a single row or column, including symbolic indices.
|
| 17 |
+
- The symbol Eigen::indexing::all representing the whole set of respective rows or columns in increasing order.
|
| 18 |
+
- An ArithmeticSequence as constructed by the Eigen::seq, Eigen::seqN, or Eigen::indexing::lastN functions.
|
| 19 |
+
- Any 1D vector/array of integers including %Eigen's vector/array, expressions, std::vector, std::array, as well as plain C arrays: `int[N]`.
|
| 20 |
+
|
| 21 |
+
More generally, it can accept any object exposing the following two member functions:
|
| 22 |
+
\code
|
| 23 |
+
<integral type> operator[](<integral type>) const;
|
| 24 |
+
<integral type> size() const;
|
| 25 |
+
\endcode
|
| 26 |
+
where `<integral type>` stands for any integer type compatible with Eigen::Index (i.e. `std::ptrdiff_t`).
|
| 27 |
+
|
| 28 |
+
\section TutorialSlicingBasic Basic slicing
|
| 29 |
+
|
| 30 |
+
Taking a set of rows, columns, or elements, uniformly spaced within a matrix or vector is achieved through the Eigen::seq or Eigen::seqN functions where "seq" stands for arithmetic sequence. Their signatures are summarized below:
|
| 31 |
+
|
| 32 |
+
<table class="manual">
|
| 33 |
+
<tr>
|
| 34 |
+
<th>function</th>
|
| 35 |
+
<th>description</th>
|
| 36 |
+
<th>example</th>
|
| 37 |
+
</tr>
|
| 38 |
+
<tr>
|
| 39 |
+
<td>\code seq(firstIdx,lastIdx) \endcode</td>
|
| 40 |
+
<td>represents the sequence of integers ranging from \c firstIdx to \c lastIdx</td>
|
| 41 |
+
<td>\code seq(2,5) <=> {2,3,4,5} \endcode</td>
|
| 42 |
+
</tr>
|
| 43 |
+
<tr>
|
| 44 |
+
<td>\code seq(firstIdx,lastIdx,incr) \endcode</td>
|
| 45 |
+
<td>same but using the increment \c incr to advance from one index to the next</td>
|
| 46 |
+
<td>\code seq(2,8,2) <=> {2,4,6,8} \endcode</td>
|
| 47 |
+
</tr>
|
| 48 |
+
<tr>
|
| 49 |
+
<td>\code seqN(firstIdx,size) \endcode</td>
|
| 50 |
+
<td>represents the sequence of \c size integers starting from \c firstIdx</td>
|
| 51 |
+
<td>\code seqN(2,5) <=> {2,3,4,5,6} \endcode</td>
|
| 52 |
+
</tr>
|
| 53 |
+
<tr>
|
| 54 |
+
<td>\code seqN(firstIdx,size,incr) \endcode</td>
|
| 55 |
+
<td>same but using the increment \c incr to advance from one index to the next</td>
|
| 56 |
+
<td>\code seqN(2,3,3) <=> {2,5,8} \endcode</td>
|
| 57 |
+
</tr>
|
| 58 |
+
</table>
|
| 59 |
+
|
| 60 |
+
The \c firstIdx and \c lastIdx parameters can also be defined with the help of the Eigen::last symbol representing the index of the last row, column or element of the underlying matrix/vector once the arithmetic sequence is passed to it through operator().
|
| 61 |
+
Here are some examples for a 2D array/matrix \c A and a 1D array/vector \c v.
|
| 62 |
+
<table class="manual">
|
| 63 |
+
<tr>
|
| 64 |
+
<th>Intent</th>
|
| 65 |
+
<th>Code</th>
|
| 66 |
+
<th>Block-API equivalence</th>
|
| 67 |
+
</tr>
|
| 68 |
+
<tr>
|
| 69 |
+
<td>Bottom-left corner starting at row \c i with \c n columns</td>
|
| 70 |
+
<td>\code A(seq(i,last), seqN(0,n)) \endcode</td>
|
| 71 |
+
<td>\code A.bottomLeftCorner(A.rows()-i,n) \endcode</td>
|
| 72 |
+
</tr>
|
| 73 |
+
<tr>
|
| 74 |
+
<td>%Block starting at \c i,j having \c m rows, and \c n columns</td>
|
| 75 |
+
<td>\code A(seqN(i,m), seqN(j,n)) \endcode</td>
|
| 76 |
+
<td>\code A.block(i,j,m,n) \endcode</td>
|
| 77 |
+
</tr>
|
| 78 |
+
<tr>
|
| 79 |
+
<td>%Block starting at \c i0,j0 and ending at \c i1,j1</td>
|
| 80 |
+
<td>\code A(seq(i0,i1), seq(j0,j1)) \endcode</td>
|
| 81 |
+
<td>\code A.block(i0,j0,i1-i0+1,j1-j0+1) \endcode</td>
|
| 82 |
+
</tr>
|
| 83 |
+
<tr>
|
| 84 |
+
<td>Even columns of A</td>
|
| 85 |
+
<td>\code A(all, seq(0,last,2)) \endcode</td>
|
| 86 |
+
<td></td>
|
| 87 |
+
</tr>
|
| 88 |
+
<tr>
|
| 89 |
+
<td>First \c n odd rows of A</td>
|
| 90 |
+
<td>\code A(seqN(1,n,2), all) \endcode</td>
|
| 91 |
+
<td></td>
|
| 92 |
+
</tr>
|
| 93 |
+
<tr>
|
| 94 |
+
<td>The second-last column</td>
|
| 95 |
+
<td>\code A(all, last-1) \endcode</td>
|
| 96 |
+
<td>\code A.col(A.cols()-2) \endcode</td>
|
| 97 |
+
</tr>
|
| 98 |
+
<tr>
|
| 99 |
+
<td>The middle row</td>
|
| 100 |
+
<td>\code A(last/2, all) \endcode</td>
|
| 101 |
+
<td>\code A.row((A.rows()-1)/2) \endcode</td>
|
| 102 |
+
</tr>
|
| 103 |
+
<tr>
|
| 104 |
+
<td>Last elements of v starting at i</td>
|
| 105 |
+
<td>\code v(seq(i,last)) \endcode</td>
|
| 106 |
+
<td>\code v.tail(v.size()-i) \endcode</td>
|
| 107 |
+
</tr>
|
| 108 |
+
<tr>
|
| 109 |
+
<td>Last \c n elements of v</td>
|
| 110 |
+
<td>\code v(seq(last+1-n,last)) \endcode</td>
|
| 111 |
+
<td>\code v.tail(n) \endcode</td>
|
| 112 |
+
</tr>
|
| 113 |
+
</table>
|
| 114 |
+
|
| 115 |
+
As seen in the last example, referencing the <i> last n </i> elements (or rows/columns) is a bit cumbersome to write.
|
| 116 |
+
This becomes even more tricky and error prone with a non-default increment.
|
| 117 |
+
Here comes \link indexing_lastN Eigen::indexing::lastN(size) \endlink, and
|
| 118 |
+
\link indexing_lastN_with_incr Eigen::indexing::lastN(size,incr) \endlink:
|
| 119 |
+
|
| 120 |
+
<table class="manual">
|
| 121 |
+
<tr>
|
| 122 |
+
<th>Intent</th>
|
| 123 |
+
<th>Code</th>
|
| 124 |
+
<th>Block-API equivalence</th>
|
| 125 |
+
</tr>
|
| 126 |
+
<tr>
|
| 127 |
+
<td>Last \c n elements of v</td>
|
| 128 |
+
<td>\code v(lastN(n)) \endcode</td>
|
| 129 |
+
<td>\code v.tail(n) \endcode</td>
|
| 130 |
+
</tr>
|
| 131 |
+
<tr>
|
| 132 |
+
<td>Bottom-right corner of A of size \c m times \c n</td>
|
| 133 |
+
<td>\code A(lastN(m), lastN(n)) \endcode</td>
|
| 134 |
+
<td>\code A.bottomRightCorner(m,n) \endcode</td>
|
| 135 |
+
</tr>
|
| 136 |
+
<tr>
|
| 137 |
+
<td>Bottom-right corner of A of size \c m times \c n</td>
|
| 138 |
+
<td>\code A(lastN(m), lastN(n)) \endcode</td>
|
| 139 |
+
<td>\code A.bottomRightCorner(m,n) \endcode</td>
|
| 140 |
+
</tr>
|
| 141 |
+
<tr>
|
| 142 |
+
<td>Last \c n columns taking 1 column over 3</td>
|
| 143 |
+
<td>\code A(all, lastN(n,3)) \endcode</td>
|
| 144 |
+
<td></td>
|
| 145 |
+
</tr>
|
| 146 |
+
</table>
|
| 147 |
+
|
| 148 |
+
\section TutorialSlicingFixed Compile time size and increment
|
| 149 |
+
|
| 150 |
+
In terms of performance, %Eigen and the compiler can take advantage of compile-time size and increment.
|
| 151 |
+
To this end, you can enforce compile-time parameters using Eigen::fix<val>.
|
| 152 |
+
Such compile-time value can be combined with the Eigen::last symbol:
|
| 153 |
+
\code v(seq(last-fix<7>, last-fix<2>))
|
| 154 |
+
\endcode
|
| 155 |
+
In this example %Eigen knows at compile-time that the returned expression has 6 elements.
|
| 156 |
+
It is equivalent to:
|
| 157 |
+
\code v(seqN(last-7, fix<6>))
|
| 158 |
+
\endcode
|
| 159 |
+
|
| 160 |
+
We can revisit the <i>even columns of A</i> example as follows:
|
| 161 |
+
\code A(all, seq(fix<0>,last,fix<2>))
|
| 162 |
+
\endcode
|
| 163 |
+
|
| 164 |
+
|
| 165 |
+
\section TutorialSlicingReverse Reverse order
|
| 166 |
+
|
| 167 |
+
Row/column indices can also be enumerated in decreasing order using a negative increment.
|
| 168 |
+
For instance, one over two columns of A from the column 20 to 10:
|
| 169 |
+
\code A(all, seq(20, 10, fix<-2>))
|
| 170 |
+
\endcode
|
| 171 |
+
The last \c n rows starting from the last one:
|
| 172 |
+
\code A(seqN(last, n, fix<-1>), all)
|
| 173 |
+
\endcode
|
| 174 |
+
You can also use the ArithmeticSequence::reverse() method to reverse its order.
|
| 175 |
+
The previous example can thus also be written as:
|
| 176 |
+
\code A(lastN(n).reverse(), all)
|
| 177 |
+
\endcode
|
| 178 |
+
|
| 179 |
+
|
| 180 |
+
\section TutorialSlicingArray Array of indices
|
| 181 |
+
|
| 182 |
+
The generic `operator()` can also take as input an arbitrary list of row or column indices stored as either an `ArrayXi`, a `std::vector<int>`, `std::array<int,N>`, etc.
|
| 183 |
+
|
| 184 |
+
<table class="example">
|
| 185 |
+
<tr><th>Example:</th><th>Output:</th></tr>
|
| 186 |
+
<tr><td>
|
| 187 |
+
\include Slicing_stdvector_cxx11.cpp
|
| 188 |
+
</td>
|
| 189 |
+
<td>
|
| 190 |
+
\verbinclude Slicing_stdvector_cxx11.out
|
| 191 |
+
</td></tr></table>
|
| 192 |
+
|
| 193 |
+
You can also directly pass a static array:
|
| 194 |
+
<table class="example">
|
| 195 |
+
<tr><th>Example:</th><th>Output:</th></tr>
|
| 196 |
+
<tr><td>
|
| 197 |
+
\include Slicing_rawarray_cxx11.cpp
|
| 198 |
+
</td>
|
| 199 |
+
<td>
|
| 200 |
+
\verbinclude Slicing_rawarray_cxx11.out
|
| 201 |
+
</td></tr></table>
|
| 202 |
+
|
| 203 |
+
or expressions:
|
| 204 |
+
<table class="example">
|
| 205 |
+
<tr><th>Example:</th><th>Output:</th></tr>
|
| 206 |
+
<tr><td>
|
| 207 |
+
\include Slicing_arrayexpr.cpp
|
| 208 |
+
</td>
|
| 209 |
+
<td>
|
| 210 |
+
\verbinclude Slicing_arrayexpr.out
|
| 211 |
+
</td></tr></table>
|
| 212 |
+
|
| 213 |
+
When passing an object with a compile-time size such as `Array4i`, `std::array<int,N>`, or a static array, then the returned expression also exhibit compile-time dimensions.
|
| 214 |
+
|
| 215 |
+
\section TutorialSlicingCustomArray Custom index list
|
| 216 |
+
|
| 217 |
+
More generally, `operator()` can accept as inputs any object \c ind of type \c T compatible with:
|
| 218 |
+
\code
|
| 219 |
+
Index s = ind.size(); or Index s = size(ind);
|
| 220 |
+
Index i;
|
| 221 |
+
i = ind[i];
|
| 222 |
+
\endcode
|
| 223 |
+
|
| 224 |
+
This means you can easily build your own fancy sequence generator and pass it to `operator()`.
|
| 225 |
+
Here is an example enlarging a given matrix while padding the additional first rows and columns through repetition:
|
| 226 |
+
|
| 227 |
+
<table class="example">
|
| 228 |
+
<tr><th>Example:</th><th>Output:</th></tr>
|
| 229 |
+
<tr><td>
|
| 230 |
+
\include Slicing_custom_padding_cxx11.cpp
|
| 231 |
+
</td>
|
| 232 |
+
<td>
|
| 233 |
+
\verbinclude Slicing_custom_padding_cxx11.out
|
| 234 |
+
</td></tr></table>
|
| 235 |
+
|
| 236 |
+
<br>
|
| 237 |
+
|
| 238 |
+
*/
|
| 239 |
+
|
| 240 |
+
/*
|
| 241 |
+
TODO add:
|
| 242 |
+
so_repeat_inner.cpp
|
| 243 |
+
so_repeleme.cpp
|
| 244 |
+
*/
|
| 245 |
+
}
|
include/eigen/doc/UnalignedArrayAssert.dox
ADDED
|
@@ -0,0 +1,133 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
namespace Eigen {
|
| 2 |
+
|
| 3 |
+
/** \eigenManualPage TopicUnalignedArrayAssert Explanation of the assertion on unaligned arrays
|
| 4 |
+
|
| 5 |
+
Hello! You are seeing this webpage because your program terminated on an assertion failure like this one:
|
| 6 |
+
<pre>
|
| 7 |
+
my_program: path/to/eigen/Eigen/src/Core/DenseStorage.h:44:
|
| 8 |
+
Eigen::internal::matrix_array<T, Size, MatrixOptions, Align>::internal::matrix_array()
|
| 9 |
+
[with T = double, int Size = 2, int MatrixOptions = 2, bool Align = true]:
|
| 10 |
+
Assertion `(reinterpret_cast<size_t>(array) & (sizemask)) == 0 && "this assertion
|
| 11 |
+
is explained here: http://eigen.tuxfamily.org/dox-devel/group__TopicUnalignedArrayAssert.html
|
| 12 |
+
**** READ THIS WEB PAGE !!! ****"' failed.
|
| 13 |
+
</pre>
|
| 14 |
+
|
| 15 |
+
There are 4 known causes for this issue.
|
| 16 |
+
If you can target \cpp17 only with a recent compiler (e.g., GCC>=7, clang>=5, MSVC>=19.12), then you're lucky: enabling c++17 should be enough (if not, please <a href="http://eigen.tuxfamily.org/bz/">report</a> to us).
|
| 17 |
+
Otherwise, please read on to understand those issues and learn how to fix them.
|
| 18 |
+
|
| 19 |
+
\eigenAutoToc
|
| 20 |
+
|
| 21 |
+
\section where Where in my own code is the cause of the problem?
|
| 22 |
+
|
| 23 |
+
First of all, you need to find out where in your own code this assertion was triggered from. At first glance, the error message doesn't look helpful, as it refers to a file inside Eigen! However, since your program crashed, if you can reproduce the crash, you can get a backtrace using any debugger. For example, if you're using GCC, you can use the GDB debugger as follows:
|
| 24 |
+
\code
|
| 25 |
+
$ gdb ./my_program # Start GDB on your program
|
| 26 |
+
> run # Start running your program
|
| 27 |
+
... # Now reproduce the crash!
|
| 28 |
+
> bt # Obtain the backtrace
|
| 29 |
+
\endcode
|
| 30 |
+
Now that you know precisely where in your own code the problem is happening, read on to understand what you need to change.
|
| 31 |
+
|
| 32 |
+
\section c1 Cause 1: Structures having Eigen objects as members
|
| 33 |
+
|
| 34 |
+
If you have code like this,
|
| 35 |
+
|
| 36 |
+
\code
|
| 37 |
+
class Foo
|
| 38 |
+
{
|
| 39 |
+
//...
|
| 40 |
+
Eigen::Vector4d v;
|
| 41 |
+
//...
|
| 42 |
+
};
|
| 43 |
+
//...
|
| 44 |
+
Foo *foo = new Foo;
|
| 45 |
+
\endcode
|
| 46 |
+
|
| 47 |
+
then you need to read this separate page: \ref TopicStructHavingEigenMembers "Structures Having Eigen Members".
|
| 48 |
+
|
| 49 |
+
Note that here, Eigen::Vector4d is only used as an example, more generally the issue arises for all \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen types".
|
| 50 |
+
|
| 51 |
+
\section c2 Cause 2: STL Containers or manual memory allocation
|
| 52 |
+
|
| 53 |
+
If you use STL Containers such as std::vector, std::map, ..., with %Eigen objects, or with classes containing %Eigen objects, like this,
|
| 54 |
+
|
| 55 |
+
\code
|
| 56 |
+
std::vector<Eigen::Matrix2d> my_vector;
|
| 57 |
+
struct my_class { ... Eigen::Matrix2d m; ... };
|
| 58 |
+
std::map<int, my_class> my_map;
|
| 59 |
+
\endcode
|
| 60 |
+
|
| 61 |
+
then you need to read this separate page: \ref TopicStlContainers "Using STL Containers with Eigen".
|
| 62 |
+
|
| 63 |
+
Note that here, Eigen::Matrix2d is only used as an example, more generally the issue arises for all \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen types" and \ref TopicStructHavingEigenMembers "structures having such Eigen objects as member".
|
| 64 |
+
|
| 65 |
+
The same issue will be exhibited by any classes/functions by-passing operator new to allocate memory, that is, by performing custom memory allocation followed by calls to the placement new operator. This is for instance typically the case of \c `std::make_shared` or `std::allocate_shared`, for which the solution is to use an \ref aligned_allocator "aligned allocator" as detailed in the \ref TopicStlContainers "solution for STL containers".
|
| 66 |
+
|
| 67 |
+
\section c3 Cause 3: Passing Eigen objects by value
|
| 68 |
+
|
| 69 |
+
If some function in your code is getting an %Eigen object passed by value, like this,
|
| 70 |
+
|
| 71 |
+
\code
|
| 72 |
+
void func(Eigen::Vector4d v);
|
| 73 |
+
\endcode
|
| 74 |
+
|
| 75 |
+
then you need to read this separate page: \ref TopicPassingByValue "Passing Eigen objects by value to functions".
|
| 76 |
+
|
| 77 |
+
Note that here, Eigen::Vector4d is only used as an example, more generally the issue arises for all \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen types".
|
| 78 |
+
|
| 79 |
+
\section c4 Cause 4: Compiler making a wrong assumption on stack alignment (for instance GCC on Windows)
|
| 80 |
+
|
| 81 |
+
This is a must-read for people using GCC on Windows (like MinGW or TDM-GCC). If you have this assertion failure in an innocent function declaring a local variable like this:
|
| 82 |
+
|
| 83 |
+
\code
|
| 84 |
+
void foo()
|
| 85 |
+
{
|
| 86 |
+
Eigen::Quaternionf q;
|
| 87 |
+
//...
|
| 88 |
+
}
|
| 89 |
+
\endcode
|
| 90 |
+
|
| 91 |
+
then you need to read this separate page: \ref TopicWrongStackAlignment "Compiler making a wrong assumption on stack alignment".
|
| 92 |
+
|
| 93 |
+
Note that here, Eigen::Quaternionf is only used as an example, more generally the issue arises for all \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen types".
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
\section explanation General explanation of this assertion
|
| 97 |
+
|
| 98 |
+
\ref TopicFixedSizeVectorizable "Fixed-size vectorizable Eigen objects" must absolutely be created at properly aligned locations, otherwise SIMD instructions addressing them will crash.
|
| 99 |
+
For instance, SSE/NEON/MSA/Altivec/VSX targets will require 16-byte-alignment, whereas AVX and AVX512 targets may require up to 32 and 64 byte alignment respectively.
|
| 100 |
+
|
| 101 |
+
%Eigen normally takes care of these alignment issues for you, by setting an alignment attribute on them and by overloading their `operator new`.
|
| 102 |
+
|
| 103 |
+
However there are a few corner cases where these alignment settings get overridden: they are the possible causes for this assertion.
|
| 104 |
+
|
| 105 |
+
\section getrid I don't care about optimal vectorization, how do I get rid of that stuff?
|
| 106 |
+
|
| 107 |
+
Three possibilities:
|
| 108 |
+
<ul>
|
| 109 |
+
<li>Use the \c DontAlign option to Matrix, Array, Quaternion, etc. objects that give you trouble. This way %Eigen won't try to over-align them, and thus won't assume any special alignment. On the down side, you will pay the cost of unaligned loads/stores for them, but on modern CPUs, the overhead is either null or marginal. See \link StructHavingEigenMembers_othersolutions here \endlink for an example.</li>
|
| 110 |
+
<li>Define \link TopicPreprocessorDirectivesPerformance EIGEN_MAX_STATIC_ALIGN_BYTES \endlink to 0. That disables all 16-byte (and above) static alignment code, while keeping 16-byte (or above) heap alignment. This has the effect of
|
| 111 |
+
vectorizing fixed-size objects (like Matrix4d) through unaligned stores (as controlled by \link TopicPreprocessorDirectivesPerformance EIGEN_UNALIGNED_VECTORIZE \endlink), while keeping unchanged the vectorization of dynamic-size objects
|
| 112 |
+
(like MatrixXd). On 64 bytes systems, you might also define it 16 to disable only 32 and 64 bytes of over-alignment. But do note that this breaks ABI compatibility with the default behavior of static alignment.</li>
|
| 113 |
+
<li>Or define both \link TopicPreprocessorDirectivesPerformance EIGEN_DONT_VECTORIZE \endlink and `EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT`. This keeps the
|
| 114 |
+
16-byte (or above) alignment code and thus preserves ABI compatibility, but completely disables vectorization.</li>
|
| 115 |
+
</ul>
|
| 116 |
+
|
| 117 |
+
If you want to know why defining `EIGEN_DONT_VECTORIZE` does not by itself disable 16-byte (or above) alignment and the assertion, here's the explanation:
|
| 118 |
+
|
| 119 |
+
It doesn't disable the assertion, because otherwise code that runs fine without vectorization would suddenly crash when enabling vectorization.
|
| 120 |
+
It doesn't disable 16-byte (or above) alignment, because that would mean that vectorized and non-vectorized code are not mutually ABI-compatible. This ABI compatibility is very important, even for people who develop only an in-house application, as for instance one may want to have in the same application a vectorized path and a non-vectorized path.
|
| 121 |
+
|
| 122 |
+
\section checkmycode How can I check my code is safe regarding alignment issues?
|
| 123 |
+
|
| 124 |
+
Unfortunately, there is no possibility in c++ to detect any of the aforementioned shortcoming at compile time (though static analyzers are becoming more and more powerful and could detect some of them).
|
| 125 |
+
Even at runtime, all we can do is to catch invalid unaligned allocation and trigger the explicit assertion mentioned at the beginning of this page.
|
| 126 |
+
Therefore, if your program runs fine on a given system with some given compilation flags, then this does not guarantee that your code is safe. For instance, on most 64 bits systems buffers are aligned on a 16 bytes boundary and so, if you do not enable AVX instruction set, then your code will run fine. On the other hand, the same code may assert if moving to a more exotic platform, or enabling AVX instructions that require 32 bytes alignment by default.
|
| 127 |
+
|
| 128 |
+
The situation is not hopeless though. Assuming your code is well covered by unit test, then you can check its alignment safety by linking it to a custom malloc library returning 8 bytes aligned buffers only. This way all alignment shortcomings should pop-up. To this end, you must also compile your program with \link TopicPreprocessorDirectivesPerformance EIGEN_MALLOC_ALREADY_ALIGNED=0 \endlink.
|
| 129 |
+
|
| 130 |
+
|
| 131 |
+
*/
|
| 132 |
+
|
| 133 |
+
}
|
include/eigen/doc/UsingBlasLapackBackends.dox
ADDED
|
@@ -0,0 +1,133 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
Copyright (c) 2011, Intel Corporation. All rights reserved.
|
| 3 |
+
Copyright (C) 2011-2016 Gael Guennebaud <gael.guennebaud@inria.fr>
|
| 4 |
+
|
| 5 |
+
Redistribution and use in source and binary forms, with or without modification,
|
| 6 |
+
are permitted provided that the following conditions are met:
|
| 7 |
+
|
| 8 |
+
* Redistributions of source code must retain the above copyright notice, this
|
| 9 |
+
list of conditions and the following disclaimer.
|
| 10 |
+
* Redistributions in binary form must reproduce the above copyright notice,
|
| 11 |
+
this list of conditions and the following disclaimer in the documentation
|
| 12 |
+
and/or other materials provided with the distribution.
|
| 13 |
+
* Neither the name of Intel Corporation nor the names of its contributors may
|
| 14 |
+
be used to endorse or promote products derived from this software without
|
| 15 |
+
specific prior written permission.
|
| 16 |
+
|
| 17 |
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
| 18 |
+
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
| 19 |
+
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
| 20 |
+
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
|
| 21 |
+
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
| 22 |
+
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
| 23 |
+
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
|
| 24 |
+
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
| 25 |
+
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
| 26 |
+
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
| 27 |
+
|
| 28 |
+
********************************************************************************
|
| 29 |
+
* Content : Documentation on the use of BLAS/LAPACK libraries through Eigen
|
| 30 |
+
********************************************************************************
|
| 31 |
+
*/
|
| 32 |
+
|
| 33 |
+
namespace Eigen {
|
| 34 |
+
|
| 35 |
+
/** \page TopicUsingBlasLapack Using BLAS/LAPACK from %Eigen
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
Since %Eigen version 3.3 and later, any F77 compatible BLAS or LAPACK libraries can be used as backends for dense matrix products and dense matrix decompositions.
|
| 39 |
+
For instance, one can use <a href="http://eigen.tuxfamily.org/Counter/redirect_to_mkl.php">Intel® MKL</a>, Apple's Accelerate framework on OSX, <a href="http://www.openblas.net/">OpenBLAS</a>, <a href="http://www.netlib.org/lapack">Netlib LAPACK</a>, etc.
|
| 40 |
+
|
| 41 |
+
Do not miss this \link TopicUsingIntelMKL page \endlink for further discussions on the specific use of Intel® MKL (also includes VML, PARDISO, etc.)
|
| 42 |
+
|
| 43 |
+
In order to use an external BLAS and/or LAPACK library, you must link your own application to the respective libraries and their dependencies.
|
| 44 |
+
For LAPACK, you must also link to the standard <a href="http://www.netlib.org/lapack/lapacke.html">Lapacke</a> library, which is used as a convenient thin layer between %Eigen's C++ code and LAPACK F77 interface. Then you must activate their usage by defining one or multiple of the following macros (\b before including any %Eigen's header):
|
| 45 |
+
|
| 46 |
+
\note For Mac users, in order to use the lapack version shipped with the Accelerate framework, you also need the lapacke library.
|
| 47 |
+
Using <a href="https://www.macports.org/">MacPorts</a>, this is as easy as:
|
| 48 |
+
\code
|
| 49 |
+
sudo port install lapack
|
| 50 |
+
\endcode
|
| 51 |
+
and then use the following link flags: \c -framework \c Accelerate \c /opt/local/lib/lapack/liblapacke.dylib
|
| 52 |
+
|
| 53 |
+
<table class="manual">
|
| 54 |
+
<tr><td>\c EIGEN_USE_BLAS </td><td>Enables the use of external BLAS level 2 and 3 routines (compatible with any F77 BLAS interface)</td></tr>
|
| 55 |
+
<tr class="alt"><td>\c EIGEN_USE_LAPACKE </td><td>Enables the use of external Lapack routines via the <a href="http://www.netlib.org/lapack/lapacke.html">Lapacke</a> C interface to Lapack (compatible with any F77 LAPACK interface)</td></tr>
|
| 56 |
+
<tr><td>\c EIGEN_USE_LAPACKE_STRICT </td><td>Same as \c EIGEN_USE_LAPACKE but algorithms of lower numerical robustness are disabled. \n This currently concerns only JacobiSVD which otherwise would be replaced by \c gesvd that is less robust than Jacobi rotations.</td></tr>
|
| 57 |
+
</table>
|
| 58 |
+
|
| 59 |
+
When doing so, a number of %Eigen's algorithms are silently substituted with calls to BLAS or LAPACK routines.
|
| 60 |
+
These substitutions apply only for \b Dynamic \b or \b large enough objects with one of the following four standard scalar types: \c float, \c double, \c complex<float>, and \c complex<double>.
|
| 61 |
+
Operations on other scalar types or mixing reals and complexes will continue to use the built-in algorithms.
|
| 62 |
+
|
| 63 |
+
The breadth of %Eigen functionality that can be substituted is listed in the table below.
|
| 64 |
+
<table class="manual">
|
| 65 |
+
<tr><th>Functional domain</th><th>Code example</th><th>BLAS/LAPACK routines</th></tr>
|
| 66 |
+
<tr><td>Matrix-matrix operations \n \c EIGEN_USE_BLAS </td><td>\code
|
| 67 |
+
m1*m2.transpose();
|
| 68 |
+
m1.selfadjointView<Lower>()*m2;
|
| 69 |
+
m1*m2.triangularView<Upper>();
|
| 70 |
+
m1.selfadjointView<Lower>().rankUpdate(m2,1.0);
|
| 71 |
+
\endcode</td><td>\code
|
| 72 |
+
?gemm
|
| 73 |
+
?symm/?hemm
|
| 74 |
+
?trmm
|
| 75 |
+
dsyrk/ssyrk
|
| 76 |
+
\endcode</td></tr>
|
| 77 |
+
<tr class="alt"><td>Matrix-vector operations \n \c EIGEN_USE_BLAS </td><td>\code
|
| 78 |
+
m1.adjoint()*b;
|
| 79 |
+
m1.selfadjointView<Lower>()*b;
|
| 80 |
+
m1.triangularView<Upper>()*b;
|
| 81 |
+
\endcode</td><td>\code
|
| 82 |
+
?gemv
|
| 83 |
+
?symv/?hemv
|
| 84 |
+
?trmv
|
| 85 |
+
\endcode</td></tr>
|
| 86 |
+
<tr><td>LU decomposition \n \c EIGEN_USE_LAPACKE \n \c EIGEN_USE_LAPACKE_STRICT </td><td>\code
|
| 87 |
+
v1 = m1.lu().solve(v2);
|
| 88 |
+
\endcode</td><td>\code
|
| 89 |
+
?getrf
|
| 90 |
+
\endcode</td></tr>
|
| 91 |
+
<tr class="alt"><td>Cholesky decomposition \n \c EIGEN_USE_LAPACKE \n \c EIGEN_USE_LAPACKE_STRICT </td><td>\code
|
| 92 |
+
v1 = m2.selfadjointView<Upper>().llt().solve(v2);
|
| 93 |
+
\endcode</td><td>\code
|
| 94 |
+
?potrf
|
| 95 |
+
\endcode</td></tr>
|
| 96 |
+
<tr><td>QR decomposition \n \c EIGEN_USE_LAPACKE \n \c EIGEN_USE_LAPACKE_STRICT </td><td>\code
|
| 97 |
+
m1.householderQr();
|
| 98 |
+
m1.colPivHouseholderQr();
|
| 99 |
+
\endcode</td><td>\code
|
| 100 |
+
?geqrf
|
| 101 |
+
?geqp3
|
| 102 |
+
\endcode</td></tr>
|
| 103 |
+
<tr class="alt"><td>Singular value decomposition \n \c EIGEN_USE_LAPACKE </td><td>\code
|
| 104 |
+
JacobiSVD<MatrixXd> svd;
|
| 105 |
+
svd.compute(m1, ComputeThinV);
|
| 106 |
+
\endcode</td><td>\code
|
| 107 |
+
?gesvd
|
| 108 |
+
\endcode</td></tr>
|
| 109 |
+
<tr><td>Eigen-value decompositions \n \c EIGEN_USE_LAPACKE \n \c EIGEN_USE_LAPACKE_STRICT </td><td>\code
|
| 110 |
+
EigenSolver<MatrixXd> es(m1);
|
| 111 |
+
ComplexEigenSolver<MatrixXcd> ces(m1);
|
| 112 |
+
SelfAdjointEigenSolver<MatrixXd> saes(m1+m1.transpose());
|
| 113 |
+
GeneralizedSelfAdjointEigenSolver<MatrixXd>
|
| 114 |
+
gsaes(m1+m1.transpose(),m2+m2.transpose());
|
| 115 |
+
\endcode</td><td>\code
|
| 116 |
+
?gees
|
| 117 |
+
?gees
|
| 118 |
+
?syev/?heev
|
| 119 |
+
?syev/?heev,
|
| 120 |
+
?potrf
|
| 121 |
+
\endcode</td></tr>
|
| 122 |
+
<tr class="alt"><td>Schur decomposition \n \c EIGEN_USE_LAPACKE \n \c EIGEN_USE_LAPACKE_STRICT </td><td>\code
|
| 123 |
+
RealSchur<MatrixXd> schurR(m1);
|
| 124 |
+
ComplexSchur<MatrixXcd> schurC(m1);
|
| 125 |
+
\endcode</td><td>\code
|
| 126 |
+
?gees
|
| 127 |
+
\endcode</td></tr>
|
| 128 |
+
</table>
|
| 129 |
+
In the examples, m1 and m2 are dense matrices and v1 and v2 are dense vectors.
|
| 130 |
+
|
| 131 |
+
*/
|
| 132 |
+
|
| 133 |
+
}
|
include/eigen/doc/UsingNVCC.dox
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
namespace Eigen {
|
| 3 |
+
|
| 4 |
+
/** \page TopicCUDA Using Eigen in CUDA kernels
|
| 5 |
+
|
| 6 |
+
Starting from CUDA 5.5 and Eigen 3.3, it is possible to use Eigen's matrices, vectors, and arrays for fixed size within CUDA kernels. This is especially useful when working on numerous but small problems. By default, when Eigen's headers are included within a .cu file compiled by nvcc most of Eigen's functions and methods are prefixed by the \c __device__ \c __host__ keywords making them callable from both host and device code.
|
| 7 |
+
This support can be disabled by defining \c EIGEN_NO_CUDA before including any Eigen's header.
|
| 8 |
+
This might be useful to disable some warnings when a .cu file makes use of Eigen on the host side only.
|
| 9 |
+
However, in both cases, host's SIMD vectorization has to be disabled in .cu files.
|
| 10 |
+
It is thus \b strongly \b recommended to properly move all costly host computation from your .cu files to regular .cpp files.
|
| 11 |
+
|
| 12 |
+
Known issues:
|
| 13 |
+
|
| 14 |
+
- \c nvcc with MS Visual Studio does not work (patch welcome)
|
| 15 |
+
|
| 16 |
+
- \c nvcc 5.5 with gcc-4.7 (or greater) has issues with the standard \c \<limits\> header file. To workaround this, you can add the following before including any other files:
|
| 17 |
+
\code
|
| 18 |
+
// workaround issue between gcc >= 4.7 and cuda 5.5
|
| 19 |
+
#if (defined __GNUC__) && (__GNUC__>4 || __GNUC_MINOR__>=7)
|
| 20 |
+
#undef _GLIBCXX_ATOMIC_BUILTINS
|
| 21 |
+
#undef _GLIBCXX_USE_INT128
|
| 22 |
+
#endif
|
| 23 |
+
\endcode
|
| 24 |
+
|
| 25 |
+
- On 64bits system Eigen uses \c long \c int as the default type for indexes and sizes. On CUDA device, it would make sense to default to 32 bits \c int.
|
| 26 |
+
However, to keep host and CUDA code compatible, this cannot be done automatically by %Eigen, and the user is thus required to define \c EIGEN_DEFAULT_DENSE_INDEX_TYPE to \c int throughout his code (or only for CUDA code if there is no interaction between host and CUDA code through %Eigen's object).
|
| 27 |
+
|
| 28 |
+
*/
|
| 29 |
+
|
| 30 |
+
}
|
include/eigen/doc/eigen_navtree_hacks.js
ADDED
|
@@ -0,0 +1,247 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
// generate a table of contents in the side-nav based on the h1/h2 tags of the current page.
|
| 3 |
+
function generate_autotoc() {
|
| 4 |
+
var headers = $("h1, h2");
|
| 5 |
+
if(headers.length > 1) {
|
| 6 |
+
var toc = $("#side-nav").append('<div id="nav-toc" class="toc"><h3>Table of contents</h3></div>');
|
| 7 |
+
toc = $("#nav-toc");
|
| 8 |
+
var footer = $("#nav-path");
|
| 9 |
+
var footerHeight = footer.height();
|
| 10 |
+
toc = toc.append('<ul></ul>');
|
| 11 |
+
toc = toc.find('ul');
|
| 12 |
+
var indices = new Array();
|
| 13 |
+
indices[0] = 0;
|
| 14 |
+
indices[1] = 0;
|
| 15 |
+
|
| 16 |
+
var h1counts = $("h1").length;
|
| 17 |
+
headers.each(function(i) {
|
| 18 |
+
var current = $(this);
|
| 19 |
+
var levelTag = current[0].tagName.charAt(1);
|
| 20 |
+
if(h1counts==0)
|
| 21 |
+
levelTag--;
|
| 22 |
+
var cur_id = current.attr("id");
|
| 23 |
+
|
| 24 |
+
indices[levelTag-1]+=1;
|
| 25 |
+
var prefix = indices[0];
|
| 26 |
+
if (levelTag >1) {
|
| 27 |
+
prefix+="."+indices[1];
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
// Uncomment to add number prefixes
|
| 31 |
+
// current.html(prefix + " " + current.html());
|
| 32 |
+
for(var l = levelTag; l < 2; ++l){
|
| 33 |
+
indices[l] = 0;
|
| 34 |
+
}
|
| 35 |
+
|
| 36 |
+
if(cur_id == undefined) {
|
| 37 |
+
current.attr('id', 'title' + i);
|
| 38 |
+
current.addClass('anchor');
|
| 39 |
+
toc.append("<li class='level" + levelTag + "'><a id='link" + i + "' href='#title" +
|
| 40 |
+
i + "' title='" + current.prop("tagName") + "'>" + current.text() + "</a></li>");
|
| 41 |
+
} else {
|
| 42 |
+
toc.append("<li class='level" + levelTag + "'><a id='" + cur_id + "' href='#title" +
|
| 43 |
+
i + "' title='" + current.prop("tagName") + "'>" + current.text() + "</a></li>");
|
| 44 |
+
}
|
| 45 |
+
});
|
| 46 |
+
resizeHeight();
|
| 47 |
+
}
|
| 48 |
+
}
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
var global_navtree_object;
|
| 52 |
+
|
| 53 |
+
// Overloaded to remove links to sections/subsections
|
| 54 |
+
function getNode(o, po)
|
| 55 |
+
{
|
| 56 |
+
po.childrenVisited = true;
|
| 57 |
+
var l = po.childrenData.length-1;
|
| 58 |
+
for (var i in po.childrenData) {
|
| 59 |
+
var nodeData = po.childrenData[i];
|
| 60 |
+
if((!nodeData[1]) || (nodeData[1].indexOf('#')==-1)) // <- we added this line
|
| 61 |
+
po.children[i] = newNode(o, po, nodeData[0], nodeData[1], nodeData[2], i==l);
|
| 62 |
+
}
|
| 63 |
+
}
|
| 64 |
+
|
| 65 |
+
// Overloaded to adjust the size of the navtree wrt the toc
|
| 66 |
+
function resizeHeight()
|
| 67 |
+
{
|
| 68 |
+
var header = $("#top");
|
| 69 |
+
var sidenav = $("#side-nav");
|
| 70 |
+
var content = $("#doc-content");
|
| 71 |
+
var navtree = $("#nav-tree");
|
| 72 |
+
var footer = $("#nav-path");
|
| 73 |
+
var toc = $("#nav-toc");
|
| 74 |
+
|
| 75 |
+
var headerHeight = header.outerHeight();
|
| 76 |
+
var footerHeight = footer.outerHeight();
|
| 77 |
+
var tocHeight = toc.height();
|
| 78 |
+
var windowHeight = $(window).height() - headerHeight - footerHeight;
|
| 79 |
+
content.css({height:windowHeight + "px"});
|
| 80 |
+
navtree.css({height:(windowHeight-tocHeight) + "px"});
|
| 81 |
+
sidenav.css({height:windowHeight + "px"});
|
| 82 |
+
}
|
| 83 |
+
|
| 84 |
+
// Overloaded to save the root node into global_navtree_object
|
| 85 |
+
function initNavTree(toroot,relpath)
|
| 86 |
+
{
|
| 87 |
+
var o = new Object();
|
| 88 |
+
global_navtree_object = o; // <- we added this line
|
| 89 |
+
o.toroot = toroot;
|
| 90 |
+
o.node = new Object();
|
| 91 |
+
o.node.li = document.getElementById("nav-tree-contents");
|
| 92 |
+
o.node.childrenData = NAVTREE;
|
| 93 |
+
o.node.children = new Array();
|
| 94 |
+
o.node.childrenUL = document.createElement("ul");
|
| 95 |
+
o.node.getChildrenUL = function() { return o.node.childrenUL; };
|
| 96 |
+
o.node.li.appendChild(o.node.childrenUL);
|
| 97 |
+
o.node.depth = 0;
|
| 98 |
+
o.node.relpath = relpath;
|
| 99 |
+
o.node.expanded = false;
|
| 100 |
+
o.node.isLast = true;
|
| 101 |
+
o.node.plus_img = document.createElement("img");
|
| 102 |
+
o.node.plus_img.src = relpath+"ftv2pnode.png";
|
| 103 |
+
o.node.plus_img.width = 16;
|
| 104 |
+
o.node.plus_img.height = 22;
|
| 105 |
+
|
| 106 |
+
if (localStorageSupported()) {
|
| 107 |
+
var navSync = $('#nav-sync');
|
| 108 |
+
if (cachedLink()) {
|
| 109 |
+
showSyncOff(navSync,relpath);
|
| 110 |
+
navSync.removeClass('sync');
|
| 111 |
+
} else {
|
| 112 |
+
showSyncOn(navSync,relpath);
|
| 113 |
+
}
|
| 114 |
+
navSync.click(function(){ toggleSyncButton(relpath); });
|
| 115 |
+
}
|
| 116 |
+
|
| 117 |
+
navTo(o,toroot,window.location.hash,relpath);
|
| 118 |
+
|
| 119 |
+
$(window).bind('hashchange', function(){
|
| 120 |
+
if (window.location.hash && window.location.hash.length>1){
|
| 121 |
+
var a;
|
| 122 |
+
if ($(location).attr('hash')){
|
| 123 |
+
var clslink=stripPath($(location).attr('pathname'))+':'+
|
| 124 |
+
$(location).attr('hash').substring(1);
|
| 125 |
+
a=$('.item a[class$="'+clslink+'"]');
|
| 126 |
+
}
|
| 127 |
+
if (a==null || !$(a).parent().parent().hasClass('selected')){
|
| 128 |
+
$('.item').removeClass('selected');
|
| 129 |
+
$('.item').removeAttr('id');
|
| 130 |
+
}
|
| 131 |
+
var link=stripPath2($(location).attr('pathname'));
|
| 132 |
+
navTo(o,link,$(location).attr('hash'),relpath);
|
| 133 |
+
} else if (!animationInProgress) {
|
| 134 |
+
$('#doc-content').scrollTop(0);
|
| 135 |
+
$('.item').removeClass('selected');
|
| 136 |
+
$('.item').removeAttr('id');
|
| 137 |
+
navTo(o,toroot,window.location.hash,relpath);
|
| 138 |
+
}
|
| 139 |
+
})
|
| 140 |
+
|
| 141 |
+
$(window).on("load", showRoot);
|
| 142 |
+
}
|
| 143 |
+
|
| 144 |
+
// return false if the node has no children at all, or has only section/subsection children
|
| 145 |
+
function checkChildrenData(node) {
|
| 146 |
+
if (!(typeof(node.childrenData)==='string')) {
|
| 147 |
+
for (var i in node.childrenData) {
|
| 148 |
+
var url = node.childrenData[i][1];
|
| 149 |
+
if(url.indexOf("#")==-1)
|
| 150 |
+
return true;
|
| 151 |
+
}
|
| 152 |
+
return false;
|
| 153 |
+
}
|
| 154 |
+
return (node.childrenData);
|
| 155 |
+
}
|
| 156 |
+
|
| 157 |
+
// Modified to:
|
| 158 |
+
// 1 - remove the root node
|
| 159 |
+
// 2 - remove the section/subsection children
|
| 160 |
+
function createIndent(o,domNode,node,level)
|
| 161 |
+
{
|
| 162 |
+
var level=-2; // <- we replaced level=-1 by level=-2
|
| 163 |
+
var n = node;
|
| 164 |
+
while (n.parentNode) { level++; n=n.parentNode; }
|
| 165 |
+
if (checkChildrenData(node)) { // <- we modified this line to use checkChildrenData(node) instead of node.childrenData
|
| 166 |
+
var imgNode = document.createElement("span");
|
| 167 |
+
imgNode.className = 'arrow';
|
| 168 |
+
imgNode.style.paddingLeft=(16*level).toString()+'px';
|
| 169 |
+
imgNode.innerHTML=arrowRight;
|
| 170 |
+
node.plus_img = imgNode;
|
| 171 |
+
node.expandToggle = document.createElement("a");
|
| 172 |
+
node.expandToggle.href = "javascript:void(0)";
|
| 173 |
+
node.expandToggle.onclick = function() {
|
| 174 |
+
if (node.expanded) {
|
| 175 |
+
$(node.getChildrenUL()).slideUp("fast");
|
| 176 |
+
node.plus_img.innerHTML=arrowRight;
|
| 177 |
+
node.expanded = false;
|
| 178 |
+
} else {
|
| 179 |
+
expandNode(o, node, false, false);
|
| 180 |
+
}
|
| 181 |
+
}
|
| 182 |
+
node.expandToggle.appendChild(imgNode);
|
| 183 |
+
domNode.appendChild(node.expandToggle);
|
| 184 |
+
} else {
|
| 185 |
+
var span = document.createElement("span");
|
| 186 |
+
span.className = 'arrow';
|
| 187 |
+
span.style.width = 16*(level+1)+'px';
|
| 188 |
+
span.innerHTML = ' ';
|
| 189 |
+
domNode.appendChild(span);
|
| 190 |
+
}
|
| 191 |
+
}
|
| 192 |
+
|
| 193 |
+
// Overloaded to automatically expand the selected node
|
| 194 |
+
function selectAndHighlight(hash,n)
|
| 195 |
+
{
|
| 196 |
+
var a;
|
| 197 |
+
if (hash) {
|
| 198 |
+
var link=stripPath($(location).attr('pathname'))+':'+hash.substring(1);
|
| 199 |
+
a=$('.item a[class$="'+link+'"]');
|
| 200 |
+
}
|
| 201 |
+
if (a && a.length) {
|
| 202 |
+
a.parent().parent().addClass('selected');
|
| 203 |
+
a.parent().parent().attr('id','selected');
|
| 204 |
+
highlightAnchor();
|
| 205 |
+
} else if (n) {
|
| 206 |
+
$(n.itemDiv).addClass('selected');
|
| 207 |
+
$(n.itemDiv).attr('id','selected');
|
| 208 |
+
}
|
| 209 |
+
if ($('#nav-tree-contents .item:first').hasClass('selected')) {
|
| 210 |
+
$('#nav-sync').css('top','30px');
|
| 211 |
+
} else {
|
| 212 |
+
$('#nav-sync').css('top','5px');
|
| 213 |
+
}
|
| 214 |
+
expandNode(global_navtree_object, n, true, true); // <- we added this line
|
| 215 |
+
showRoot();
|
| 216 |
+
}
|
| 217 |
+
|
| 218 |
+
|
| 219 |
+
$(document).ready(function() {
|
| 220 |
+
|
| 221 |
+
generate_autotoc();
|
| 222 |
+
|
| 223 |
+
(function (){ // wait until the first "selected" element has been created
|
| 224 |
+
try {
|
| 225 |
+
|
| 226 |
+
// this line will trigger an exception if there is no #selected element, i.e., before the tree structure is complete.
|
| 227 |
+
document.getElementById("selected").className = "item selected";
|
| 228 |
+
|
| 229 |
+
// ok, the default tree has been created, we can keep going...
|
| 230 |
+
|
| 231 |
+
// expand the "Chapters" node
|
| 232 |
+
if(window.location.href.indexOf('unsupported')==-1)
|
| 233 |
+
expandNode(global_navtree_object, global_navtree_object.node.children[0].children[2], true, true);
|
| 234 |
+
else
|
| 235 |
+
expandNode(global_navtree_object, global_navtree_object.node.children[0].children[1], true, true);
|
| 236 |
+
|
| 237 |
+
// Hide the root node "Eigen"
|
| 238 |
+
$(document.getElementsByClassName('index.html')[0]).parent().parent().css({display:"none"});
|
| 239 |
+
|
| 240 |
+
} catch (err) {
|
| 241 |
+
setTimeout(arguments.callee, 10);
|
| 242 |
+
}
|
| 243 |
+
})();
|
| 244 |
+
|
| 245 |
+
$(window).on("load", resizeHeight);
|
| 246 |
+
});
|
| 247 |
+
|