diff --git a/include/eigen/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h b/include/eigen/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h new file mode 100644 index 0000000000000000000000000000000000000000..a117fc1551920de7e36ea6e33cbf959a0e62b4ab --- /dev/null +++ b/include/eigen/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h @@ -0,0 +1,226 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2011-2014 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_BASIC_PRECONDITIONERS_H +#define EIGEN_BASIC_PRECONDITIONERS_H + +namespace Eigen { + +/** \ingroup IterativeLinearSolvers_Module + * \brief A preconditioner based on the digonal entries + * + * This class allows to approximately solve for A.x = b problems assuming A is a diagonal matrix. + * In other words, this preconditioner neglects all off diagonal entries and, in Eigen's language, solves for: + \code + A.diagonal().asDiagonal() . x = b + \endcode + * + * \tparam _Scalar the type of the scalar. + * + * \implsparsesolverconcept + * + * This preconditioner is suitable for both selfadjoint and general problems. + * The diagonal entries are pre-inverted and stored into a dense vector. + * + * \note A variant that has yet to be implemented would attempt to preserve the norm of each column. 
+ * + * \sa class LeastSquareDiagonalPreconditioner, class ConjugateGradient + */ +template +class DiagonalPreconditioner +{ + typedef _Scalar Scalar; + typedef Matrix Vector; + public: + typedef typename Vector::StorageIndex StorageIndex; + enum { + ColsAtCompileTime = Dynamic, + MaxColsAtCompileTime = Dynamic + }; + + DiagonalPreconditioner() : m_isInitialized(false) {} + + template + explicit DiagonalPreconditioner(const MatType& mat) : m_invdiag(mat.cols()) + { + compute(mat); + } + + EIGEN_CONSTEXPR Index rows() const EIGEN_NOEXCEPT { return m_invdiag.size(); } + EIGEN_CONSTEXPR Index cols() const EIGEN_NOEXCEPT { return m_invdiag.size(); } + + template + DiagonalPreconditioner& analyzePattern(const MatType& ) + { + return *this; + } + + template + DiagonalPreconditioner& factorize(const MatType& mat) + { + m_invdiag.resize(mat.cols()); + for(int j=0; j + DiagonalPreconditioner& compute(const MatType& mat) + { + return factorize(mat); + } + + /** \internal */ + template + void _solve_impl(const Rhs& b, Dest& x) const + { + x = m_invdiag.array() * b.array() ; + } + + template inline const Solve + solve(const MatrixBase& b) const + { + eigen_assert(m_isInitialized && "DiagonalPreconditioner is not initialized."); + eigen_assert(m_invdiag.size()==b.rows() + && "DiagonalPreconditioner::solve(): invalid number of rows of the right hand side matrix b"); + return Solve(*this, b.derived()); + } + + ComputationInfo info() { return Success; } + + protected: + Vector m_invdiag; + bool m_isInitialized; +}; + +/** \ingroup IterativeLinearSolvers_Module + * \brief Jacobi preconditioner for LeastSquaresConjugateGradient + * + * This class allows to approximately solve for A' A x = A' b problems assuming A' A is a diagonal matrix. + * In other words, this preconditioner neglects all off diagonal entries and, in Eigen's language, solves for: + \code + (A.adjoint() * A).diagonal().asDiagonal() * x = b + \endcode + * + * \tparam _Scalar the type of the scalar. 
+ * + * \implsparsesolverconcept + * + * The diagonal entries are pre-inverted and stored into a dense vector. + * + * \sa class LeastSquaresConjugateGradient, class DiagonalPreconditioner + */ +template +class LeastSquareDiagonalPreconditioner : public DiagonalPreconditioner<_Scalar> +{ + typedef _Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + typedef DiagonalPreconditioner<_Scalar> Base; + using Base::m_invdiag; + public: + + LeastSquareDiagonalPreconditioner() : Base() {} + + template + explicit LeastSquareDiagonalPreconditioner(const MatType& mat) : Base() + { + compute(mat); + } + + template + LeastSquareDiagonalPreconditioner& analyzePattern(const MatType& ) + { + return *this; + } + + template + LeastSquareDiagonalPreconditioner& factorize(const MatType& mat) + { + // Compute the inverse squared-norm of each column of mat + m_invdiag.resize(mat.cols()); + if(MatType::IsRowMajor) + { + m_invdiag.setZero(); + for(Index j=0; jRealScalar(0)) + m_invdiag(j) = RealScalar(1)/numext::real(m_invdiag(j)); + } + else + { + for(Index j=0; jRealScalar(0)) + m_invdiag(j) = RealScalar(1)/sum; + else + m_invdiag(j) = RealScalar(1); + } + } + Base::m_isInitialized = true; + return *this; + } + + template + LeastSquareDiagonalPreconditioner& compute(const MatType& mat) + { + return factorize(mat); + } + + ComputationInfo info() { return Success; } + + protected: +}; + +/** \ingroup IterativeLinearSolvers_Module + * \brief A naive preconditioner which approximates any matrix as the identity matrix + * + * \implsparsesolverconcept + * + * \sa class DiagonalPreconditioner + */ +class IdentityPreconditioner +{ + public: + + IdentityPreconditioner() {} + + template + explicit IdentityPreconditioner(const MatrixType& ) {} + + template + IdentityPreconditioner& analyzePattern(const MatrixType& ) { return *this; } + + template + IdentityPreconditioner& factorize(const MatrixType& ) { return *this; } + + template + IdentityPreconditioner& compute(const MatrixType& ) { 
return *this; } + + template + inline const Rhs& solve(const Rhs& b) const { return b; } + + ComputationInfo info() { return Success; } +}; + +} // end namespace Eigen + +#endif // EIGEN_BASIC_PRECONDITIONERS_H diff --git a/include/eigen/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h b/include/eigen/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h new file mode 100644 index 0000000000000000000000000000000000000000..1c9ade5623f9a9ad9c3ed4065073935a81e49441 --- /dev/null +++ b/include/eigen/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h @@ -0,0 +1,212 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2011-2014 Gael Guennebaud +// Copyright (C) 2012 Désiré Nuentsa-Wakam +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_BICGSTAB_H +#define EIGEN_BICGSTAB_H + +namespace Eigen { + +namespace internal { + +/** \internal Low-level bi conjugate gradient stabilized algorithm + * \param mat The matrix A + * \param rhs The right hand side vector b + * \param x On input and initial solution, on output the computed solution. + * \param precond A preconditioner being able to efficiently solve for an + * approximation of Ax=b (regardless of b) + * \param iters On input the max number of iteration, on output the number of performed iterations. + * \param tol_error On input the tolerance error, on output an estimation of the relative error. + * \return false in the case of numerical issue, for example a break down of BiCGSTAB. 
+ */ +template +bool bicgstab(const MatrixType& mat, const Rhs& rhs, Dest& x, + const Preconditioner& precond, Index& iters, + typename Dest::RealScalar& tol_error) +{ + using std::sqrt; + using std::abs; + typedef typename Dest::RealScalar RealScalar; + typedef typename Dest::Scalar Scalar; + typedef Matrix VectorType; + RealScalar tol = tol_error; + Index maxIters = iters; + + Index n = mat.cols(); + VectorType r = rhs - mat * x; + VectorType r0 = r; + + RealScalar r0_sqnorm = r0.squaredNorm(); + RealScalar rhs_sqnorm = rhs.squaredNorm(); + if(rhs_sqnorm == 0) + { + x.setZero(); + return true; + } + Scalar rho (1); + Scalar alpha (1); + Scalar w (1); + + VectorType v = VectorType::Zero(n), p = VectorType::Zero(n); + VectorType y(n), z(n); + VectorType kt(n), ks(n); + + VectorType s(n), t(n); + + RealScalar tol2 = tol*tol*rhs_sqnorm; + RealScalar eps2 = NumTraits::epsilon()*NumTraits::epsilon(); + Index i = 0; + Index restarts = 0; + + while ( r.squaredNorm() > tol2 && iRealScalar(0)) + w = t.dot(s) / tmp; + else + w = Scalar(0); + x += alpha * y + w * z; + r = s - w * t; + ++i; + } + tol_error = sqrt(r.squaredNorm()/rhs_sqnorm); + iters = i; + return true; +} + +} + +template< typename _MatrixType, + typename _Preconditioner = DiagonalPreconditioner > +class BiCGSTAB; + +namespace internal { + +template< typename _MatrixType, typename _Preconditioner> +struct traits > +{ + typedef _MatrixType MatrixType; + typedef _Preconditioner Preconditioner; +}; + +} + +/** \ingroup IterativeLinearSolvers_Module + * \brief A bi conjugate gradient stabilized solver for sparse square problems + * + * This class allows to solve for A.x = b sparse linear problems using a bi conjugate gradient + * stabilized algorithm. The vectors x and b can be either dense or sparse. + * + * \tparam _MatrixType the type of the sparse matrix A, can be a dense or a sparse matrix. + * \tparam _Preconditioner the type of the preconditioner. 
Default is DiagonalPreconditioner + * + * \implsparsesolverconcept + * + * The maximal number of iterations and tolerance value can be controlled via the setMaxIterations() + * and setTolerance() methods. The defaults are the size of the problem for the maximal number of iterations + * and NumTraits::epsilon() for the tolerance. + * + * The tolerance corresponds to the relative residual error: |Ax-b|/|b| + * + * \b Performance: when using sparse matrices, best performance is achied for a row-major sparse matrix format. + * Moreover, in this case multi-threading can be exploited if the user code is compiled with OpenMP enabled. + * See \ref TopicMultiThreading for details. + * + * This class can be used as the direct solver classes. Here is a typical usage example: + * \include BiCGSTAB_simple.cpp + * + * By default the iterations start with x=0 as an initial guess of the solution. + * One can control the start using the solveWithGuess() method. + * + * BiCGSTAB can also be used in a matrix-free context, see the following \link MatrixfreeSolverExample example \endlink. + * + * \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner + */ +template< typename _MatrixType, typename _Preconditioner> +class BiCGSTAB : public IterativeSolverBase > +{ + typedef IterativeSolverBase Base; + using Base::matrix; + using Base::m_error; + using Base::m_iterations; + using Base::m_info; + using Base::m_isInitialized; +public: + typedef _MatrixType MatrixType; + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::RealScalar RealScalar; + typedef _Preconditioner Preconditioner; + +public: + + /** Default constructor. */ + BiCGSTAB() : Base() {} + + /** Initialize the solver with matrix \a A for further \c Ax=b solving. + * + * This constructor is a shortcut for the default constructor followed + * by a call to compute(). + * + * \warning this class stores a reference to the matrix A as well as some + * precomputed values that depend on it. 
Therefore, if \a A is changed + * this class becomes invalid. Call compute() to update it with the new + * matrix A, or modify a copy of A. + */ + template + explicit BiCGSTAB(const EigenBase& A) : Base(A.derived()) {} + + ~BiCGSTAB() {} + + /** \internal */ + template + void _solve_vector_with_guess_impl(const Rhs& b, Dest& x) const + { + m_iterations = Base::maxIterations(); + m_error = Base::m_tolerance; + + bool ret = internal::bicgstab(matrix(), b, x, Base::m_preconditioner, m_iterations, m_error); + + m_info = (!ret) ? NumericalIssue + : m_error <= Base::m_tolerance ? Success + : NoConvergence; + } + +protected: + +}; + +} // end namespace Eigen + +#endif // EIGEN_BICGSTAB_H diff --git a/include/eigen/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h b/include/eigen/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h new file mode 100644 index 0000000000000000000000000000000000000000..c3ca0ad54c0d0967dc5c3bd399f00a44fa9bc1b9 --- /dev/null +++ b/include/eigen/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h @@ -0,0 +1,227 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2011-2014 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CONJUGATE_GRADIENT_H +#define EIGEN_CONJUGATE_GRADIENT_H + +namespace Eigen { + +namespace internal { + +/** \internal Low-level conjugate gradient algorithm + * \param mat The matrix A + * \param rhs The right hand side vector b + * \param x On input and initial solution, on output the computed solution. + * \param precond A preconditioner being able to efficiently solve for an + * approximation of Ax=b (regardless of b) + * \param iters On input the max number of iteration, on output the number of performed iterations. 
+ * \param tol_error On input the tolerance error, on output an estimation of the relative error. + */ +template +EIGEN_DONT_INLINE +void conjugate_gradient(const MatrixType& mat, const Rhs& rhs, Dest& x, + const Preconditioner& precond, Index& iters, + typename Dest::RealScalar& tol_error) +{ + typedef typename Dest::RealScalar RealScalar; + typedef typename Dest::Scalar Scalar; + typedef Matrix VectorType; + + RealScalar tol = tol_error; + Index maxIters = iters; + + Index n = mat.cols(); + + VectorType residual = rhs - mat * x; //initial residual + + RealScalar rhsNorm2 = rhs.squaredNorm(); + if(rhsNorm2 == 0) + { + x.setZero(); + iters = 0; + tol_error = 0; + return; + } + const RealScalar considerAsZero = (std::numeric_limits::min)(); + RealScalar threshold = numext::maxi(RealScalar(tol*tol*rhsNorm2),considerAsZero); + RealScalar residualNorm2 = residual.squaredNorm(); + if (residualNorm2 < threshold) + { + iters = 0; + tol_error = numext::sqrt(residualNorm2 / rhsNorm2); + return; + } + + VectorType p(n); + p = precond.solve(residual); // initial search direction + + VectorType z(n), tmp(n); + RealScalar absNew = numext::real(residual.dot(p)); // the square of the absolute value of r scaled by invM + Index i = 0; + while(i < maxIters) + { + tmp.noalias() = mat * p; // the bottleneck of the algorithm + + Scalar alpha = absNew / p.dot(tmp); // the amount we travel on dir + x += alpha * p; // update solution + residual -= alpha * tmp; // update residual + + residualNorm2 = residual.squaredNorm(); + if(residualNorm2 < threshold) + break; + + z = precond.solve(residual); // approximately solve for "A z = residual" + + RealScalar absOld = absNew; + absNew = numext::real(residual.dot(z)); // update the absolute value of r + RealScalar beta = absNew / absOld; // calculate the Gram-Schmidt value used to create the new search direction + p = z + beta * p; // update search direction + i++; + } + tol_error = numext::sqrt(residualNorm2 / rhsNorm2); + iters = i; +} + +} + 
+template< typename _MatrixType, int _UpLo=Lower, + typename _Preconditioner = DiagonalPreconditioner > +class ConjugateGradient; + +namespace internal { + +template< typename _MatrixType, int _UpLo, typename _Preconditioner> +struct traits > +{ + typedef _MatrixType MatrixType; + typedef _Preconditioner Preconditioner; +}; + +} + +/** \ingroup IterativeLinearSolvers_Module + * \brief A conjugate gradient solver for sparse (or dense) self-adjoint problems + * + * This class allows to solve for A.x = b linear problems using an iterative conjugate gradient algorithm. + * The matrix A must be selfadjoint. The matrix A and the vectors x and b can be either dense or sparse. + * + * \tparam _MatrixType the type of the matrix A, can be a dense or a sparse matrix. + * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower, + * \c Upper, or \c Lower|Upper in which the full matrix entries will be considered. + * Default is \c Lower, best performance is \c Lower|Upper. + * \tparam _Preconditioner the type of the preconditioner. Default is DiagonalPreconditioner + * + * \implsparsesolverconcept + * + * The maximal number of iterations and tolerance value can be controlled via the setMaxIterations() + * and setTolerance() methods. The defaults are the size of the problem for the maximal number of iterations + * and NumTraits::epsilon() for the tolerance. + * + * The tolerance corresponds to the relative residual error: |Ax-b|/|b| + * + * \b Performance: Even though the default value of \c _UpLo is \c Lower, significantly higher performance is + * achieved when using a complete matrix and \b Lower|Upper as the \a _UpLo template parameter. Moreover, in this + * case multi-threading can be exploited if the user code is compiled with OpenMP enabled. + * See \ref TopicMultiThreading for details. + * + * This class can be used as the direct solver classes. 
Here is a typical usage example: + \code + int n = 10000; + VectorXd x(n), b(n); + SparseMatrix A(n,n); + // fill A and b + ConjugateGradient, Lower|Upper> cg; + cg.compute(A); + x = cg.solve(b); + std::cout << "#iterations: " << cg.iterations() << std::endl; + std::cout << "estimated error: " << cg.error() << std::endl; + // update b, and solve again + x = cg.solve(b); + \endcode + * + * By default the iterations start with x=0 as an initial guess of the solution. + * One can control the start using the solveWithGuess() method. + * + * ConjugateGradient can also be used in a matrix-free context, see the following \link MatrixfreeSolverExample example \endlink. + * + * \sa class LeastSquaresConjugateGradient, class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner + */ +template< typename _MatrixType, int _UpLo, typename _Preconditioner> +class ConjugateGradient : public IterativeSolverBase > +{ + typedef IterativeSolverBase Base; + using Base::matrix; + using Base::m_error; + using Base::m_iterations; + using Base::m_info; + using Base::m_isInitialized; +public: + typedef _MatrixType MatrixType; + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::RealScalar RealScalar; + typedef _Preconditioner Preconditioner; + + enum { + UpLo = _UpLo + }; + +public: + + /** Default constructor. */ + ConjugateGradient() : Base() {} + + /** Initialize the solver with matrix \a A for further \c Ax=b solving. + * + * This constructor is a shortcut for the default constructor followed + * by a call to compute(). + * + * \warning this class stores a reference to the matrix A as well as some + * precomputed values that depend on it. Therefore, if \a A is changed + * this class becomes invalid. Call compute() to update it with the new + * matrix A, or modify a copy of A. 
+ */ + template + explicit ConjugateGradient(const EigenBase& A) : Base(A.derived()) {} + + ~ConjugateGradient() {} + + /** \internal */ + template + void _solve_vector_with_guess_impl(const Rhs& b, Dest& x) const + { + typedef typename Base::MatrixWrapper MatrixWrapper; + typedef typename Base::ActualMatrixType ActualMatrixType; + enum { + TransposeInput = (!MatrixWrapper::MatrixFree) + && (UpLo==(Lower|Upper)) + && (!MatrixType::IsRowMajor) + && (!NumTraits::IsComplex) + }; + typedef typename internal::conditional, ActualMatrixType const&>::type RowMajorWrapper; + EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(MatrixWrapper::MatrixFree,UpLo==(Lower|Upper)),MATRIX_FREE_CONJUGATE_GRADIENT_IS_COMPATIBLE_WITH_UPPER_UNION_LOWER_MODE_ONLY); + typedef typename internal::conditional::Type + >::type SelfAdjointWrapper; + + m_iterations = Base::maxIterations(); + m_error = Base::m_tolerance; + + RowMajorWrapper row_mat(matrix()); + internal::conjugate_gradient(SelfAdjointWrapper(row_mat), b, x, Base::m_preconditioner, m_iterations, m_error); + m_info = m_error <= Base::m_tolerance ? Success : NoConvergence; + } + +protected: + +}; + +} // end namespace Eigen + +#endif // EIGEN_CONJUGATE_GRADIENT_H diff --git a/include/eigen/Eigen/src/IterativeLinearSolvers/IncompleteCholesky.h b/include/eigen/Eigen/src/IterativeLinearSolvers/IncompleteCholesky.h new file mode 100644 index 0000000000000000000000000000000000000000..5e632c4e2f3555d72569eba6de97d8631747199a --- /dev/null +++ b/include/eigen/Eigen/src/IterativeLinearSolvers/IncompleteCholesky.h @@ -0,0 +1,394 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2012 Désiré Nuentsa-Wakam +// Copyright (C) 2015 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_INCOMPLETE_CHOlESKY_H +#define EIGEN_INCOMPLETE_CHOlESKY_H + +#include +#include + +namespace Eigen { +/** + * \brief Modified Incomplete Cholesky with dual threshold + * + * References : C-J. Lin and J. J. Moré, Incomplete Cholesky Factorizations with + * Limited memory, SIAM J. Sci. Comput. 21(1), pp. 24-45, 1999 + * + * \tparam Scalar the scalar type of the input matrices + * \tparam _UpLo The triangular part that will be used for the computations. It can be Lower + * or Upper. Default is Lower. + * \tparam _OrderingType The ordering method to use, either AMDOrdering<> or NaturalOrdering<>. Default is AMDOrdering, + * unless EIGEN_MPL2_ONLY is defined, in which case the default is NaturalOrdering. + * + * \implsparsesolverconcept + * + * It performs the following incomplete factorization: \f$ S P A P' S \approx L L' \f$ + * where L is a lower triangular factor, S is a diagonal scaling matrix, and P is a + * fill-in reducing permutation as computed by the ordering method. + * + * \b Shifting \b strategy: Let \f$ B = S P A P' S \f$ be the scaled matrix on which the factorization is carried out, + * and \f$ \beta \f$ be the minimum value of the diagonal. If \f$ \beta > 0 \f$ then, the factorization is directly performed + * on the matrix B. Otherwise, the factorization is performed on the shifted matrix \f$ B + (\sigma+|\beta| I \f$ where + * \f$ \sigma \f$ is the initial shift value as returned and set by setInitialShift() method. The default value is \f$ \sigma = 10^{-3} \f$. + * If the factorization fails, then the shift in doubled until it succeed or a maximum of ten attempts. If it still fails, as returned by + * the info() method, then you can either increase the initial shift, or better use another preconditioning technique. 
+ * + */ +template > +class IncompleteCholesky : public SparseSolverBase > +{ + protected: + typedef SparseSolverBase > Base; + using Base::m_isInitialized; + public: + typedef typename NumTraits::Real RealScalar; + typedef _OrderingType OrderingType; + typedef typename OrderingType::PermutationType PermutationType; + typedef typename PermutationType::StorageIndex StorageIndex; + typedef SparseMatrix FactorType; + typedef Matrix VectorSx; + typedef Matrix VectorRx; + typedef Matrix VectorIx; + typedef std::vector > VectorList; + enum { UpLo = _UpLo }; + enum { + ColsAtCompileTime = Dynamic, + MaxColsAtCompileTime = Dynamic + }; + public: + + /** Default constructor leaving the object in a partly non-initialized stage. + * + * You must call compute() or the pair analyzePattern()/factorize() to make it valid. + * + * \sa IncompleteCholesky(const MatrixType&) + */ + IncompleteCholesky() : m_initialShift(1e-3),m_analysisIsOk(false),m_factorizationIsOk(false) {} + + /** Constructor computing the incomplete factorization for the given matrix \a matrix. + */ + template + IncompleteCholesky(const MatrixType& matrix) : m_initialShift(1e-3),m_analysisIsOk(false),m_factorizationIsOk(false) + { + compute(matrix); + } + + /** \returns number of rows of the factored matrix */ + EIGEN_CONSTEXPR Index rows() const EIGEN_NOEXCEPT { return m_L.rows(); } + + /** \returns number of columns of the factored matrix */ + EIGEN_CONSTEXPR Index cols() const EIGEN_NOEXCEPT { return m_L.cols(); } + + + /** \brief Reports whether previous computation was successful. + * + * It triggers an assertion if \c *this has not been initialized through the respective constructor, + * or a call to compute() or analyzePattern(). + * + * \returns \c Success if computation was successful, + * \c NumericalIssue if the matrix appears to be negative. 
+ */ + ComputationInfo info() const + { + eigen_assert(m_isInitialized && "IncompleteCholesky is not initialized."); + return m_info; + } + + /** \brief Set the initial shift parameter \f$ \sigma \f$. + */ + void setInitialShift(RealScalar shift) { m_initialShift = shift; } + + /** \brief Computes the fill reducing permutation vector using the sparsity pattern of \a mat + */ + template + void analyzePattern(const MatrixType& mat) + { + OrderingType ord; + PermutationType pinv; + ord(mat.template selfadjointView(), pinv); + if(pinv.size()>0) m_perm = pinv.inverse(); + else m_perm.resize(0); + m_L.resize(mat.rows(), mat.cols()); + m_analysisIsOk = true; + m_isInitialized = true; + m_info = Success; + } + + /** \brief Performs the numerical factorization of the input matrix \a mat + * + * The method analyzePattern() or compute() must have been called beforehand + * with a matrix having the same pattern. + * + * \sa compute(), analyzePattern() + */ + template + void factorize(const MatrixType& mat); + + /** Computes or re-computes the incomplete Cholesky factorization of the input matrix \a mat + * + * It is a shortcut for a sequential call to the analyzePattern() and factorize() methods. 
+ * + * \sa analyzePattern(), factorize() + */ + template + void compute(const MatrixType& mat) + { + analyzePattern(mat); + factorize(mat); + } + + // internal + template + void _solve_impl(const Rhs& b, Dest& x) const + { + eigen_assert(m_factorizationIsOk && "factorize() should be called first"); + if (m_perm.rows() == b.rows()) x = m_perm * b; + else x = b; + x = m_scale.asDiagonal() * x; + x = m_L.template triangularView().solve(x); + x = m_L.adjoint().template triangularView().solve(x); + x = m_scale.asDiagonal() * x; + if (m_perm.rows() == b.rows()) + x = m_perm.inverse() * x; + } + + /** \returns the sparse lower triangular factor L */ + const FactorType& matrixL() const { eigen_assert(m_factorizationIsOk && "factorize() should be called first"); return m_L; } + + /** \returns a vector representing the scaling factor S */ + const VectorRx& scalingS() const { eigen_assert(m_factorizationIsOk && "factorize() should be called first"); return m_scale; } + + /** \returns the fill-in reducing permutation P (can be empty for a natural ordering) */ + const PermutationType& permutationP() const { eigen_assert(m_analysisIsOk && "analyzePattern() should be called first"); return m_perm; } + + protected: + FactorType m_L; // The lower part stored in CSC + VectorRx m_scale; // The vector for scaling the matrix + RealScalar m_initialShift; // The initial shift parameter + bool m_analysisIsOk; + bool m_factorizationIsOk; + ComputationInfo m_info; + PermutationType m_perm; + + private: + inline void updateList(Ref colPtr, Ref rowIdx, Ref vals, const Index& col, const Index& jk, VectorIx& firstElt, VectorList& listCol); +}; + +// Based on the following paper: +// C-J. Lin and J. J. Moré, Incomplete Cholesky Factorizations with +// Limited memory, SIAM J. Sci. Comput. 21(1), pp. 
24-45, 1999 +// http://ftp.mcs.anl.gov/pub/tech_reports/reports/P682.pdf +template +template +void IncompleteCholesky::factorize(const _MatrixType& mat) +{ + using std::sqrt; + eigen_assert(m_analysisIsOk && "analyzePattern() should be called first"); + + // Dropping strategy : Keep only the p largest elements per column, where p is the number of elements in the column of the original matrix. Other strategies will be added + + // Apply the fill-reducing permutation computed in analyzePattern() + if (m_perm.rows() == mat.rows() ) // To detect the null permutation + { + // The temporary is needed to make sure that the diagonal entry is properly sorted + FactorType tmp(mat.rows(), mat.cols()); + tmp = mat.template selfadjointView<_UpLo>().twistedBy(m_perm); + m_L.template selfadjointView() = tmp.template selfadjointView(); + } + else + { + m_L.template selfadjointView() = mat.template selfadjointView<_UpLo>(); + } + + Index n = m_L.cols(); + Index nnz = m_L.nonZeros(); + Map vals(m_L.valuePtr(), nnz); //values + Map rowIdx(m_L.innerIndexPtr(), nnz); //Row indices + Map colPtr( m_L.outerIndexPtr(), n+1); // Pointer to the beginning of each row + VectorIx firstElt(n-1); // for each j, points to the next entry in vals that will be used in the factorization + VectorList listCol(n); // listCol(j) is a linked list of columns to update column j + VectorSx col_vals(n); // Store a nonzero values in each column + VectorIx col_irow(n); // Row indices of nonzero elements in each column + VectorIx col_pattern(n); + col_pattern.fill(-1); + StorageIndex col_nnz; + + + // Computes the scaling factors + m_scale.resize(n); + m_scale.setZero(); + for (Index j = 0; j < n; j++) + for (Index k = colPtr[j]; k < colPtr[j+1]; k++) + { + m_scale(j) += numext::abs2(vals(k)); + if(rowIdx[k]!=j) + m_scale(rowIdx[k]) += numext::abs2(vals(k)); + } + + m_scale = m_scale.cwiseSqrt().cwiseSqrt(); + + for (Index j = 0; j < n; ++j) + if(m_scale(j)>(std::numeric_limits::min)()) + m_scale(j) = 
RealScalar(1)/m_scale(j); + else + m_scale(j) = 1; + + // TODO disable scaling if not needed, i.e., if it is roughly uniform? (this will make solve() faster) + + // Scale and compute the shift for the matrix + RealScalar mindiag = NumTraits::highest(); + for (Index j = 0; j < n; j++) + { + for (Index k = colPtr[j]; k < colPtr[j+1]; k++) + vals[k] *= (m_scale(j)*m_scale(rowIdx[k])); + eigen_internal_assert(rowIdx[colPtr[j]]==j && "IncompleteCholesky: only the lower triangular part must be stored"); + mindiag = numext::mini(numext::real(vals[colPtr[j]]), mindiag); + } + + FactorType L_save = m_L; + + RealScalar shift = 0; + if(mindiag <= RealScalar(0.)) + shift = m_initialShift - mindiag; + + m_info = NumericalIssue; + + // Try to perform the incomplete factorization using the current shift + int iter = 0; + do + { + // Apply the shift to the diagonal elements of the matrix + for (Index j = 0; j < n; j++) + vals[colPtr[j]] += shift; + + // jki version of the Cholesky factorization + Index j=0; + for (; j < n; ++j) + { + // Left-looking factorization of the j-th column + // First, load the j-th column into col_vals + Scalar diag = vals[colPtr[j]]; // It is assumed that only the lower part is stored + col_nnz = 0; + for (Index i = colPtr[j] + 1; i < colPtr[j+1]; i++) + { + StorageIndex l = rowIdx[i]; + col_vals(col_nnz) = vals[i]; + col_irow(col_nnz) = l; + col_pattern(l) = col_nnz; + col_nnz++; + } + { + typename std::list::iterator k; + // Browse all previous columns that will update column j + for(k = listCol[j].begin(); k != listCol[j].end(); k++) + { + Index jk = firstElt(*k); // First element to use in the column + eigen_internal_assert(rowIdx[jk]==j); + Scalar v_j_jk = numext::conj(vals[jk]); + + jk += 1; + for (Index i = jk; i < colPtr[*k+1]; i++) + { + StorageIndex l = rowIdx[i]; + if(col_pattern[l]<0) + { + col_vals(col_nnz) = vals[i] * v_j_jk; + col_irow[col_nnz] = l; + col_pattern(l) = col_nnz; + col_nnz++; + } + else + col_vals(col_pattern[l]) -= vals[i] * 
v_j_jk; + } + updateList(colPtr,rowIdx,vals, *k, jk, firstElt, listCol); + } + } + + // Scale the current column + if(numext::real(diag) <= 0) + { + if(++iter>=10) + return; + + // increase shift + shift = numext::maxi(m_initialShift,RealScalar(2)*shift); + // restore m_L, col_pattern, and listCol + vals = Map(L_save.valuePtr(), nnz); + rowIdx = Map(L_save.innerIndexPtr(), nnz); + colPtr = Map(L_save.outerIndexPtr(), n+1); + col_pattern.fill(-1); + for(Index i=0; i cvals = col_vals.head(col_nnz); + Ref cirow = col_irow.head(col_nnz); + internal::QuickSplit(cvals,cirow, p); + // Insert the largest p elements in the matrix + Index cpt = 0; + for (Index i = colPtr[j]+1; i < colPtr[j+1]; i++) + { + vals[i] = col_vals(cpt); + rowIdx[i] = col_irow(cpt); + // restore col_pattern: + col_pattern(col_irow(cpt)) = -1; + cpt++; + } + // Get the first smallest row index and put it after the diagonal element + Index jk = colPtr(j)+1; + updateList(colPtr,rowIdx,vals,j,jk,firstElt,listCol); + } + + if(j==n) + { + m_factorizationIsOk = true; + m_info = Success; + } + } while(m_info!=Success); +} + +template +inline void IncompleteCholesky::updateList(Ref colPtr, Ref rowIdx, Ref vals, const Index& col, const Index& jk, VectorIx& firstElt, VectorList& listCol) +{ + if (jk < colPtr(col+1) ) + { + Index p = colPtr(col+1) - jk; + Index minpos; + rowIdx.segment(jk,p).minCoeff(&minpos); + minpos += jk; + if (rowIdx(minpos) != rowIdx(jk)) + { + //Swap + std::swap(rowIdx(jk),rowIdx(minpos)); + std::swap(vals(jk),vals(minpos)); + } + firstElt(col) = internal::convert_index(jk); + listCol[rowIdx(jk)].push_back(internal::convert_index(col)); + } +} + +} // end namespace Eigen + +#endif diff --git a/include/eigen/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h b/include/eigen/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h new file mode 100644 index 0000000000000000000000000000000000000000..cdcf709eb63e0c5785d314db3855c813b8f4e3f3 --- /dev/null +++ 
b/include/eigen/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h @@ -0,0 +1,453 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2012 Désiré Nuentsa-Wakam +// Copyright (C) 2014 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_INCOMPLETE_LUT_H +#define EIGEN_INCOMPLETE_LUT_H + + +namespace Eigen { + +namespace internal { + +/** \internal + * Compute a quick-sort split of a vector + * On output, the vector row is permuted such that its elements satisfy + * abs(row(i)) >= abs(row(ncut)) if incut + * \param row The vector of values + * \param ind The array of index for the elements in @p row + * \param ncut The number of largest elements to keep + **/ +template +Index QuickSplit(VectorV &row, VectorI &ind, Index ncut) +{ + typedef typename VectorV::RealScalar RealScalar; + using std::swap; + using std::abs; + Index mid; + Index n = row.size(); /* length of the vector */ + Index first, last ; + + ncut--; /* to fit the zero-based indices */ + first = 0; + last = n-1; + if (ncut < first || ncut > last ) return 0; + + do { + mid = first; + RealScalar abskey = abs(row(mid)); + for (Index j = first + 1; j <= last; j++) { + if ( abs(row(j)) > abskey) { + ++mid; + swap(row(mid), row(j)); + swap(ind(mid), ind(j)); + } + } + /* Interchange for the pivot element */ + swap(row(mid), row(first)); + swap(ind(mid), ind(first)); + + if (mid > ncut) last = mid - 1; + else if (mid < ncut ) first = mid + 1; + } while (mid != ncut ); + + return 0; /* mid is equal to ncut */ +} + +}// end namespace internal + +/** \ingroup IterativeLinearSolvers_Module + * \class IncompleteLUT + * \brief Incomplete LU factorization with dual-threshold strategy + * + * \implsparsesolverconcept + * + * During the numerical factorization, two dropping 
rules are used : + * 1) any element whose magnitude is less than some tolerance is dropped. + * This tolerance is obtained by multiplying the input tolerance @p droptol + * by the average magnitude of all the original elements in the current row. + * 2) After the elimination of the row, only the @p fill largest elements in + * the L part and the @p fill largest elements in the U part are kept + * (in addition to the diagonal element ). Note that @p fill is computed from + * the input parameter @p fillfactor which is used as the ratio to control the fill-in + * relative to the initial number of nonzero elements. + * + * The two extreme cases are when @p droptol=0 (to keep all the @p fill*2 largest elements) + * and when @p fill=n/2 with @p droptol being different from zero. + * + * References : Yousef Saad, ILUT: A dual threshold incomplete LU factorization, + * Numerical Linear Algebra with Applications, 1(4), pp 387-402, 1994. + * + * NOTE : The following implementation is derived from the ILUT implementation + * in the SPARSKIT package, Copyright (C) 2005, the Regents of the University of Minnesota + * released under the terms of the GNU LGPL: + * http://www-users.cs.umn.edu/~saad/software/SPARSKIT/README + * However, Yousef Saad gave us permission to relicense his ILUT code to MPL2. 
+ * See the Eigen mailing list archive, thread: ILUT, date: July 8, 2012: + * http://listengine.tuxfamily.org/lists.tuxfamily.org/eigen/2012/07/msg00064.html + * alternatively, on GMANE: + * http://comments.gmane.org/gmane.comp.lib.eigen/3302 + */ +template +class IncompleteLUT : public SparseSolverBase > +{ + protected: + typedef SparseSolverBase Base; + using Base::m_isInitialized; + public: + typedef _Scalar Scalar; + typedef _StorageIndex StorageIndex; + typedef typename NumTraits::Real RealScalar; + typedef Matrix Vector; + typedef Matrix VectorI; + typedef SparseMatrix FactorType; + + enum { + ColsAtCompileTime = Dynamic, + MaxColsAtCompileTime = Dynamic + }; + + public: + + IncompleteLUT() + : m_droptol(NumTraits::dummy_precision()), m_fillfactor(10), + m_analysisIsOk(false), m_factorizationIsOk(false) + {} + + template + explicit IncompleteLUT(const MatrixType& mat, const RealScalar& droptol=NumTraits::dummy_precision(), int fillfactor = 10) + : m_droptol(droptol),m_fillfactor(fillfactor), + m_analysisIsOk(false),m_factorizationIsOk(false) + { + eigen_assert(fillfactor != 0); + compute(mat); + } + + EIGEN_CONSTEXPR Index rows() const EIGEN_NOEXCEPT { return m_lu.rows(); } + + EIGEN_CONSTEXPR Index cols() const EIGEN_NOEXCEPT { return m_lu.cols(); } + + /** \brief Reports whether previous computation was successful. + * + * \returns \c Success if computation was successful, + * \c NumericalIssue if the matrix.appears to be negative. 
+ */ + ComputationInfo info() const + { + eigen_assert(m_isInitialized && "IncompleteLUT is not initialized."); + return m_info; + } + + template + void analyzePattern(const MatrixType& amat); + + template + void factorize(const MatrixType& amat); + + /** + * Compute an incomplete LU factorization with dual threshold on the matrix mat + * No pivoting is done in this version + * + **/ + template + IncompleteLUT& compute(const MatrixType& amat) + { + analyzePattern(amat); + factorize(amat); + return *this; + } + + void setDroptol(const RealScalar& droptol); + void setFillfactor(int fillfactor); + + template + void _solve_impl(const Rhs& b, Dest& x) const + { + x = m_Pinv * b; + x = m_lu.template triangularView().solve(x); + x = m_lu.template triangularView().solve(x); + x = m_P * x; + } + +protected: + + /** keeps off-diagonal entries; drops diagonal entries */ + struct keep_diag { + inline bool operator() (const Index& row, const Index& col, const Scalar&) const + { + return row!=col; + } + }; + +protected: + + FactorType m_lu; + RealScalar m_droptol; + int m_fillfactor; + bool m_analysisIsOk; + bool m_factorizationIsOk; + ComputationInfo m_info; + PermutationMatrix m_P; // Fill-reducing permutation + PermutationMatrix m_Pinv; // Inverse permutation +}; + +/** + * Set control parameter droptol + * \param droptol Drop any element whose magnitude is less than this tolerance + **/ +template +void IncompleteLUT::setDroptol(const RealScalar& droptol) +{ + this->m_droptol = droptol; +} + +/** + * Set control parameter fillfactor + * \param fillfactor This is used to compute the number @p fill_in of largest elements to keep on each row. 
+ **/ +template +void IncompleteLUT::setFillfactor(int fillfactor) +{ + this->m_fillfactor = fillfactor; +} + +template +template +void IncompleteLUT::analyzePattern(const _MatrixType& amat) +{ + // Compute the Fill-reducing permutation + // Since ILUT does not perform any numerical pivoting, + // it is highly preferable to keep the diagonal through symmetric permutations. + // To this end, let's symmetrize the pattern and perform AMD on it. + SparseMatrix mat1 = amat; + SparseMatrix mat2 = amat.transpose(); + // FIXME for a matrix with nearly symmetric pattern, mat2+mat1 is the appropriate choice. + // on the other hand for a really non-symmetric pattern, mat2*mat1 should be preferred... + SparseMatrix AtA = mat2 + mat1; + AMDOrdering ordering; + ordering(AtA,m_P); + m_Pinv = m_P.inverse(); // cache the inverse permutation + m_analysisIsOk = true; + m_factorizationIsOk = false; + m_isInitialized = true; +} + +template +template +void IncompleteLUT::factorize(const _MatrixType& amat) +{ + using std::sqrt; + using std::swap; + using std::abs; + using internal::convert_index; + + eigen_assert((amat.rows() == amat.cols()) && "The factorization should be done on a square matrix"); + Index n = amat.cols(); // Size of the matrix + m_lu.resize(n,n); + // Declare Working vectors and variables + Vector u(n) ; // real values of the row -- maximum size is n -- + VectorI ju(n); // column position of the values in u -- maximum size is n + VectorI jr(n); // Indicate the position of the nonzero elements in the vector u -- A zero location is indicated by -1 + + // Apply the fill-reducing permutation + eigen_assert(m_analysisIsOk && "You must first call analyzePattern()"); + SparseMatrix mat; + mat = amat.twistedBy(m_Pinv); + + // Initialization + jr.fill(-1); + ju.fill(0); + u.fill(0); + + // number of largest elements to keep in each row: + Index fill_in = (amat.nonZeros()*m_fillfactor)/n + 1; + if (fill_in > n) fill_in = n; + + // number of largest nonzero elements to keep in 
the L and the U part of the current row: + Index nnzL = fill_in/2; + Index nnzU = nnzL; + m_lu.reserve(n * (nnzL + nnzU + 1)); + + // global loop over the rows of the sparse matrix + for (Index ii = 0; ii < n; ii++) + { + // 1 - copy the lower and the upper part of the row i of mat in the working vector u + + Index sizeu = 1; // number of nonzero elements in the upper part of the current row + Index sizel = 0; // number of nonzero elements in the lower part of the current row + ju(ii) = convert_index(ii); + u(ii) = 0; + jr(ii) = convert_index(ii); + RealScalar rownorm = 0; + + typename FactorType::InnerIterator j_it(mat, ii); // Iterate through the current row ii + for (; j_it; ++j_it) + { + Index k = j_it.index(); + if (k < ii) + { + // copy the lower part + ju(sizel) = convert_index(k); + u(sizel) = j_it.value(); + jr(k) = convert_index(sizel); + ++sizel; + } + else if (k == ii) + { + u(ii) = j_it.value(); + } + else + { + // copy the upper part + Index jpos = ii + sizeu; + ju(jpos) = convert_index(k); + u(jpos) = j_it.value(); + jr(k) = convert_index(jpos); + ++sizeu; + } + rownorm += numext::abs2(j_it.value()); + } + + // 2 - detect possible zero row + if(rownorm==0) + { + m_info = NumericalIssue; + return; + } + // Take the 2-norm of the current row as a relative tolerance + rownorm = sqrt(rownorm); + + // 3 - eliminate the previous nonzero rows + Index jj = 0; + Index len = 0; + while (jj < sizel) + { + // In order to eliminate in the correct order, + // we must select first the smallest column index among ju(jj:sizel) + Index k; + Index minrow = ju.segment(jj,sizel-jj).minCoeff(&k); // k is relative to the segment + k += jj; + if (minrow != ju(jj)) + { + // swap the two locations + Index j = ju(jj); + swap(ju(jj), ju(k)); + jr(minrow) = convert_index(jj); + jr(j) = convert_index(k); + swap(u(jj), u(k)); + } + // Reset this location + jr(minrow) = -1; + + // Start elimination + typename FactorType::InnerIterator ki_it(m_lu, minrow); + while (ki_it && 
ki_it.index() < minrow) ++ki_it; + eigen_internal_assert(ki_it && ki_it.col()==minrow); + Scalar fact = u(jj) / ki_it.value(); + + // drop too small elements + if(abs(fact) <= m_droptol) + { + jj++; + continue; + } + + // linear combination of the current row ii and the row minrow + ++ki_it; + for (; ki_it; ++ki_it) + { + Scalar prod = fact * ki_it.value(); + Index j = ki_it.index(); + Index jpos = jr(j); + if (jpos == -1) // fill-in element + { + Index newpos; + if (j >= ii) // dealing with the upper part + { + newpos = ii + sizeu; + sizeu++; + eigen_internal_assert(sizeu<=n); + } + else // dealing with the lower part + { + newpos = sizel; + sizel++; + eigen_internal_assert(sizel<=ii); + } + ju(newpos) = convert_index(j); + u(newpos) = -prod; + jr(j) = convert_index(newpos); + } + else + u(jpos) -= prod; + } + // store the pivot element + u(len) = fact; + ju(len) = convert_index(minrow); + ++len; + + jj++; + } // end of the elimination on the row ii + + // reset the upper part of the pointer jr to zero + for(Index k = 0; k m_droptol * rownorm ) + { + ++len; + u(ii + len) = u(ii + k); + ju(ii + len) = ju(ii + k); + } + } + sizeu = len + 1; // +1 to take into account the diagonal element + len = (std::min)(sizeu, nnzU); + typename Vector::SegmentReturnType uu(u.segment(ii+1, sizeu-1)); + typename VectorI::SegmentReturnType juu(ju.segment(ii+1, sizeu-1)); + internal::QuickSplit(uu, juu, len); + + // store the largest elements of the U part + for(Index k = ii + 1; k < ii + len; k++) + m_lu.insertBackByOuterInnerUnordered(ii,ju(k)) = u(k); + } + m_lu.finalize(); + m_lu.makeCompressed(); + + m_factorizationIsOk = true; + m_info = Success; +} + +} // end namespace Eigen + +#endif // EIGEN_INCOMPLETE_LUT_H diff --git a/include/eigen/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h b/include/eigen/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h new file mode 100644 index 0000000000000000000000000000000000000000..28a0c5109e9fef44e1db80d742988f015d00b604 --- 
/dev/null +++ b/include/eigen/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h @@ -0,0 +1,444 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2011-2014 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_ITERATIVE_SOLVER_BASE_H +#define EIGEN_ITERATIVE_SOLVER_BASE_H + +namespace Eigen { + +namespace internal { + +template +struct is_ref_compatible_impl +{ +private: + template + struct any_conversion + { + template any_conversion(const volatile T&); + template any_conversion(T&); + }; + struct yes {int a[1];}; + struct no {int a[2];}; + + template + static yes test(const Ref&, int); + template + static no test(any_conversion, ...); + +public: + static MatrixType ms_from; + enum { value = sizeof(test(ms_from, 0))==sizeof(yes) }; +}; + +template +struct is_ref_compatible +{ + enum { value = is_ref_compatible_impl::type>::value }; +}; + +template::value> +class generic_matrix_wrapper; + +// We have an explicit matrix at hand, compatible with Ref<> +template +class generic_matrix_wrapper +{ +public: + typedef Ref ActualMatrixType; + template struct ConstSelfAdjointViewReturnType { + typedef typename ActualMatrixType::template ConstSelfAdjointViewReturnType::Type Type; + }; + + enum { + MatrixFree = false + }; + + generic_matrix_wrapper() + : m_dummy(0,0), m_matrix(m_dummy) + {} + + template + generic_matrix_wrapper(const InputType &mat) + : m_matrix(mat) + {} + + const ActualMatrixType& matrix() const + { + return m_matrix; + } + + template + void grab(const EigenBase &mat) + { + m_matrix.~Ref(); + ::new (&m_matrix) Ref(mat.derived()); + } + + void grab(const Ref &mat) + { + if(&(mat.derived()) != &m_matrix) + { + m_matrix.~Ref(); + ::new (&m_matrix) Ref(mat); + } + } + +protected: + MatrixType m_dummy; // used 
to default initialize the Ref<> object + ActualMatrixType m_matrix; +}; + +// MatrixType is not compatible with Ref<> -> matrix-free wrapper +template +class generic_matrix_wrapper +{ +public: + typedef MatrixType ActualMatrixType; + template struct ConstSelfAdjointViewReturnType + { + typedef ActualMatrixType Type; + }; + + enum { + MatrixFree = true + }; + + generic_matrix_wrapper() + : mp_matrix(0) + {} + + generic_matrix_wrapper(const MatrixType &mat) + : mp_matrix(&mat) + {} + + const ActualMatrixType& matrix() const + { + return *mp_matrix; + } + + void grab(const MatrixType &mat) + { + mp_matrix = &mat; + } + +protected: + const ActualMatrixType *mp_matrix; +}; + +} + +/** \ingroup IterativeLinearSolvers_Module + * \brief Base class for linear iterative solvers + * + * \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner + */ +template< typename Derived> +class IterativeSolverBase : public SparseSolverBase +{ +protected: + typedef SparseSolverBase Base; + using Base::m_isInitialized; + +public: + typedef typename internal::traits::MatrixType MatrixType; + typedef typename internal::traits::Preconditioner Preconditioner; + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::StorageIndex StorageIndex; + typedef typename MatrixType::RealScalar RealScalar; + + enum { + ColsAtCompileTime = MatrixType::ColsAtCompileTime, + MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime + }; + +public: + + using Base::derived; + + /** Default constructor. */ + IterativeSolverBase() + { + init(); + } + + /** Initialize the solver with matrix \a A for further \c Ax=b solving. + * + * This constructor is a shortcut for the default constructor followed + * by a call to compute(). + * + * \warning this class stores a reference to the matrix A as well as some + * precomputed values that depend on it. Therefore, if \a A is changed + * this class becomes invalid. 
Call compute() to update it with the new + * matrix A, or modify a copy of A. + */ + template + explicit IterativeSolverBase(const EigenBase& A) + : m_matrixWrapper(A.derived()) + { + init(); + compute(matrix()); + } + + ~IterativeSolverBase() {} + + /** Initializes the iterative solver for the sparsity pattern of the matrix \a A for further solving \c Ax=b problems. + * + * Currently, this function mostly calls analyzePattern on the preconditioner. In the future + * we might, for instance, implement column reordering for faster matrix vector products. + */ + template + Derived& analyzePattern(const EigenBase& A) + { + grab(A.derived()); + m_preconditioner.analyzePattern(matrix()); + m_isInitialized = true; + m_analysisIsOk = true; + m_info = m_preconditioner.info(); + return derived(); + } + + /** Initializes the iterative solver with the numerical values of the matrix \a A for further solving \c Ax=b problems. + * + * Currently, this function mostly calls factorize on the preconditioner. + * + * \warning this class stores a reference to the matrix A as well as some + * precomputed values that depend on it. Therefore, if \a A is changed + * this class becomes invalid. Call compute() to update it with the new + * matrix A, or modify a copy of A. + */ + template + Derived& factorize(const EigenBase& A) + { + eigen_assert(m_analysisIsOk && "You must first call analyzePattern()"); + grab(A.derived()); + m_preconditioner.factorize(matrix()); + m_factorizationIsOk = true; + m_info = m_preconditioner.info(); + return derived(); + } + + /** Initializes the iterative solver with the matrix \a A for further solving \c Ax=b problems. + * + * Currently, this function mostly initializes/computes the preconditioner. In the future + * we might, for instance, implement column reordering for faster matrix vector products. + * + * \warning this class stores a reference to the matrix A as well as some + * precomputed values that depend on it. 
Therefore, if \a A is changed + * this class becomes invalid. Call compute() to update it with the new + * matrix A, or modify a copy of A. + */ + template + Derived& compute(const EigenBase& A) + { + grab(A.derived()); + m_preconditioner.compute(matrix()); + m_isInitialized = true; + m_analysisIsOk = true; + m_factorizationIsOk = true; + m_info = m_preconditioner.info(); + return derived(); + } + + /** \internal */ + EIGEN_CONSTEXPR Index rows() const EIGEN_NOEXCEPT { return matrix().rows(); } + + /** \internal */ + EIGEN_CONSTEXPR Index cols() const EIGEN_NOEXCEPT { return matrix().cols(); } + + /** \returns the tolerance threshold used by the stopping criteria. + * \sa setTolerance() + */ + RealScalar tolerance() const { return m_tolerance; } + + /** Sets the tolerance threshold used by the stopping criteria. + * + * This value is used as an upper bound to the relative residual error: |Ax-b|/|b|. + * The default value is the machine precision given by NumTraits::epsilon() + */ + Derived& setTolerance(const RealScalar& tolerance) + { + m_tolerance = tolerance; + return derived(); + } + + /** \returns a read-write reference to the preconditioner for custom configuration. */ + Preconditioner& preconditioner() { return m_preconditioner; } + + /** \returns a read-only reference to the preconditioner. */ + const Preconditioner& preconditioner() const { return m_preconditioner; } + + /** \returns the max number of iterations. + * It is either the value set by setMaxIterations or, by default, + * twice the number of columns of the matrix. + */ + Index maxIterations() const + { + return (m_maxIterations<0) ? 2*matrix().cols() : m_maxIterations; + } + + /** Sets the max number of iterations. + * Default is twice the number of columns of the matrix. 
+ */ + Derived& setMaxIterations(Index maxIters) + { + m_maxIterations = maxIters; + return derived(); + } + + /** \returns the number of iterations performed during the last solve */ + Index iterations() const + { + eigen_assert(m_isInitialized && "ConjugateGradient is not initialized."); + return m_iterations; + } + + /** \returns the tolerance error reached during the last solve. + * It is a close approximation of the true relative residual error |Ax-b|/|b|. + */ + RealScalar error() const + { + eigen_assert(m_isInitialized && "ConjugateGradient is not initialized."); + return m_error; + } + + /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A + * and \a x0 as an initial solution. + * + * \sa solve(), compute() + */ + template + inline const SolveWithGuess + solveWithGuess(const MatrixBase& b, const Guess& x0) const + { + eigen_assert(m_isInitialized && "Solver is not initialized."); + eigen_assert(derived().rows()==b.rows() && "solve(): invalid number of rows of the right hand side matrix b"); + return SolveWithGuess(derived(), b.derived(), x0); + } + + /** \returns Success if the iterations converged, and NoConvergence otherwise. */ + ComputationInfo info() const + { + eigen_assert(m_isInitialized && "IterativeSolverBase is not initialized."); + return m_info; + } + + /** \internal */ + template + void _solve_with_guess_impl(const Rhs& b, SparseMatrixBase &aDest) const + { + eigen_assert(rows()==b.rows()); + + Index rhsCols = b.cols(); + Index size = b.rows(); + DestDerived& dest(aDest.derived()); + typedef typename DestDerived::Scalar DestScalar; + Eigen::Matrix tb(size); + Eigen::Matrix tx(cols()); + // We do not directly fill dest because sparse expressions have to be free of aliasing issue. + // For non square least-square problems, b and dest might not have the same size whereas they might alias each-other. 
+ typename DestDerived::PlainObject tmp(cols(),rhsCols); + ComputationInfo global_info = Success; + for(Index k=0; k + typename internal::enable_if::type + _solve_with_guess_impl(const Rhs& b, MatrixBase &aDest) const + { + eigen_assert(rows()==b.rows()); + + Index rhsCols = b.cols(); + DestDerived& dest(aDest.derived()); + ComputationInfo global_info = Success; + for(Index k=0; k + typename internal::enable_if::type + _solve_with_guess_impl(const Rhs& b, MatrixBase &dest) const + { + derived()._solve_vector_with_guess_impl(b,dest.derived()); + } + + /** \internal default initial guess = 0 */ + template + void _solve_impl(const Rhs& b, Dest& x) const + { + x.setZero(); + derived()._solve_with_guess_impl(b,x); + } + +protected: + void init() + { + m_isInitialized = false; + m_analysisIsOk = false; + m_factorizationIsOk = false; + m_maxIterations = -1; + m_tolerance = NumTraits::epsilon(); + } + + typedef internal::generic_matrix_wrapper MatrixWrapper; + typedef typename MatrixWrapper::ActualMatrixType ActualMatrixType; + + const ActualMatrixType& matrix() const + { + return m_matrixWrapper.matrix(); + } + + template + void grab(const InputType &A) + { + m_matrixWrapper.grab(A); + } + + MatrixWrapper m_matrixWrapper; + Preconditioner m_preconditioner; + + Index m_maxIterations; + RealScalar m_tolerance; + + mutable RealScalar m_error; + mutable Index m_iterations; + mutable ComputationInfo m_info; + mutable bool m_analysisIsOk, m_factorizationIsOk; +}; + +} // end namespace Eigen + +#endif // EIGEN_ITERATIVE_SOLVER_BASE_H diff --git a/include/eigen/Eigen/src/IterativeLinearSolvers/LeastSquareConjugateGradient.h b/include/eigen/Eigen/src/IterativeLinearSolvers/LeastSquareConjugateGradient.h new file mode 100644 index 0000000000000000000000000000000000000000..203fd0ec63f5979870fb66546e6bd991335b8801 --- /dev/null +++ b/include/eigen/Eigen/src/IterativeLinearSolvers/LeastSquareConjugateGradient.h @@ -0,0 +1,198 @@ +// This file is part of Eigen, a lightweight C++ 
template library +// for linear algebra. +// +// Copyright (C) 2015 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_LEAST_SQUARE_CONJUGATE_GRADIENT_H +#define EIGEN_LEAST_SQUARE_CONJUGATE_GRADIENT_H + +namespace Eigen { + +namespace internal { + +/** \internal Low-level conjugate gradient algorithm for least-square problems + * \param mat The matrix A + * \param rhs The right hand side vector b + * \param x On input and initial solution, on output the computed solution. + * \param precond A preconditioner being able to efficiently solve for an + * approximation of A'Ax=b (regardless of b) + * \param iters On input the max number of iteration, on output the number of performed iterations. + * \param tol_error On input the tolerance error, on output an estimation of the relative error. + */ +template +EIGEN_DONT_INLINE +void least_square_conjugate_gradient(const MatrixType& mat, const Rhs& rhs, Dest& x, + const Preconditioner& precond, Index& iters, + typename Dest::RealScalar& tol_error) +{ + using std::sqrt; + using std::abs; + typedef typename Dest::RealScalar RealScalar; + typedef typename Dest::Scalar Scalar; + typedef Matrix VectorType; + + RealScalar tol = tol_error; + Index maxIters = iters; + + Index m = mat.rows(), n = mat.cols(); + + VectorType residual = rhs - mat * x; + VectorType normal_residual = mat.adjoint() * residual; + + RealScalar rhsNorm2 = (mat.adjoint()*rhs).squaredNorm(); + if(rhsNorm2 == 0) + { + x.setZero(); + iters = 0; + tol_error = 0; + return; + } + RealScalar threshold = tol*tol*rhsNorm2; + RealScalar residualNorm2 = normal_residual.squaredNorm(); + if (residualNorm2 < threshold) + { + iters = 0; + tol_error = sqrt(residualNorm2 / rhsNorm2); + return; + } + + VectorType p(n); + p = precond.solve(normal_residual); // initial search direction + + 
VectorType z(n), tmp(m); + RealScalar absNew = numext::real(normal_residual.dot(p)); // the square of the absolute value of r scaled by invM + Index i = 0; + while(i < maxIters) + { + tmp.noalias() = mat * p; + + Scalar alpha = absNew / tmp.squaredNorm(); // the amount we travel on dir + x += alpha * p; // update solution + residual -= alpha * tmp; // update residual + normal_residual = mat.adjoint() * residual; // update residual of the normal equation + + residualNorm2 = normal_residual.squaredNorm(); + if(residualNorm2 < threshold) + break; + + z = precond.solve(normal_residual); // approximately solve for "A'A z = normal_residual" + + RealScalar absOld = absNew; + absNew = numext::real(normal_residual.dot(z)); // update the absolute value of r + RealScalar beta = absNew / absOld; // calculate the Gram-Schmidt value used to create the new search direction + p = z + beta * p; // update search direction + i++; + } + tol_error = sqrt(residualNorm2 / rhsNorm2); + iters = i; +} + +} + +template< typename _MatrixType, + typename _Preconditioner = LeastSquareDiagonalPreconditioner > +class LeastSquaresConjugateGradient; + +namespace internal { + +template< typename _MatrixType, typename _Preconditioner> +struct traits > +{ + typedef _MatrixType MatrixType; + typedef _Preconditioner Preconditioner; +}; + +} + +/** \ingroup IterativeLinearSolvers_Module + * \brief A conjugate gradient solver for sparse (or dense) least-square problems + * + * This class allows to solve for A x = b linear problems using an iterative conjugate gradient algorithm. + * The matrix A can be non symmetric and rectangular, but the matrix A' A should be positive-definite to guaranty stability. + * Otherwise, the SparseLU or SparseQR classes might be preferable. + * The matrix A and the vectors x and b can be either dense or sparse. + * + * \tparam _MatrixType the type of the matrix A, can be a dense or a sparse matrix. + * \tparam _Preconditioner the type of the preconditioner. 
Default is LeastSquareDiagonalPreconditioner + * + * \implsparsesolverconcept + * + * The maximal number of iterations and tolerance value can be controlled via the setMaxIterations() + * and setTolerance() methods. The defaults are the size of the problem for the maximal number of iterations + * and NumTraits::epsilon() for the tolerance. + * + * This class can be used as the direct solver classes. Here is a typical usage example: + \code + int m=1000000, n = 10000; + VectorXd x(n), b(m); + SparseMatrix A(m,n); + // fill A and b + LeastSquaresConjugateGradient > lscg; + lscg.compute(A); + x = lscg.solve(b); + std::cout << "#iterations: " << lscg.iterations() << std::endl; + std::cout << "estimated error: " << lscg.error() << std::endl; + // update b, and solve again + x = lscg.solve(b); + \endcode + * + * By default the iterations start with x=0 as an initial guess of the solution. + * One can control the start using the solveWithGuess() method. + * + * \sa class ConjugateGradient, SparseLU, SparseQR + */ +template< typename _MatrixType, typename _Preconditioner> +class LeastSquaresConjugateGradient : public IterativeSolverBase > +{ + typedef IterativeSolverBase Base; + using Base::matrix; + using Base::m_error; + using Base::m_iterations; + using Base::m_info; + using Base::m_isInitialized; +public: + typedef _MatrixType MatrixType; + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::RealScalar RealScalar; + typedef _Preconditioner Preconditioner; + +public: + + /** Default constructor. */ + LeastSquaresConjugateGradient() : Base() {} + + /** Initialize the solver with matrix \a A for further \c Ax=b solving. + * + * This constructor is a shortcut for the default constructor followed + * by a call to compute(). + * + * \warning this class stores a reference to the matrix A as well as some + * precomputed values that depend on it. Therefore, if \a A is changed + * this class becomes invalid. 
Call compute() to update it with the new + * matrix A, or modify a copy of A. + */ + template + explicit LeastSquaresConjugateGradient(const EigenBase& A) : Base(A.derived()) {} + + ~LeastSquaresConjugateGradient() {} + + /** \internal */ + template + void _solve_vector_with_guess_impl(const Rhs& b, Dest& x) const + { + m_iterations = Base::maxIterations(); + m_error = Base::m_tolerance; + + internal::least_square_conjugate_gradient(matrix(), b, x, Base::m_preconditioner, m_iterations, m_error); + m_info = m_error <= Base::m_tolerance ? Success : NoConvergence; + } + +}; + +} // end namespace Eigen + +#endif // EIGEN_LEAST_SQUARE_CONJUGATE_GRADIENT_H diff --git a/include/eigen/Eigen/src/IterativeLinearSolvers/SolveWithGuess.h b/include/eigen/Eigen/src/IterativeLinearSolvers/SolveWithGuess.h new file mode 100644 index 0000000000000000000000000000000000000000..7b896575428056e83c36fcfd2cf11672696d685e --- /dev/null +++ b/include/eigen/Eigen/src/IterativeLinearSolvers/SolveWithGuess.h @@ -0,0 +1,117 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_SOLVEWITHGUESS_H +#define EIGEN_SOLVEWITHGUESS_H + +namespace Eigen { + +template class SolveWithGuess; + +/** \class SolveWithGuess + * \ingroup IterativeLinearSolvers_Module + * + * \brief Pseudo expression representing a solving operation + * + * \tparam Decomposition the type of the matrix or decomposion object + * \tparam Rhstype the type of the right-hand side + * + * This class represents an expression of A.solve(B) + * and most of the time this is the only way it is used. 
+ * + */ +namespace internal { + + +template +struct traits > + : traits > +{}; + +} + + +template +class SolveWithGuess : public internal::generic_xpr_base, MatrixXpr, typename internal::traits::StorageKind>::type +{ +public: + typedef typename internal::traits::Scalar Scalar; + typedef typename internal::traits::PlainObject PlainObject; + typedef typename internal::generic_xpr_base, MatrixXpr, typename internal::traits::StorageKind>::type Base; + typedef typename internal::ref_selector::type Nested; + + SolveWithGuess(const Decomposition &dec, const RhsType &rhs, const GuessType &guess) + : m_dec(dec), m_rhs(rhs), m_guess(guess) + {} + + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + Index rows() const EIGEN_NOEXCEPT { return m_dec.cols(); } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + Index cols() const EIGEN_NOEXCEPT { return m_rhs.cols(); } + + EIGEN_DEVICE_FUNC const Decomposition& dec() const { return m_dec; } + EIGEN_DEVICE_FUNC const RhsType& rhs() const { return m_rhs; } + EIGEN_DEVICE_FUNC const GuessType& guess() const { return m_guess; } + +protected: + const Decomposition &m_dec; + const RhsType &m_rhs; + const GuessType &m_guess; + +private: + Scalar coeff(Index row, Index col) const; + Scalar coeff(Index i) const; +}; + +namespace internal { + +// Evaluator of SolveWithGuess -> eval into a temporary +template +struct evaluator > + : public evaluator::PlainObject> +{ + typedef SolveWithGuess SolveType; + typedef typename SolveType::PlainObject PlainObject; + typedef evaluator Base; + + evaluator(const SolveType& solve) + : m_result(solve.rows(), solve.cols()) + { + ::new (static_cast(this)) Base(m_result); + m_result = solve.guess(); + solve.dec()._solve_with_guess_impl(solve.rhs(), m_result); + } + +protected: + PlainObject m_result; +}; + +// Specialization for "dst = dec.solveWithGuess(rhs)" +// NOTE we need to specialize it for Dense2Dense to avoid ambiguous specialization error and a Sparse2Sparse specialization must exist somewhere +template +struct Assignment, 
internal::assign_op, Dense2Dense> +{ + typedef SolveWithGuess SrcXprType; + static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op &) + { + Index dstRows = src.rows(); + Index dstCols = src.cols(); + if((dst.rows()!=dstRows) || (dst.cols()!=dstCols)) + dst.resize(dstRows, dstCols); + + dst = src.guess(); + src.dec()._solve_with_guess_impl(src.rhs(), dst/*, src.guess()*/); + } +}; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_SOLVEWITHGUESS_H diff --git a/include/eigen/Eigen/src/PaStiXSupport/PaStiXSupport.h b/include/eigen/Eigen/src/PaStiXSupport/PaStiXSupport.h new file mode 100644 index 0000000000000000000000000000000000000000..37426877ad4338463f796209fe884b5fb3499704 --- /dev/null +++ b/include/eigen/Eigen/src/PaStiXSupport/PaStiXSupport.h @@ -0,0 +1,678 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2012 Désiré Nuentsa-Wakam +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_PASTIXSUPPORT_H +#define EIGEN_PASTIXSUPPORT_H + +namespace Eigen { + +#if defined(DCOMPLEX) + #define PASTIX_COMPLEX COMPLEX + #define PASTIX_DCOMPLEX DCOMPLEX +#else + #define PASTIX_COMPLEX std::complex + #define PASTIX_DCOMPLEX std::complex +#endif + +/** \ingroup PaStiXSupport_Module + * \brief Interface to the PaStix solver + * + * This class is used to solve the linear systems A.X = B via the PaStix library. + * The matrix can be either real or complex, symmetric or not. 
+ * + * \sa TutorialSparseDirectSolvers + */ +template class PastixLU; +template class PastixLLT; +template class PastixLDLT; + +namespace internal +{ + + template struct pastix_traits; + + template + struct pastix_traits< PastixLU<_MatrixType> > + { + typedef _MatrixType MatrixType; + typedef typename _MatrixType::Scalar Scalar; + typedef typename _MatrixType::RealScalar RealScalar; + typedef typename _MatrixType::StorageIndex StorageIndex; + }; + + template + struct pastix_traits< PastixLLT<_MatrixType,Options> > + { + typedef _MatrixType MatrixType; + typedef typename _MatrixType::Scalar Scalar; + typedef typename _MatrixType::RealScalar RealScalar; + typedef typename _MatrixType::StorageIndex StorageIndex; + }; + + template + struct pastix_traits< PastixLDLT<_MatrixType,Options> > + { + typedef _MatrixType MatrixType; + typedef typename _MatrixType::Scalar Scalar; + typedef typename _MatrixType::RealScalar RealScalar; + typedef typename _MatrixType::StorageIndex StorageIndex; + }; + + inline void eigen_pastix(pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, float *vals, int *perm, int * invp, float *x, int nbrhs, int *iparm, double *dparm) + { + if (n == 0) { ptr = NULL; idx = NULL; vals = NULL; } + if (nbrhs == 0) {x = NULL; nbrhs=1;} + s_pastix(pastix_data, pastix_comm, n, ptr, idx, vals, perm, invp, x, nbrhs, iparm, dparm); + } + + inline void eigen_pastix(pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, double *vals, int *perm, int * invp, double *x, int nbrhs, int *iparm, double *dparm) + { + if (n == 0) { ptr = NULL; idx = NULL; vals = NULL; } + if (nbrhs == 0) {x = NULL; nbrhs=1;} + d_pastix(pastix_data, pastix_comm, n, ptr, idx, vals, perm, invp, x, nbrhs, iparm, dparm); + } + + inline void eigen_pastix(pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, std::complex *vals, int *perm, int * invp, std::complex *x, int nbrhs, int *iparm, double *dparm) + { + if (n == 0) { ptr = NULL; idx 
= NULL; vals = NULL; } + if (nbrhs == 0) {x = NULL; nbrhs=1;} + c_pastix(pastix_data, pastix_comm, n, ptr, idx, reinterpret_cast(vals), perm, invp, reinterpret_cast(x), nbrhs, iparm, dparm); + } + + inline void eigen_pastix(pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, std::complex *vals, int *perm, int * invp, std::complex *x, int nbrhs, int *iparm, double *dparm) + { + if (n == 0) { ptr = NULL; idx = NULL; vals = NULL; } + if (nbrhs == 0) {x = NULL; nbrhs=1;} + z_pastix(pastix_data, pastix_comm, n, ptr, idx, reinterpret_cast(vals), perm, invp, reinterpret_cast(x), nbrhs, iparm, dparm); + } + + // Convert the matrix to Fortran-style Numbering + template + void c_to_fortran_numbering (MatrixType& mat) + { + if ( !(mat.outerIndexPtr()[0]) ) + { + int i; + for(i = 0; i <= mat.rows(); ++i) + ++mat.outerIndexPtr()[i]; + for(i = 0; i < mat.nonZeros(); ++i) + ++mat.innerIndexPtr()[i]; + } + } + + // Convert to C-style Numbering + template + void fortran_to_c_numbering (MatrixType& mat) + { + // Check the Numbering + if ( mat.outerIndexPtr()[0] == 1 ) + { // Convert to C-style numbering + int i; + for(i = 0; i <= mat.rows(); ++i) + --mat.outerIndexPtr()[i]; + for(i = 0; i < mat.nonZeros(); ++i) + --mat.innerIndexPtr()[i]; + } + } +} + +// This is the base class to interface with PaStiX functions. +// Users should not used this class directly. 
+template +class PastixBase : public SparseSolverBase +{ + protected: + typedef SparseSolverBase Base; + using Base::derived; + using Base::m_isInitialized; + public: + using Base::_solve_impl; + + typedef typename internal::pastix_traits::MatrixType _MatrixType; + typedef _MatrixType MatrixType; + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::RealScalar RealScalar; + typedef typename MatrixType::StorageIndex StorageIndex; + typedef Matrix Vector; + typedef SparseMatrix ColSpMatrix; + enum { + ColsAtCompileTime = MatrixType::ColsAtCompileTime, + MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime + }; + + public: + + PastixBase() : m_initisOk(false), m_analysisIsOk(false), m_factorizationIsOk(false), m_pastixdata(0), m_size(0) + { + init(); + } + + ~PastixBase() + { + clean(); + } + + template + bool _solve_impl(const MatrixBase &b, MatrixBase &x) const; + + /** Returns a reference to the integer vector IPARM of PaStiX parameters + * to modify the default parameters. + * The statistics related to the different phases of factorization and solve are saved here as well + * \sa analyzePattern() factorize() + */ + Array& iparm() + { + return m_iparm; + } + + /** Return a reference to a particular index parameter of the IPARM vector + * \sa iparm() + */ + + int& iparm(int idxparam) + { + return m_iparm(idxparam); + } + + /** Returns a reference to the double vector DPARM of PaStiX parameters + * The statistics related to the different phases of factorization and solve are saved here as well + * \sa analyzePattern() factorize() + */ + Array& dparm() + { + return m_dparm; + } + + + /** Return a reference to a particular index parameter of the DPARM vector + * \sa dparm() + */ + double& dparm(int idxparam) + { + return m_dparm(idxparam); + } + + inline Index cols() const { return m_size; } + inline Index rows() const { return m_size; } + + /** \brief Reports whether previous computation was successful. 
+ * + * \returns \c Success if computation was successful, + * \c NumericalIssue if the PaStiX reports a problem + * \c InvalidInput if the input matrix is invalid + * + * \sa iparm() + */ + ComputationInfo info() const + { + eigen_assert(m_isInitialized && "Decomposition is not initialized."); + return m_info; + } + + protected: + + // Initialize the Pastix data structure, check the matrix + void init(); + + // Compute the ordering and the symbolic factorization + void analyzePattern(ColSpMatrix& mat); + + // Compute the numerical factorization + void factorize(ColSpMatrix& mat); + + // Free all the data allocated by Pastix + void clean() + { + eigen_assert(m_initisOk && "The Pastix structure should be allocated first"); + m_iparm(IPARM_START_TASK) = API_TASK_CLEAN; + m_iparm(IPARM_END_TASK) = API_TASK_CLEAN; + internal::eigen_pastix(&m_pastixdata, MPI_COMM_WORLD, 0, 0, 0, (Scalar*)0, + m_perm.data(), m_invp.data(), 0, 0, m_iparm.data(), m_dparm.data()); + } + + void compute(ColSpMatrix& mat); + + int m_initisOk; + int m_analysisIsOk; + int m_factorizationIsOk; + mutable ComputationInfo m_info; + mutable pastix_data_t *m_pastixdata; // Data structure for pastix + mutable int m_comm; // The MPI communicator identifier + mutable Array m_iparm; // integer vector for the input parameters + mutable Array m_dparm; // Scalar vector for the input parameters + mutable Matrix m_perm; // Permutation vector + mutable Matrix m_invp; // Inverse permutation vector + mutable int m_size; // Size of the matrix +}; + + /** Initialize the PaStiX data structure. 
+ *A first call to this function fills iparm and dparm with the default PaStiX parameters + * \sa iparm() dparm() + */ +template +void PastixBase::init() +{ + m_size = 0; + m_iparm.setZero(IPARM_SIZE); + m_dparm.setZero(DPARM_SIZE); + + m_iparm(IPARM_MODIFY_PARAMETER) = API_NO; + pastix(&m_pastixdata, MPI_COMM_WORLD, + 0, 0, 0, 0, + 0, 0, 0, 1, m_iparm.data(), m_dparm.data()); + + m_iparm[IPARM_MATRIX_VERIFICATION] = API_NO; + m_iparm[IPARM_VERBOSE] = API_VERBOSE_NOT; + m_iparm[IPARM_ORDERING] = API_ORDER_SCOTCH; + m_iparm[IPARM_INCOMPLETE] = API_NO; + m_iparm[IPARM_OOC_LIMIT] = 2000; + m_iparm[IPARM_RHS_MAKING] = API_RHS_B; + m_iparm(IPARM_MATRIX_VERIFICATION) = API_NO; + + m_iparm(IPARM_START_TASK) = API_TASK_INIT; + m_iparm(IPARM_END_TASK) = API_TASK_INIT; + internal::eigen_pastix(&m_pastixdata, MPI_COMM_WORLD, 0, 0, 0, (Scalar*)0, + 0, 0, 0, 0, m_iparm.data(), m_dparm.data()); + + // Check the returned error + if(m_iparm(IPARM_ERROR_NUMBER)) { + m_info = InvalidInput; + m_initisOk = false; + } + else { + m_info = Success; + m_initisOk = true; + } +} + +template +void PastixBase::compute(ColSpMatrix& mat) +{ + eigen_assert(mat.rows() == mat.cols() && "The input matrix should be squared"); + + analyzePattern(mat); + factorize(mat); + + m_iparm(IPARM_MATRIX_VERIFICATION) = API_NO; +} + + +template +void PastixBase::analyzePattern(ColSpMatrix& mat) +{ + eigen_assert(m_initisOk && "The initialization of PaSTiX failed"); + + // clean previous calls + if(m_size>0) + clean(); + + m_size = internal::convert_index(mat.rows()); + m_perm.resize(m_size); + m_invp.resize(m_size); + + m_iparm(IPARM_START_TASK) = API_TASK_ORDERING; + m_iparm(IPARM_END_TASK) = API_TASK_ANALYSE; + internal::eigen_pastix(&m_pastixdata, MPI_COMM_WORLD, m_size, mat.outerIndexPtr(), mat.innerIndexPtr(), + mat.valuePtr(), m_perm.data(), m_invp.data(), 0, 0, m_iparm.data(), m_dparm.data()); + + // Check the returned error + if(m_iparm(IPARM_ERROR_NUMBER)) + { + m_info = NumericalIssue; + 
m_analysisIsOk = false; + } + else + { + m_info = Success; + m_analysisIsOk = true; + } +} + +template +void PastixBase::factorize(ColSpMatrix& mat) +{ +// if(&m_cpyMat != &mat) m_cpyMat = mat; + eigen_assert(m_analysisIsOk && "The analysis phase should be called before the factorization phase"); + m_iparm(IPARM_START_TASK) = API_TASK_NUMFACT; + m_iparm(IPARM_END_TASK) = API_TASK_NUMFACT; + m_size = internal::convert_index(mat.rows()); + + internal::eigen_pastix(&m_pastixdata, MPI_COMM_WORLD, m_size, mat.outerIndexPtr(), mat.innerIndexPtr(), + mat.valuePtr(), m_perm.data(), m_invp.data(), 0, 0, m_iparm.data(), m_dparm.data()); + + // Check the returned error + if(m_iparm(IPARM_ERROR_NUMBER)) + { + m_info = NumericalIssue; + m_factorizationIsOk = false; + m_isInitialized = false; + } + else + { + m_info = Success; + m_factorizationIsOk = true; + m_isInitialized = true; + } +} + +/* Solve the system */ +template +template +bool PastixBase::_solve_impl(const MatrixBase &b, MatrixBase &x) const +{ + eigen_assert(m_isInitialized && "The matrix should be factorized first"); + EIGEN_STATIC_ASSERT((Dest::Flags&RowMajorBit)==0, + THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES); + int rhs = 1; + + x = b; /* on return, x is overwritten by the computed solution */ + + for (int i = 0; i < b.cols(); i++){ + m_iparm[IPARM_START_TASK] = API_TASK_SOLVE; + m_iparm[IPARM_END_TASK] = API_TASK_REFINE; + + internal::eigen_pastix(&m_pastixdata, MPI_COMM_WORLD, internal::convert_index(x.rows()), 0, 0, 0, + m_perm.data(), m_invp.data(), &x(0, i), rhs, m_iparm.data(), m_dparm.data()); + } + + // Check the returned error + m_info = m_iparm(IPARM_ERROR_NUMBER)==0 ? Success : NumericalIssue; + + return m_iparm(IPARM_ERROR_NUMBER)==0; +} + +/** \ingroup PaStiXSupport_Module + * \class PastixLU + * \brief Sparse direct LU solver based on PaStiX library + * + * This class is used to solve the linear systems A.X = B with a supernodal LU + * factorization in the PaStiX library. 
The matrix A should be squared and nonsingular + * PaStiX requires that the matrix A has a symmetric structural pattern. + * This interface can symmetrize the input matrix otherwise. + * The vectors or matrices X and B can be either dense or sparse. + * + * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<> + * \tparam IsStrSym Indicates if the input matrix has a symmetric pattern, default is false + * NOTE : Note that if the analysis and factorization phase are called separately, + * the input matrix will be symmetrized at each call, hence it is advised to + * symmetrize the matrix in a end-user program and set \p IsStrSym to true + * + * \implsparsesolverconcept + * + * \sa \ref TutorialSparseSolverConcept, class SparseLU + * + */ +template +class PastixLU : public PastixBase< PastixLU<_MatrixType> > +{ + public: + typedef _MatrixType MatrixType; + typedef PastixBase > Base; + typedef typename Base::ColSpMatrix ColSpMatrix; + typedef typename MatrixType::StorageIndex StorageIndex; + + public: + PastixLU() : Base() + { + init(); + } + + explicit PastixLU(const MatrixType& matrix):Base() + { + init(); + compute(matrix); + } + /** Compute the LU supernodal factorization of \p matrix. + * iparm and dparm can be used to tune the PaStiX parameters. + * see the PaStiX user's manual + * \sa analyzePattern() factorize() + */ + void compute (const MatrixType& matrix) + { + m_structureIsUptodate = false; + ColSpMatrix temp; + grabMatrix(matrix, temp); + Base::compute(temp); + } + /** Compute the LU symbolic factorization of \p matrix using its sparsity pattern. + * Several ordering methods can be used at this step. See the PaStiX user's manual. 
+ * The result of this operation can be used with successive matrices having the same pattern as \p matrix + * \sa factorize() + */ + void analyzePattern(const MatrixType& matrix) + { + m_structureIsUptodate = false; + ColSpMatrix temp; + grabMatrix(matrix, temp); + Base::analyzePattern(temp); + } + + /** Compute the LU supernodal factorization of \p matrix + * WARNING The matrix \p matrix should have the same structural pattern + * as the same used in the analysis phase. + * \sa analyzePattern() + */ + void factorize(const MatrixType& matrix) + { + ColSpMatrix temp; + grabMatrix(matrix, temp); + Base::factorize(temp); + } + protected: + + void init() + { + m_structureIsUptodate = false; + m_iparm(IPARM_SYM) = API_SYM_NO; + m_iparm(IPARM_FACTORIZATION) = API_FACT_LU; + } + + void grabMatrix(const MatrixType& matrix, ColSpMatrix& out) + { + if(IsStrSym) + out = matrix; + else + { + if(!m_structureIsUptodate) + { + // update the transposed structure + m_transposedStructure = matrix.transpose(); + + // Set the elements of the matrix to zero + for (Index j=0; j + * \tparam UpLo The part of the matrix to use : Lower or Upper. 
The default is Lower as required by PaStiX + * + * \implsparsesolverconcept + * + * \sa \ref TutorialSparseSolverConcept, class SimplicialLLT + */ +template +class PastixLLT : public PastixBase< PastixLLT<_MatrixType, _UpLo> > +{ + public: + typedef _MatrixType MatrixType; + typedef PastixBase > Base; + typedef typename Base::ColSpMatrix ColSpMatrix; + + public: + enum { UpLo = _UpLo }; + PastixLLT() : Base() + { + init(); + } + + explicit PastixLLT(const MatrixType& matrix):Base() + { + init(); + compute(matrix); + } + + /** Compute the L factor of the LL^T supernodal factorization of \p matrix + * \sa analyzePattern() factorize() + */ + void compute (const MatrixType& matrix) + { + ColSpMatrix temp; + grabMatrix(matrix, temp); + Base::compute(temp); + } + + /** Compute the LL^T symbolic factorization of \p matrix using its sparsity pattern + * The result of this operation can be used with successive matrices having the same pattern as \p matrix + * \sa factorize() + */ + void analyzePattern(const MatrixType& matrix) + { + ColSpMatrix temp; + grabMatrix(matrix, temp); + Base::analyzePattern(temp); + } + /** Compute the LL^T supernodal numerical factorization of \p matrix + * \sa analyzePattern() + */ + void factorize(const MatrixType& matrix) + { + ColSpMatrix temp; + grabMatrix(matrix, temp); + Base::factorize(temp); + } + protected: + using Base::m_iparm; + + void init() + { + m_iparm(IPARM_SYM) = API_SYM_YES; + m_iparm(IPARM_FACTORIZATION) = API_FACT_LLT; + } + + void grabMatrix(const MatrixType& matrix, ColSpMatrix& out) + { + out.resize(matrix.rows(), matrix.cols()); + // Pastix supports only lower, column-major matrices + out.template selfadjointView() = matrix.template selfadjointView(); + internal::c_to_fortran_numbering(out); + } +}; + +/** \ingroup PaStiXSupport_Module + * \class PastixLDLT + * \brief A sparse direct supernodal Cholesky (LLT) factorization and solver based on the PaStiX library + * + * This class is used to solve the linear systems A.X = 
B via a LDL^T supernodal Cholesky factorization + * available in the PaStiX library. The matrix A should be symmetric and positive definite + * WARNING Selfadjoint complex matrices are not supported in the current version of PaStiX + * The vectors or matrices X and B can be either dense or sparse + * + * \tparam MatrixType the type of the sparse matrix A, it must be a SparseMatrix<> + * \tparam UpLo The part of the matrix to use : Lower or Upper. The default is Lower as required by PaStiX + * + * \implsparsesolverconcept + * + * \sa \ref TutorialSparseSolverConcept, class SimplicialLDLT + */ +template +class PastixLDLT : public PastixBase< PastixLDLT<_MatrixType, _UpLo> > +{ + public: + typedef _MatrixType MatrixType; + typedef PastixBase > Base; + typedef typename Base::ColSpMatrix ColSpMatrix; + + public: + enum { UpLo = _UpLo }; + PastixLDLT():Base() + { + init(); + } + + explicit PastixLDLT(const MatrixType& matrix):Base() + { + init(); + compute(matrix); + } + + /** Compute the L and D factors of the LDL^T factorization of \p matrix + * \sa analyzePattern() factorize() + */ + void compute (const MatrixType& matrix) + { + ColSpMatrix temp; + grabMatrix(matrix, temp); + Base::compute(temp); + } + + /** Compute the LDL^T symbolic factorization of \p matrix using its sparsity pattern + * The result of this operation can be used with successive matrices having the same pattern as \p matrix + * \sa factorize() + */ + void analyzePattern(const MatrixType& matrix) + { + ColSpMatrix temp; + grabMatrix(matrix, temp); + Base::analyzePattern(temp); + } + /** Compute the LDL^T supernodal numerical factorization of \p matrix + * + */ + void factorize(const MatrixType& matrix) + { + ColSpMatrix temp; + grabMatrix(matrix, temp); + Base::factorize(temp); + } + + protected: + using Base::m_iparm; + + void init() + { + m_iparm(IPARM_SYM) = API_SYM_YES; + m_iparm(IPARM_FACTORIZATION) = API_FACT_LDLT; + } + + void grabMatrix(const MatrixType& matrix, ColSpMatrix& out) + { + // 
Pastix supports only lower, column-major matrices + out.resize(matrix.rows(), matrix.cols()); + out.template selfadjointView() = matrix.template selfadjointView(); + internal::c_to_fortran_numbering(out); + } +}; + +} // end namespace Eigen + +#endif diff --git a/include/eigen/Eigen/src/SVD/BDCSVD.h b/include/eigen/Eigen/src/SVD/BDCSVD.h new file mode 100644 index 0000000000000000000000000000000000000000..79a6562b7cf469a028bfa37fbaf6736dd9f53bd4 --- /dev/null +++ b/include/eigen/Eigen/src/SVD/BDCSVD.h @@ -0,0 +1,1377 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// We used the "A Divide-And-Conquer Algorithm for the Bidiagonal SVD" +// research report written by Ming Gu and Stanley C.Eisenstat +// The code variable names correspond to the names they used in their +// report +// +// Copyright (C) 2013 Gauthier Brun +// Copyright (C) 2013 Nicolas Carre +// Copyright (C) 2013 Jean Ceccato +// Copyright (C) 2013 Pierre Zoppitelli +// Copyright (C) 2013 Jitse Niesen +// Copyright (C) 2014-2017 Gael Guennebaud +// +// Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_BDCSVD_H +#define EIGEN_BDCSVD_H +// #define EIGEN_BDCSVD_DEBUG_VERBOSE +// #define EIGEN_BDCSVD_SANITY_CHECKS + +#ifdef EIGEN_BDCSVD_SANITY_CHECKS +#undef eigen_internal_assert +#define eigen_internal_assert(X) assert(X); +#endif + +#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE +#include +#endif + +namespace Eigen { + +#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE +IOFormat bdcsvdfmt(8, 0, ", ", "\n", " [", "]"); +#endif + +template class BDCSVD; + +namespace internal { + +template +struct traits > + : traits<_MatrixType> +{ + typedef _MatrixType MatrixType; +}; + +} // end namespace internal + + +/** \ingroup SVD_Module + * + * + * \class BDCSVD + * + * \brief class Bidiagonal Divide and Conquer SVD + * + * \tparam _MatrixType the type of the matrix of which we are computing the SVD decomposition + * + * This class first reduces the input matrix to bi-diagonal form using class UpperBidiagonalization, + * and then performs a divide-and-conquer diagonalization. Small blocks are diagonalized using class JacobiSVD. + * You can control the switching size with the setSwitchSize() method, default is 16. + * For small matrice (<16), it is thus preferable to directly use JacobiSVD. For larger ones, BDCSVD is highly + * recommended and can several order of magnitude faster. + * + * \warning this algorithm is unlikely to provide accurate result when compiled with unsafe math optimizations. + * For instance, this concerns Intel's compiler (ICC), which performs such optimization by default unless + * you compile with the \c -fp-model \c precise option. Likewise, the \c -ffast-math option of GCC or clang will + * significantly degrade the accuracy. 
+ * + * \sa class JacobiSVD + */ +template +class BDCSVD : public SVDBase > +{ + typedef SVDBase Base; + +public: + using Base::rows; + using Base::cols; + using Base::computeU; + using Base::computeV; + + typedef _MatrixType MatrixType; + typedef typename MatrixType::Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + typedef typename NumTraits::Literal Literal; + enum { + RowsAtCompileTime = MatrixType::RowsAtCompileTime, + ColsAtCompileTime = MatrixType::ColsAtCompileTime, + DiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime, ColsAtCompileTime), + MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, + MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime, + MaxDiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(MaxRowsAtCompileTime, MaxColsAtCompileTime), + MatrixOptions = MatrixType::Options + }; + + typedef typename Base::MatrixUType MatrixUType; + typedef typename Base::MatrixVType MatrixVType; + typedef typename Base::SingularValuesType SingularValuesType; + + typedef Matrix MatrixX; + typedef Matrix MatrixXr; + typedef Matrix VectorType; + typedef Array ArrayXr; + typedef Array ArrayXi; + typedef Ref ArrayRef; + typedef Ref IndicesRef; + + /** \brief Default Constructor. + * + * The default constructor is useful in cases in which the user intends to + * perform decompositions via BDCSVD::compute(const MatrixType&). + */ + BDCSVD() : m_algoswap(16), m_isTranspose(false), m_compU(false), m_compV(false), m_numIters(0) + {} + + + /** \brief Default Constructor with memory preallocation + * + * Like the default constructor but with preallocation of the internal data + * according to the specified problem size. + * \sa BDCSVD() + */ + BDCSVD(Index rows, Index cols, unsigned int computationOptions = 0) + : m_algoswap(16), m_numIters(0) + { + allocate(rows, cols, computationOptions); + } + + /** \brief Constructor performing the decomposition of given matrix. 
+ * + * \param matrix the matrix to decompose + * \param computationOptions optional parameter allowing to specify if you want full or thin U or V unitaries to be computed. + * By default, none is computed. This is a bit - field, the possible bits are #ComputeFullU, #ComputeThinU, + * #ComputeFullV, #ComputeThinV. + * + * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not + * available with the (non - default) FullPivHouseholderQR preconditioner. + */ + BDCSVD(const MatrixType& matrix, unsigned int computationOptions = 0) + : m_algoswap(16), m_numIters(0) + { + compute(matrix, computationOptions); + } + + ~BDCSVD() + { + } + + /** \brief Method performing the decomposition of given matrix using custom options. + * + * \param matrix the matrix to decompose + * \param computationOptions optional parameter allowing to specify if you want full or thin U or V unitaries to be computed. + * By default, none is computed. This is a bit - field, the possible bits are #ComputeFullU, #ComputeThinU, + * #ComputeFullV, #ComputeThinV. + * + * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not + * available with the (non - default) FullPivHouseholderQR preconditioner. + */ + BDCSVD& compute(const MatrixType& matrix, unsigned int computationOptions); + + /** \brief Method performing the decomposition of given matrix using current options. + * + * \param matrix the matrix to decompose + * + * This method uses the current \a computationOptions, as already passed to the constructor or to compute(const MatrixType&, unsigned int). 
+ */ + BDCSVD& compute(const MatrixType& matrix) + { + return compute(matrix, this->m_computationOptions); + } + + void setSwitchSize(int s) + { + eigen_assert(s>=3 && "BDCSVD the size of the algo switch has to be at least 3."); + m_algoswap = s; + } + +private: + void allocate(Index rows, Index cols, unsigned int computationOptions); + void divide(Index firstCol, Index lastCol, Index firstRowW, Index firstColW, Index shift); + void computeSVDofM(Index firstCol, Index n, MatrixXr& U, VectorType& singVals, MatrixXr& V); + void computeSingVals(const ArrayRef& col0, const ArrayRef& diag, const IndicesRef& perm, VectorType& singVals, ArrayRef shifts, ArrayRef mus); + void perturbCol0(const ArrayRef& col0, const ArrayRef& diag, const IndicesRef& perm, const VectorType& singVals, const ArrayRef& shifts, const ArrayRef& mus, ArrayRef zhat); + void computeSingVecs(const ArrayRef& zhat, const ArrayRef& diag, const IndicesRef& perm, const VectorType& singVals, const ArrayRef& shifts, const ArrayRef& mus, MatrixXr& U, MatrixXr& V); + void deflation43(Index firstCol, Index shift, Index i, Index size); + void deflation44(Index firstColu , Index firstColm, Index firstRowW, Index firstColW, Index i, Index j, Index size); + void deflation(Index firstCol, Index lastCol, Index k, Index firstRowW, Index firstColW, Index shift); + template + void copyUV(const HouseholderU &householderU, const HouseholderV &householderV, const NaiveU &naiveU, const NaiveV &naivev); + void structured_update(Block A, const MatrixXr &B, Index n1); + static RealScalar secularEq(RealScalar x, const ArrayRef& col0, const ArrayRef& diag, const IndicesRef &perm, const ArrayRef& diagShifted, RealScalar shift); + +protected: + MatrixXr m_naiveU, m_naiveV; + MatrixXr m_computed; + Index m_nRec; + ArrayXr m_workspace; + ArrayXi m_workspaceI; + int m_algoswap; + bool m_isTranspose, m_compU, m_compV; + + using Base::m_singularValues; + using Base::m_diagSize; + using Base::m_computeFullU; + using 
Base::m_computeFullV; + using Base::m_computeThinU; + using Base::m_computeThinV; + using Base::m_matrixU; + using Base::m_matrixV; + using Base::m_info; + using Base::m_isInitialized; + using Base::m_nonzeroSingularValues; + +public: + int m_numIters; +}; //end class BDCSVD + + +// Method to allocate and initialize matrix and attributes +template +void BDCSVD::allocate(Eigen::Index rows, Eigen::Index cols, unsigned int computationOptions) +{ + m_isTranspose = (cols > rows); + + if (Base::allocate(rows, cols, computationOptions)) + return; + + m_computed = MatrixXr::Zero(m_diagSize + 1, m_diagSize ); + m_compU = computeV(); + m_compV = computeU(); + if (m_isTranspose) + std::swap(m_compU, m_compV); + + if (m_compU) m_naiveU = MatrixXr::Zero(m_diagSize + 1, m_diagSize + 1 ); + else m_naiveU = MatrixXr::Zero(2, m_diagSize + 1 ); + + if (m_compV) m_naiveV = MatrixXr::Zero(m_diagSize, m_diagSize); + + m_workspace.resize((m_diagSize+1)*(m_diagSize+1)*3); + m_workspaceI.resize(3*m_diagSize); +}// end allocate + +template +BDCSVD& BDCSVD::compute(const MatrixType& matrix, unsigned int computationOptions) +{ +#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE + std::cout << "\n\n\n======================================================================================================================\n\n\n"; +#endif + allocate(matrix.rows(), matrix.cols(), computationOptions); + using std::abs; + + const RealScalar considerZero = (std::numeric_limits::min)(); + + //**** step -1 - If the problem is too small, directly falls back to JacobiSVD and return + if(matrix.cols() < m_algoswap) + { + // FIXME this line involves temporaries + JacobiSVD jsvd(matrix,computationOptions); + m_isInitialized = true; + m_info = jsvd.info(); + if (m_info == Success || m_info == NoConvergence) { + if(computeU()) m_matrixU = jsvd.matrixU(); + if(computeV()) m_matrixV = jsvd.matrixV(); + m_singularValues = jsvd.singularValues(); + m_nonzeroSingularValues = jsvd.nonzeroSingularValues(); + } + return *this; + } + + 
//**** step 0 - Copy the input matrix and apply scaling to reduce over/under-flows + RealScalar scale = matrix.cwiseAbs().template maxCoeff(); + if (!(numext::isfinite)(scale)) { + m_isInitialized = true; + m_info = InvalidInput; + return *this; + } + + if(scale==Literal(0)) scale = Literal(1); + MatrixX copy; + if (m_isTranspose) copy = matrix.adjoint()/scale; + else copy = matrix/scale; + + //**** step 1 - Bidiagonalization + // FIXME this line involves temporaries + internal::UpperBidiagonalization bid(copy); + + //**** step 2 - Divide & Conquer + m_naiveU.setZero(); + m_naiveV.setZero(); + // FIXME this line involves a temporary matrix + m_computed.topRows(m_diagSize) = bid.bidiagonal().toDenseMatrix().transpose(); + m_computed.template bottomRows<1>().setZero(); + divide(0, m_diagSize - 1, 0, 0, 0); + if (m_info != Success && m_info != NoConvergence) { + m_isInitialized = true; + return *this; + } + + //**** step 3 - Copy singular values and vectors + for (int i=0; i +template +void BDCSVD::copyUV(const HouseholderU &householderU, const HouseholderV &householderV, const NaiveU &naiveU, const NaiveV &naiveV) +{ + // Note exchange of U and V: m_matrixU is set from m_naiveV and vice versa + if (computeU()) + { + Index Ucols = m_computeThinU ? m_diagSize : householderU.cols(); + m_matrixU = MatrixX::Identity(householderU.cols(), Ucols); + m_matrixU.topLeftCorner(m_diagSize, m_diagSize) = naiveV.template cast().topLeftCorner(m_diagSize, m_diagSize); + householderU.applyThisOnTheLeft(m_matrixU); // FIXME this line involves a temporary buffer + } + if (computeV()) + { + Index Vcols = m_computeThinV ? 
m_diagSize : householderV.cols(); + m_matrixV = MatrixX::Identity(householderV.cols(), Vcols); + m_matrixV.topLeftCorner(m_diagSize, m_diagSize) = naiveU.template cast().topLeftCorner(m_diagSize, m_diagSize); + householderV.applyThisOnTheLeft(m_matrixV); // FIXME this line involves a temporary buffer + } +} + +/** \internal + * Performs A = A * B exploiting the special structure of the matrix A. Splitting A as: + * A = [A1] + * [A2] + * such that A1.rows()==n1, then we assume that at least half of the columns of A1 and A2 are zeros. + * We can thus pack them prior to the the matrix product. However, this is only worth the effort if the matrix is large + * enough. + */ +template +void BDCSVD::structured_update(Block A, const MatrixXr &B, Index n1) +{ + Index n = A.rows(); + if(n>100) + { + // If the matrices are large enough, let's exploit the sparse structure of A by + // splitting it in half (wrt n1), and packing the non-zero columns. + Index n2 = n - n1; + Map A1(m_workspace.data() , n1, n); + Map A2(m_workspace.data()+ n1*n, n2, n); + Map B1(m_workspace.data()+ n*n, n, n); + Map B2(m_workspace.data()+2*n*n, n, n); + Index k1=0, k2=0; + for(Index j=0; j tmp(m_workspace.data(),n,n); + tmp.noalias() = A*B; + A = tmp; + } +} + +// The divide algorithm is done "in place", we are always working on subsets of the same matrix. The divide methods takes as argument the +// place of the submatrix we are currently working on. + +//@param firstCol : The Index of the first column of the submatrix of m_computed and for m_naiveU; +//@param lastCol : The Index of the last column of the submatrix of m_computed and for m_naiveU; +// lastCol + 1 - firstCol is the size of the submatrix. +//@param firstRowW : The Index of the first row of the matrix W that we are to change. (see the reference paper section 1 for more information on W) +//@param firstColW : Same as firstRowW with the column. +//@param shift : Each time one takes the left submatrix, one must add 1 to the shift. Why? 
Because! We actually want the last column of the U submatrix +// to become the first column (*coeff) and to shift all the other columns to the right. There are more details on the reference paper. +template +void BDCSVD::divide(Eigen::Index firstCol, Eigen::Index lastCol, Eigen::Index firstRowW, Eigen::Index firstColW, Eigen::Index shift) +{ + // requires rows = cols + 1; + using std::pow; + using std::sqrt; + using std::abs; + const Index n = lastCol - firstCol + 1; + const Index k = n/2; + const RealScalar considerZero = (std::numeric_limits::min)(); + RealScalar alphaK; + RealScalar betaK; + RealScalar r0; + RealScalar lambda, phi, c0, s0; + VectorType l, f; + // We use the other algorithm which is more efficient for small + // matrices. + if (n < m_algoswap) + { + // FIXME this line involves temporaries + JacobiSVD b(m_computed.block(firstCol, firstCol, n + 1, n), ComputeFullU | (m_compV ? ComputeFullV : 0)); + m_info = b.info(); + if (m_info != Success && m_info != NoConvergence) return; + if (m_compU) + m_naiveU.block(firstCol, firstCol, n + 1, n + 1).real() = b.matrixU(); + else + { + m_naiveU.row(0).segment(firstCol, n + 1).real() = b.matrixU().row(0); + m_naiveU.row(1).segment(firstCol, n + 1).real() = b.matrixU().row(n); + } + if (m_compV) m_naiveV.block(firstRowW, firstColW, n, n).real() = b.matrixV(); + m_computed.block(firstCol + shift, firstCol + shift, n + 1, n).setZero(); + m_computed.diagonal().segment(firstCol + shift, n) = b.singularValues().head(n); + return; + } + // We use the divide and conquer algorithm + alphaK = m_computed(firstCol + k, firstCol + k); + betaK = m_computed(firstCol + k + 1, firstCol + k); + // The divide must be done in that order in order to have good results. Divide change the data inside the submatrices + // and the divide of the right submatrice reads one column of the left submatrice. That's why we need to treat the + // right submatrix before the left one. 
+ divide(k + 1 + firstCol, lastCol, k + 1 + firstRowW, k + 1 + firstColW, shift); + if (m_info != Success && m_info != NoConvergence) return; + divide(firstCol, k - 1 + firstCol, firstRowW, firstColW + 1, shift + 1); + if (m_info != Success && m_info != NoConvergence) return; + + if (m_compU) + { + lambda = m_naiveU(firstCol + k, firstCol + k); + phi = m_naiveU(firstCol + k + 1, lastCol + 1); + } + else + { + lambda = m_naiveU(1, firstCol + k); + phi = m_naiveU(0, lastCol + 1); + } + r0 = sqrt((abs(alphaK * lambda) * abs(alphaK * lambda)) + abs(betaK * phi) * abs(betaK * phi)); + if (m_compU) + { + l = m_naiveU.row(firstCol + k).segment(firstCol, k); + f = m_naiveU.row(firstCol + k + 1).segment(firstCol + k + 1, n - k - 1); + } + else + { + l = m_naiveU.row(1).segment(firstCol, k); + f = m_naiveU.row(0).segment(firstCol + k + 1, n - k - 1); + } + if (m_compV) m_naiveV(firstRowW+k, firstColW) = Literal(1); + if (r0= firstCol; i--) + m_naiveU.col(i + 1).segment(firstCol, k + 1) = m_naiveU.col(i).segment(firstCol, k + 1); + // we shift q1 at the left with a factor c0 + m_naiveU.col(firstCol).segment( firstCol, k + 1) = (q1 * c0); + // last column = q1 * - s0 + m_naiveU.col(lastCol + 1).segment(firstCol, k + 1) = (q1 * ( - s0)); + // first column = q2 * s0 + m_naiveU.col(firstCol).segment(firstCol + k + 1, n - k) = m_naiveU.col(lastCol + 1).segment(firstCol + k + 1, n - k) * s0; + // q2 *= c0 + m_naiveU.col(lastCol + 1).segment(firstCol + k + 1, n - k) *= c0; + } + else + { + RealScalar q1 = m_naiveU(0, firstCol + k); + // we shift Q1 to the right + for (Index i = firstCol + k - 1; i >= firstCol; i--) + m_naiveU(0, i + 1) = m_naiveU(0, i); + // we shift q1 at the left with a factor c0 + m_naiveU(0, firstCol) = (q1 * c0); + // last column = q1 * - s0 + m_naiveU(0, lastCol + 1) = (q1 * ( - s0)); + // first column = q2 * s0 + m_naiveU(1, firstCol) = m_naiveU(1, lastCol + 1) *s0; + // q2 *= c0 + m_naiveU(1, lastCol + 1) *= c0; + m_naiveU.row(1).segment(firstCol + 1, 
k).setZero(); + m_naiveU.row(0).segment(firstCol + k + 1, n - k - 1).setZero(); + } + +#ifdef EIGEN_BDCSVD_SANITY_CHECKS + assert(m_naiveU.allFinite()); + assert(m_naiveV.allFinite()); + assert(m_computed.allFinite()); +#endif + + m_computed(firstCol + shift, firstCol + shift) = r0; + m_computed.col(firstCol + shift).segment(firstCol + shift + 1, k) = alphaK * l.transpose().real(); + m_computed.col(firstCol + shift).segment(firstCol + shift + k + 1, n - k - 1) = betaK * f.transpose().real(); + +#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE + ArrayXr tmp1 = (m_computed.block(firstCol+shift, firstCol+shift, n, n)).jacobiSvd().singularValues(); +#endif + // Second part: try to deflate singular values in combined matrix + deflation(firstCol, lastCol, k, firstRowW, firstColW, shift); +#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE + ArrayXr tmp2 = (m_computed.block(firstCol+shift, firstCol+shift, n, n)).jacobiSvd().singularValues(); + std::cout << "\n\nj1 = " << tmp1.transpose().format(bdcsvdfmt) << "\n"; + std::cout << "j2 = " << tmp2.transpose().format(bdcsvdfmt) << "\n\n"; + std::cout << "err: " << ((tmp1-tmp2).abs()>1e-12*tmp2.abs()).transpose() << "\n"; + static int count = 0; + std::cout << "# " << ++count << "\n\n"; + assert((tmp1-tmp2).matrix().norm() < 1e-14*tmp2.matrix().norm()); +// assert(count<681); +// assert(((tmp1-tmp2).abs()<1e-13*tmp2.abs()).all()); +#endif + + // Third part: compute SVD of combined matrix + MatrixXr UofSVD, VofSVD; + VectorType singVals; + computeSVDofM(firstCol + shift, n, UofSVD, singVals, VofSVD); + +#ifdef EIGEN_BDCSVD_SANITY_CHECKS + assert(UofSVD.allFinite()); + assert(VofSVD.allFinite()); +#endif + + if (m_compU) + structured_update(m_naiveU.block(firstCol, firstCol, n + 1, n + 1), UofSVD, (n+2)/2); + else + { + Map,Aligned> tmp(m_workspace.data(),2,n+1); + tmp.noalias() = m_naiveU.middleCols(firstCol, n+1) * UofSVD; + m_naiveU.middleCols(firstCol, n + 1) = tmp; + } + + if (m_compV) structured_update(m_naiveV.block(firstRowW, firstColW, n, n), VofSVD, 
(n+1)/2); + +#ifdef EIGEN_BDCSVD_SANITY_CHECKS + assert(m_naiveU.allFinite()); + assert(m_naiveV.allFinite()); + assert(m_computed.allFinite()); +#endif + + m_computed.block(firstCol + shift, firstCol + shift, n, n).setZero(); + m_computed.block(firstCol + shift, firstCol + shift, n, n).diagonal() = singVals; +}// end divide + +// Compute SVD of m_computed.block(firstCol, firstCol, n + 1, n); this block only has non-zeros in +// the first column and on the diagonal and has undergone deflation, so diagonal is in increasing +// order except for possibly the (0,0) entry. The computed SVD is stored U, singVals and V, except +// that if m_compV is false, then V is not computed. Singular values are sorted in decreasing order. +// +// TODO Opportunities for optimization: better root finding algo, better stopping criterion, better +// handling of round-off errors, be consistent in ordering +// For instance, to solve the secular equation using FMM, see http://www.stat.uchicago.edu/~lekheng/courses/302/classics/greengard-rokhlin.pdf +template +void BDCSVD::computeSVDofM(Eigen::Index firstCol, Eigen::Index n, MatrixXr& U, VectorType& singVals, MatrixXr& V) +{ + const RealScalar considerZero = (std::numeric_limits::min)(); + using std::abs; + ArrayRef col0 = m_computed.col(firstCol).segment(firstCol, n); + m_workspace.head(n) = m_computed.block(firstCol, firstCol, n, n).diagonal(); + ArrayRef diag = m_workspace.head(n); + diag(0) = Literal(0); + + // Allocate space for singular values and vectors + singVals.resize(n); + U.resize(n+1, n+1); + if (m_compV) V.resize(n, n); + +#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE + if (col0.hasNaN() || diag.hasNaN()) + std::cout << "\n\nHAS NAN\n\n"; +#endif + + // Many singular values might have been deflated, the zero ones have been moved to the end, + // but others are interleaved and we must ignore them at this stage. 
+ // To this end, let's compute a permutation skipping them: + Index actual_n = n; + while(actual_n>1 && diag(actual_n-1)==Literal(0)) {--actual_n; eigen_internal_assert(col0(actual_n)==Literal(0)); } + Index m = 0; // size of the deflated problem + for(Index k=0;kconsiderZero) + m_workspaceI(m++) = k; + Map perm(m_workspaceI.data(),m); + + Map shifts(m_workspace.data()+1*n, n); + Map mus(m_workspace.data()+2*n, n); + Map zhat(m_workspace.data()+3*n, n); + +#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE + std::cout << "computeSVDofM using:\n"; + std::cout << " z: " << col0.transpose() << "\n"; + std::cout << " d: " << diag.transpose() << "\n"; +#endif + + // Compute singVals, shifts, and mus + computeSingVals(col0, diag, perm, singVals, shifts, mus); + +#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE + std::cout << " j: " << (m_computed.block(firstCol, firstCol, n, n)).jacobiSvd().singularValues().transpose().reverse() << "\n\n"; + std::cout << " sing-val: " << singVals.transpose() << "\n"; + std::cout << " mu: " << mus.transpose() << "\n"; + std::cout << " shift: " << shifts.transpose() << "\n"; + + { + std::cout << "\n\n mus: " << mus.head(actual_n).transpose() << "\n\n"; + std::cout << " check1 (expect0) : " << ((singVals.array()-(shifts+mus)) / singVals.array()).head(actual_n).transpose() << "\n\n"; + assert((((singVals.array()-(shifts+mus)) / singVals.array()).head(actual_n) >= 0).all()); + std::cout << " check2 (>0) : " << ((singVals.array()-diag) / singVals.array()).head(actual_n).transpose() << "\n\n"; + assert((((singVals.array()-diag) / singVals.array()).head(actual_n) >= 0).all()); + } +#endif + +#ifdef EIGEN_BDCSVD_SANITY_CHECKS + assert(singVals.allFinite()); + assert(mus.allFinite()); + assert(shifts.allFinite()); +#endif + + // Compute zhat + perturbCol0(col0, diag, perm, singVals, shifts, mus, zhat); +#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE + std::cout << " zhat: " << zhat.transpose() << "\n"; +#endif + +#ifdef EIGEN_BDCSVD_SANITY_CHECKS + assert(zhat.allFinite()); +#endif + + 
computeSingVecs(zhat, diag, perm, singVals, shifts, mus, U, V); + +#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE + std::cout << "U^T U: " << (U.transpose() * U - MatrixXr(MatrixXr::Identity(U.cols(),U.cols()))).norm() << "\n"; + std::cout << "V^T V: " << (V.transpose() * V - MatrixXr(MatrixXr::Identity(V.cols(),V.cols()))).norm() << "\n"; +#endif + +#ifdef EIGEN_BDCSVD_SANITY_CHECKS + assert(m_naiveU.allFinite()); + assert(m_naiveV.allFinite()); + assert(m_computed.allFinite()); + assert(U.allFinite()); + assert(V.allFinite()); +// assert((U.transpose() * U - MatrixXr(MatrixXr::Identity(U.cols(),U.cols()))).norm() < 100*NumTraits::epsilon() * n); +// assert((V.transpose() * V - MatrixXr(MatrixXr::Identity(V.cols(),V.cols()))).norm() < 100*NumTraits::epsilon() * n); +#endif + + // Because of deflation, the singular values might not be completely sorted. + // Fortunately, reordering them is a O(n) problem + for(Index i=0; isingVals(i+1)) + { + using std::swap; + swap(singVals(i),singVals(i+1)); + U.col(i).swap(U.col(i+1)); + if(m_compV) V.col(i).swap(V.col(i+1)); + } + } + +#ifdef EIGEN_BDCSVD_SANITY_CHECKS + { + bool singular_values_sorted = (((singVals.segment(1,actual_n-1)-singVals.head(actual_n-1))).array() >= 0).all(); + if(!singular_values_sorted) + std::cout << "Singular values are not sorted: " << singVals.segment(1,actual_n).transpose() << "\n"; + assert(singular_values_sorted); + } +#endif + + // Reverse order so that singular values in increased order + // Because of deflation, the zeros singular-values are already at the end + singVals.head(actual_n).reverseInPlace(); + U.leftCols(actual_n).rowwise().reverseInPlace(); + if (m_compV) V.leftCols(actual_n).rowwise().reverseInPlace(); + +#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE + JacobiSVD jsvd(m_computed.block(firstCol, firstCol, n, n) ); + std::cout << " * j: " << jsvd.singularValues().transpose() << "\n\n"; + std::cout << " * sing-val: " << singVals.transpose() << "\n"; +// std::cout << " * err: " << 
((jsvd.singularValues()-singVals)>1e-13*singVals.norm()).transpose() << "\n"; +#endif +} + +template +typename BDCSVD::RealScalar BDCSVD::secularEq(RealScalar mu, const ArrayRef& col0, const ArrayRef& diag, const IndicesRef &perm, const ArrayRef& diagShifted, RealScalar shift) +{ + Index m = perm.size(); + RealScalar res = Literal(1); + for(Index i=0; i +void BDCSVD::computeSingVals(const ArrayRef& col0, const ArrayRef& diag, const IndicesRef &perm, + VectorType& singVals, ArrayRef shifts, ArrayRef mus) +{ + using std::abs; + using std::swap; + using std::sqrt; + + Index n = col0.size(); + Index actual_n = n; + // Note that here actual_n is computed based on col0(i)==0 instead of diag(i)==0 as above + // because 1) we have diag(i)==0 => col0(i)==0 and 2) if col0(i)==0, then diag(i) is already a singular value. + while(actual_n>1 && col0(actual_n-1)==Literal(0)) --actual_n; + + for (Index k = 0; k < n; ++k) + { + if (col0(k) == Literal(0) || actual_n==1) + { + // if col0(k) == 0, then entry is deflated, so singular value is on diagonal + // if actual_n==1, then the deflated problem is already diagonalized + singVals(k) = k==0 ? col0(0) : diag(k); + mus(k) = Literal(0); + shifts(k) = k==0 ? col0(0) : diag(k); + continue; + } + + // otherwise, use secular equation to find singular value + RealScalar left = diag(k); + RealScalar right; // was: = (k != actual_n-1) ? diag(k+1) : (diag(actual_n-1) + col0.matrix().norm()); + if(k==actual_n-1) + right = (diag(actual_n-1) + col0.matrix().norm()); + else + { + // Skip deflated singular values, + // recall that at this stage we assume that z[j]!=0 and all entries for which z[j]==0 have been put aside. + // This should be equivalent to using perm[] + Index l = k+1; + while(col0(l)==Literal(0)) { ++l; eigen_internal_assert(l Literal(0)) ? 
left : right; + + // measure everything relative to shift + Map diagShifted(m_workspace.data()+4*n, n); + diagShifted = diag - shift; + + if(k!=actual_n-1) + { + // check that after the shift, f(mid) is still negative: + RealScalar midShifted = (right - left) / RealScalar(2); + if(shift==right) + midShifted = -midShifted; + RealScalar fMidShifted = secularEq(midShifted, col0, diag, perm, diagShifted, shift); + if(fMidShifted>0) + { + // fMid was erroneous, fix it: + shift = fMidShifted > Literal(0) ? left : right; + diagShifted = diag - shift; + } + } + + // initial guess + RealScalar muPrev, muCur; + if (shift == left) + { + muPrev = (right - left) * RealScalar(0.1); + if (k == actual_n-1) muCur = right - left; + else muCur = (right - left) * RealScalar(0.5); + } + else + { + muPrev = -(right - left) * RealScalar(0.1); + muCur = -(right - left) * RealScalar(0.5); + } + + RealScalar fPrev = secularEq(muPrev, col0, diag, perm, diagShifted, shift); + RealScalar fCur = secularEq(muCur, col0, diag, perm, diagShifted, shift); + if (abs(fPrev) < abs(fCur)) + { + swap(fPrev, fCur); + swap(muPrev, muCur); + } + + // rational interpolation: fit a function of the form a / mu + b through the two previous + // iterates and use its zero to compute the next iterate + bool useBisection = fPrev*fCur>Literal(0); + while (fCur!=Literal(0) && abs(muCur - muPrev) > Literal(8) * NumTraits::epsilon() * numext::maxi(abs(muCur), abs(muPrev)) && abs(fCur - fPrev)>NumTraits::epsilon() && !useBisection) + { + ++m_numIters; + + // Find a and b such that the function f(mu) = a / mu + b matches the current and previous samples. 
+ RealScalar a = (fCur - fPrev) / (Literal(1)/muCur - Literal(1)/muPrev); + RealScalar b = fCur - a / muCur; + // And find mu such that f(mu)==0: + RealScalar muZero = -a/b; + RealScalar fZero = secularEq(muZero, col0, diag, perm, diagShifted, shift); + +#ifdef EIGEN_BDCSVD_SANITY_CHECKS + assert((numext::isfinite)(fZero)); +#endif + + muPrev = muCur; + fPrev = fCur; + muCur = muZero; + fCur = fZero; + + if (shift == left && (muCur < Literal(0) || muCur > right - left)) useBisection = true; + if (shift == right && (muCur < -(right - left) || muCur > Literal(0))) useBisection = true; + if (abs(fCur)>abs(fPrev)) useBisection = true; + } + + // fall back on bisection method if rational interpolation did not work + if (useBisection) + { +#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE + std::cout << "useBisection for k = " << k << ", actual_n = " << actual_n << "\n"; +#endif + RealScalar leftShifted, rightShifted; + if (shift == left) + { + // to avoid overflow, we must have mu > max(real_min, |z(k)|/sqrt(real_max)), + // the factor 2 is to be more conservative + leftShifted = numext::maxi( (std::numeric_limits::min)(), Literal(2) * abs(col0(k)) / sqrt((std::numeric_limits::max)()) ); + + // check that we did it right: + eigen_internal_assert( (numext::isfinite)( (col0(k)/leftShifted)*(col0(k)/(diag(k)+shift+leftShifted)) ) ); + // I don't understand why the case k==0 would be special there: + // if (k == 0) rightShifted = right - left; else + rightShifted = (k==actual_n-1) ? 
right : ((right - left) * RealScalar(0.51)); // theoretically we can take 0.5, but let's be safe + } + else + { + leftShifted = -(right - left) * RealScalar(0.51); + if(k+1( (std::numeric_limits::min)(), abs(col0(k+1)) / sqrt((std::numeric_limits::max)()) ); + else + rightShifted = -(std::numeric_limits::min)(); + } + + RealScalar fLeft = secularEq(leftShifted, col0, diag, perm, diagShifted, shift); + eigen_internal_assert(fLeft [" << leftShifted << " " << rightShifted << "], shift=" << shift + << " , f(right)=" << secularEq(0, col0, diag, perm, diagShifted, shift) + << " == " << secularEq(right, col0, diag, perm, diag, 0) << " == " << fRight << "\n"; + } +#endif + eigen_internal_assert(fLeft * fRight < Literal(0)); + + if(fLeft Literal(2) * NumTraits::epsilon() * numext::maxi(abs(leftShifted), abs(rightShifted))) + { + RealScalar midShifted = (leftShifted + rightShifted) / Literal(2); + fMid = secularEq(midShifted, col0, diag, perm, diagShifted, shift); + eigen_internal_assert((numext::isfinite)(fMid)); + + if (fLeft * fMid < Literal(0)) + { + rightShifted = midShifted; + } + else + { + leftShifted = midShifted; + fLeft = fMid; + } + } + muCur = (leftShifted + rightShifted) / Literal(2); + } + else + { + // We have a problem as shifting on the left or right give either a positive or negative value + // at the middle of [left,right]... 
+ // Instead fo abbording or entering an infinite loop, + // let's just use the middle as the estimated zero-crossing: + muCur = (right - left) * RealScalar(0.5); + if(shift == right) + muCur = -muCur; + } + } + + singVals[k] = shift + muCur; + shifts[k] = shift; + mus[k] = muCur; + +#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE + if(k+1=singVals[k-1]); + assert(singVals[k]>=diag(k)); +#endif + + // perturb singular value slightly if it equals diagonal entry to avoid division by zero later + // (deflation is supposed to avoid this from happening) + // - this does no seem to be necessary anymore - + // if (singVals[k] == left) singVals[k] *= 1 + NumTraits::epsilon(); + // if (singVals[k] == right) singVals[k] *= 1 - NumTraits::epsilon(); + } +} + + +// zhat is perturbation of col0 for which singular vectors can be computed stably (see Section 3.1) +template +void BDCSVD::perturbCol0 + (const ArrayRef& col0, const ArrayRef& diag, const IndicesRef &perm, const VectorType& singVals, + const ArrayRef& shifts, const ArrayRef& mus, ArrayRef zhat) +{ + using std::sqrt; + Index n = col0.size(); + Index m = perm.size(); + if(m==0) + { + zhat.setZero(); + return; + } + Index lastIdx = perm(m-1); + // The offset permits to skip deflated entries while computing zhat + for (Index k = 0; k < n; ++k) + { + if (col0(k) == Literal(0)) // deflated + zhat(k) = Literal(0); + else + { + // see equation (3.6) + RealScalar dk = diag(k); + RealScalar prod = (singVals(lastIdx) + dk) * (mus(lastIdx) + (shifts(lastIdx) - dk)); +#ifdef EIGEN_BDCSVD_SANITY_CHECKS + if(prod<0) { + std::cout << "k = " << k << " ; z(k)=" << col0(k) << ", diag(k)=" << dk << "\n"; + std::cout << "prod = " << "(" << singVals(lastIdx) << " + " << dk << ") * (" << mus(lastIdx) << " + (" << shifts(lastIdx) << " - " << dk << "))" << "\n"; + std::cout << " = " << singVals(lastIdx) + dk << " * " << mus(lastIdx) + (shifts(lastIdx) - dk) << "\n"; + } + assert(prod>=0); +#endif + + for(Index l = 0; l=k && (l==0 || l-1>=m)) + { + 
std::cout << "Error in perturbCol0\n"; + std::cout << " " << k << "/" << n << " " << l << "/" << m << " " << i << "/" << n << " ; " << col0(k) << " " << diag(k) << " " << "\n"; + std::cout << " " <= k && l == 0) { + m_info = NumericalIssue; + prod = 0; + break; + } + Index j = i 0 ? perm(l-1) : i; +#ifdef EIGEN_BDCSVD_SANITY_CHECKS + if(!(dk!=Literal(0) || diag(i)!=Literal(0))) + { + std::cout << "k=" << k << ", i=" << i << ", l=" << l << ", perm.size()=" << perm.size() << "\n"; + } + assert(dk!=Literal(0) || diag(i)!=Literal(0)); +#endif + prod *= ((singVals(j)+dk) / ((diag(i)+dk))) * ((mus(j)+(shifts(j)-dk)) / ((diag(i)-dk))); +#ifdef EIGEN_BDCSVD_SANITY_CHECKS + assert(prod>=0); +#endif +#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE + if(i!=k && numext::abs(((singVals(j)+dk)*(mus(j)+(shifts(j)-dk)))/((diag(i)+dk)*(diag(i)-dk)) - 1) > 0.9 ) + std::cout << " " << ((singVals(j)+dk)*(mus(j)+(shifts(j)-dk)))/((diag(i)+dk)*(diag(i)-dk)) << " == (" << (singVals(j)+dk) << " * " << (mus(j)+(shifts(j)-dk)) + << ") / (" << (diag(i)+dk) << " * " << (diag(i)-dk) << ")\n"; +#endif + } + } +#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE + std::cout << "zhat(" << k << ") = sqrt( " << prod << ") ; " << (singVals(lastIdx) + dk) << " * " << mus(lastIdx) + shifts(lastIdx) << " - " << dk << "\n"; +#endif + RealScalar tmp = sqrt(prod); +#ifdef EIGEN_BDCSVD_SANITY_CHECKS + assert((numext::isfinite)(tmp)); +#endif + zhat(k) = col0(k) > Literal(0) ? 
RealScalar(tmp) : RealScalar(-tmp); + } + } +} + +// compute singular vectors +template +void BDCSVD::computeSingVecs + (const ArrayRef& zhat, const ArrayRef& diag, const IndicesRef &perm, const VectorType& singVals, + const ArrayRef& shifts, const ArrayRef& mus, MatrixXr& U, MatrixXr& V) +{ + Index n = zhat.size(); + Index m = perm.size(); + + for (Index k = 0; k < n; ++k) + { + if (zhat(k) == Literal(0)) + { + U.col(k) = VectorType::Unit(n+1, k); + if (m_compV) V.col(k) = VectorType::Unit(n, k); + } + else + { + U.col(k).setZero(); + for(Index l=0;l= 1, di almost null and zi non null. +// We use a rotation to zero out zi applied to the left of M +template +void BDCSVD::deflation43(Eigen::Index firstCol, Eigen::Index shift, Eigen::Index i, Eigen::Index size) +{ + using std::abs; + using std::sqrt; + using std::pow; + Index start = firstCol + shift; + RealScalar c = m_computed(start, start); + RealScalar s = m_computed(start+i, start); + RealScalar r = numext::hypot(c,s); + if (r == Literal(0)) + { + m_computed(start+i, start+i) = Literal(0); + return; + } + m_computed(start,start) = r; + m_computed(start+i, start) = Literal(0); + m_computed(start+i, start+i) = Literal(0); + + JacobiRotation J(c/r,-s/r); + if (m_compU) m_naiveU.middleRows(firstCol, size+1).applyOnTheRight(firstCol, firstCol+i, J); + else m_naiveU.applyOnTheRight(firstCol, firstCol+i, J); +}// end deflation 43 + + +// page 13 +// i,j >= 1, i!=j and |di - dj| < epsilon * norm2(M) +// We apply two rotations to have zj = 0; +// TODO deflation44 is still broken and not properly tested +template +void BDCSVD::deflation44(Eigen::Index firstColu , Eigen::Index firstColm, Eigen::Index firstRowW, Eigen::Index firstColW, Eigen::Index i, Eigen::Index j, Eigen::Index size) +{ + using std::abs; + using std::sqrt; + using std::conj; + using std::pow; + RealScalar c = m_computed(firstColm+i, firstColm); + RealScalar s = m_computed(firstColm+j, firstColm); + RealScalar r = sqrt(numext::abs2(c) + numext::abs2(s)); 
+#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE + std::cout << "deflation 4.4: " << i << "," << j << " -> " << c << " " << s << " " << r << " ; " + << m_computed(firstColm + i-1, firstColm) << " " + << m_computed(firstColm + i, firstColm) << " " + << m_computed(firstColm + i+1, firstColm) << " " + << m_computed(firstColm + i+2, firstColm) << "\n"; + std::cout << m_computed(firstColm + i-1, firstColm + i-1) << " " + << m_computed(firstColm + i, firstColm+i) << " " + << m_computed(firstColm + i+1, firstColm+i+1) << " " + << m_computed(firstColm + i+2, firstColm+i+2) << "\n"; +#endif + if (r==Literal(0)) + { + m_computed(firstColm + i, firstColm + i) = m_computed(firstColm + j, firstColm + j); + return; + } + c/=r; + s/=r; + m_computed(firstColm + i, firstColm) = r; + m_computed(firstColm + j, firstColm + j) = m_computed(firstColm + i, firstColm + i); + m_computed(firstColm + j, firstColm) = Literal(0); + + JacobiRotation J(c,-s); + if (m_compU) m_naiveU.middleRows(firstColu, size+1).applyOnTheRight(firstColu + i, firstColu + j, J); + else m_naiveU.applyOnTheRight(firstColu+i, firstColu+j, J); + if (m_compV) m_naiveV.middleRows(firstRowW, size).applyOnTheRight(firstColW + i, firstColW + j, J); +}// end deflation 44 + + +// acts on block from (firstCol+shift, firstCol+shift) to (lastCol+shift, lastCol+shift) [inclusive] +template +void BDCSVD::deflation(Eigen::Index firstCol, Eigen::Index lastCol, Eigen::Index k, Eigen::Index firstRowW, Eigen::Index firstColW, Eigen::Index shift) +{ + using std::sqrt; + using std::abs; + const Index length = lastCol + 1 - firstCol; + + Block col0(m_computed, firstCol+shift, firstCol+shift, length, 1); + Diagonal fulldiag(m_computed); + VectorBlock,Dynamic> diag(fulldiag, firstCol+shift, length); + + const RealScalar considerZero = (std::numeric_limits::min)(); + RealScalar maxDiag = diag.tail((std::max)(Index(1),length-1)).cwiseAbs().maxCoeff(); + RealScalar epsilon_strict = numext::maxi(considerZero,NumTraits::epsilon() * maxDiag); + RealScalar 
epsilon_coarse = Literal(8) * NumTraits::epsilon() * numext::maxi(col0.cwiseAbs().maxCoeff(), maxDiag); + +#ifdef EIGEN_BDCSVD_SANITY_CHECKS + assert(m_naiveU.allFinite()); + assert(m_naiveV.allFinite()); + assert(m_computed.allFinite()); +#endif + +#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE + std::cout << "\ndeflate:" << diag.head(k+1).transpose() << " | " << diag.segment(k+1,length-k-1).transpose() << "\n"; +#endif + + //condition 4.1 + if (diag(0) < epsilon_coarse) + { +#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE + std::cout << "deflation 4.1, because " << diag(0) << " < " << epsilon_coarse << "\n"; +#endif + diag(0) = epsilon_coarse; + } + + //condition 4.2 + for (Index i=1;i k) permutation[p] = j++; + else if (j >= length) permutation[p] = i++; + else if (diag(i) < diag(j)) permutation[p] = j++; + else permutation[p] = i++; + } + } + + // If we have a total deflation, then we have to insert diag(0) at the right place + if(total_deflation) + { + for(Index i=1; i0 && (abs(diag(i))1;--i) + if( (diag(i) - diag(i-1)) < NumTraits::epsilon()*maxDiag ) + { +#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE + std::cout << "deflation 4.4 with i = " << i << " because " << diag(i) << " - " << diag(i-1) << " == " << (diag(i) - diag(i-1)) << " < " << NumTraits::epsilon()*/*diag(i)*/maxDiag << "\n"; +#endif + eigen_internal_assert(abs(diag(i) - diag(i-1)) +BDCSVD::PlainObject> +MatrixBase::bdcSvd(unsigned int computationOptions) const +{ + return BDCSVD(*this, computationOptions); +} + +} // end namespace Eigen + +#endif diff --git a/include/eigen/Eigen/src/SVD/JacobiSVD.h b/include/eigen/Eigen/src/SVD/JacobiSVD.h new file mode 100644 index 0000000000000000000000000000000000000000..4b002ad4c5021901392f5f514000cd7853ddfc53 --- /dev/null +++ b/include/eigen/Eigen/src/SVD/JacobiSVD.h @@ -0,0 +1,813 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. 
+// +// Copyright (C) 2009-2010 Benoit Jacob +// Copyright (C) 2013-2014 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_JACOBISVD_H +#define EIGEN_JACOBISVD_H + +namespace Eigen { + +namespace internal { +// forward declaration (needed by ICC) +// the empty body is required by MSVC +template::IsComplex> +struct svd_precondition_2x2_block_to_be_real {}; + +/*** QR preconditioners (R-SVD) + *** + *** Their role is to reduce the problem of computing the SVD to the case of a square matrix. + *** This approach, known as R-SVD, is an optimization for rectangular-enough matrices, and is a requirement for + *** JacobiSVD which by itself is only able to work on square matrices. + ***/ + +enum { PreconditionIfMoreColsThanRows, PreconditionIfMoreRowsThanCols }; + +template +struct qr_preconditioner_should_do_anything +{ + enum { a = MatrixType::RowsAtCompileTime != Dynamic && + MatrixType::ColsAtCompileTime != Dynamic && + MatrixType::ColsAtCompileTime <= MatrixType::RowsAtCompileTime, + b = MatrixType::RowsAtCompileTime != Dynamic && + MatrixType::ColsAtCompileTime != Dynamic && + MatrixType::RowsAtCompileTime <= MatrixType::ColsAtCompileTime, + ret = !( (QRPreconditioner == NoQRPreconditioner) || + (Case == PreconditionIfMoreColsThanRows && bool(a)) || + (Case == PreconditionIfMoreRowsThanCols && bool(b)) ) + }; +}; + +template::ret +> struct qr_preconditioner_impl {}; + +template +class qr_preconditioner_impl +{ +public: + void allocate(const JacobiSVD&) {} + bool run(JacobiSVD&, const MatrixType&) + { + return false; + } +}; + +/*** preconditioner using FullPivHouseholderQR ***/ + +template +class qr_preconditioner_impl +{ +public: + typedef typename MatrixType::Scalar Scalar; + enum + { + RowsAtCompileTime = MatrixType::RowsAtCompileTime, + MaxRowsAtCompileTime = 
MatrixType::MaxRowsAtCompileTime + }; + typedef Matrix WorkspaceType; + + void allocate(const JacobiSVD& svd) + { + if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols()) + { + m_qr.~QRType(); + ::new (&m_qr) QRType(svd.rows(), svd.cols()); + } + if (svd.m_computeFullU) m_workspace.resize(svd.rows()); + } + + bool run(JacobiSVD& svd, const MatrixType& matrix) + { + if(matrix.rows() > matrix.cols()) + { + m_qr.compute(matrix); + svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView(); + if(svd.m_computeFullU) m_qr.matrixQ().evalTo(svd.m_matrixU, m_workspace); + if(svd.computeV()) svd.m_matrixV = m_qr.colsPermutation(); + return true; + } + return false; + } +private: + typedef FullPivHouseholderQR QRType; + QRType m_qr; + WorkspaceType m_workspace; +}; + +template +class qr_preconditioner_impl +{ +public: + typedef typename MatrixType::Scalar Scalar; + enum + { + RowsAtCompileTime = MatrixType::RowsAtCompileTime, + ColsAtCompileTime = MatrixType::ColsAtCompileTime, + MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, + MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime, + Options = MatrixType::Options + }; + + typedef typename internal::make_proper_matrix_type< + Scalar, ColsAtCompileTime, RowsAtCompileTime, Options, MaxColsAtCompileTime, MaxRowsAtCompileTime + >::type TransposeTypeWithSameStorageOrder; + + void allocate(const JacobiSVD& svd) + { + if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols()) + { + m_qr.~QRType(); + ::new (&m_qr) QRType(svd.cols(), svd.rows()); + } + m_adjoint.resize(svd.cols(), svd.rows()); + if (svd.m_computeFullV) m_workspace.resize(svd.cols()); + } + + bool run(JacobiSVD& svd, const MatrixType& matrix) + { + if(matrix.cols() > matrix.rows()) + { + m_adjoint = matrix.adjoint(); + m_qr.compute(m_adjoint); + svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView().adjoint(); + if(svd.m_computeFullV) 
m_qr.matrixQ().evalTo(svd.m_matrixV, m_workspace); + if(svd.computeU()) svd.m_matrixU = m_qr.colsPermutation(); + return true; + } + else return false; + } +private: + typedef FullPivHouseholderQR QRType; + QRType m_qr; + TransposeTypeWithSameStorageOrder m_adjoint; + typename internal::plain_row_type::type m_workspace; +}; + +/*** preconditioner using ColPivHouseholderQR ***/ + +template +class qr_preconditioner_impl +{ +public: + void allocate(const JacobiSVD& svd) + { + if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols()) + { + m_qr.~QRType(); + ::new (&m_qr) QRType(svd.rows(), svd.cols()); + } + if (svd.m_computeFullU) m_workspace.resize(svd.rows()); + else if (svd.m_computeThinU) m_workspace.resize(svd.cols()); + } + + bool run(JacobiSVD& svd, const MatrixType& matrix) + { + if(matrix.rows() > matrix.cols()) + { + m_qr.compute(matrix); + svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView(); + if(svd.m_computeFullU) m_qr.householderQ().evalTo(svd.m_matrixU, m_workspace); + else if(svd.m_computeThinU) + { + svd.m_matrixU.setIdentity(matrix.rows(), matrix.cols()); + m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixU, m_workspace); + } + if(svd.computeV()) svd.m_matrixV = m_qr.colsPermutation(); + return true; + } + return false; + } + +private: + typedef ColPivHouseholderQR QRType; + QRType m_qr; + typename internal::plain_col_type::type m_workspace; +}; + +template +class qr_preconditioner_impl +{ +public: + typedef typename MatrixType::Scalar Scalar; + enum + { + RowsAtCompileTime = MatrixType::RowsAtCompileTime, + ColsAtCompileTime = MatrixType::ColsAtCompileTime, + MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, + MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime, + Options = MatrixType::Options + }; + + typedef typename internal::make_proper_matrix_type< + Scalar, ColsAtCompileTime, RowsAtCompileTime, Options, MaxColsAtCompileTime, MaxRowsAtCompileTime + >::type 
TransposeTypeWithSameStorageOrder; + + void allocate(const JacobiSVD& svd) + { + if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols()) + { + m_qr.~QRType(); + ::new (&m_qr) QRType(svd.cols(), svd.rows()); + } + if (svd.m_computeFullV) m_workspace.resize(svd.cols()); + else if (svd.m_computeThinV) m_workspace.resize(svd.rows()); + m_adjoint.resize(svd.cols(), svd.rows()); + } + + bool run(JacobiSVD& svd, const MatrixType& matrix) + { + if(matrix.cols() > matrix.rows()) + { + m_adjoint = matrix.adjoint(); + m_qr.compute(m_adjoint); + + svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView().adjoint(); + if(svd.m_computeFullV) m_qr.householderQ().evalTo(svd.m_matrixV, m_workspace); + else if(svd.m_computeThinV) + { + svd.m_matrixV.setIdentity(matrix.cols(), matrix.rows()); + m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixV, m_workspace); + } + if(svd.computeU()) svd.m_matrixU = m_qr.colsPermutation(); + return true; + } + else return false; + } + +private: + typedef ColPivHouseholderQR QRType; + QRType m_qr; + TransposeTypeWithSameStorageOrder m_adjoint; + typename internal::plain_row_type::type m_workspace; +}; + +/*** preconditioner using HouseholderQR ***/ + +template +class qr_preconditioner_impl +{ +public: + void allocate(const JacobiSVD& svd) + { + if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols()) + { + m_qr.~QRType(); + ::new (&m_qr) QRType(svd.rows(), svd.cols()); + } + if (svd.m_computeFullU) m_workspace.resize(svd.rows()); + else if (svd.m_computeThinU) m_workspace.resize(svd.cols()); + } + + bool run(JacobiSVD& svd, const MatrixType& matrix) + { + if(matrix.rows() > matrix.cols()) + { + m_qr.compute(matrix); + svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView(); + if(svd.m_computeFullU) m_qr.householderQ().evalTo(svd.m_matrixU, m_workspace); + else if(svd.m_computeThinU) + { + svd.m_matrixU.setIdentity(matrix.rows(), matrix.cols()); + 
m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixU, m_workspace); + } + if(svd.computeV()) svd.m_matrixV.setIdentity(matrix.cols(), matrix.cols()); + return true; + } + return false; + } +private: + typedef HouseholderQR QRType; + QRType m_qr; + typename internal::plain_col_type::type m_workspace; +}; + +template +class qr_preconditioner_impl +{ +public: + typedef typename MatrixType::Scalar Scalar; + enum + { + RowsAtCompileTime = MatrixType::RowsAtCompileTime, + ColsAtCompileTime = MatrixType::ColsAtCompileTime, + MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, + MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime, + Options = MatrixType::Options + }; + + typedef typename internal::make_proper_matrix_type< + Scalar, ColsAtCompileTime, RowsAtCompileTime, Options, MaxColsAtCompileTime, MaxRowsAtCompileTime + >::type TransposeTypeWithSameStorageOrder; + + void allocate(const JacobiSVD& svd) + { + if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols()) + { + m_qr.~QRType(); + ::new (&m_qr) QRType(svd.cols(), svd.rows()); + } + if (svd.m_computeFullV) m_workspace.resize(svd.cols()); + else if (svd.m_computeThinV) m_workspace.resize(svd.rows()); + m_adjoint.resize(svd.cols(), svd.rows()); + } + + bool run(JacobiSVD& svd, const MatrixType& matrix) + { + if(matrix.cols() > matrix.rows()) + { + m_adjoint = matrix.adjoint(); + m_qr.compute(m_adjoint); + + svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView().adjoint(); + if(svd.m_computeFullV) m_qr.householderQ().evalTo(svd.m_matrixV, m_workspace); + else if(svd.m_computeThinV) + { + svd.m_matrixV.setIdentity(matrix.cols(), matrix.rows()); + m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixV, m_workspace); + } + if(svd.computeU()) svd.m_matrixU.setIdentity(matrix.rows(), matrix.rows()); + return true; + } + else return false; + } + +private: + typedef HouseholderQR QRType; + QRType m_qr; + TransposeTypeWithSameStorageOrder m_adjoint; + typename 
internal::plain_row_type::type m_workspace; +}; + +/*** 2x2 SVD implementation + *** + *** JacobiSVD consists in performing a series of 2x2 SVD subproblems + ***/ + +template +struct svd_precondition_2x2_block_to_be_real +{ + typedef JacobiSVD SVD; + typedef typename MatrixType::RealScalar RealScalar; + static bool run(typename SVD::WorkMatrixType&, SVD&, Index, Index, RealScalar&) { return true; } +}; + +template +struct svd_precondition_2x2_block_to_be_real +{ + typedef JacobiSVD SVD; + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::RealScalar RealScalar; + static bool run(typename SVD::WorkMatrixType& work_matrix, SVD& svd, Index p, Index q, RealScalar& maxDiagEntry) + { + using std::sqrt; + using std::abs; + Scalar z; + JacobiRotation rot; + RealScalar n = sqrt(numext::abs2(work_matrix.coeff(p,p)) + numext::abs2(work_matrix.coeff(q,p))); + + const RealScalar considerAsZero = (std::numeric_limits::min)(); + const RealScalar precision = NumTraits::epsilon(); + + if(n==0) + { + // make sure first column is zero + work_matrix.coeffRef(p,p) = work_matrix.coeffRef(q,p) = Scalar(0); + + if(abs(numext::imag(work_matrix.coeff(p,q)))>considerAsZero) + { + // work_matrix.coeff(p,q) can be zero if work_matrix.coeff(q,p) is not zero but small enough to underflow when computing n + z = abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q); + work_matrix.row(p) *= z; + if(svd.computeU()) svd.m_matrixU.col(p) *= conj(z); + } + if(abs(numext::imag(work_matrix.coeff(q,q)))>considerAsZero) + { + z = abs(work_matrix.coeff(q,q)) / work_matrix.coeff(q,q); + work_matrix.row(q) *= z; + if(svd.computeU()) svd.m_matrixU.col(q) *= conj(z); + } + // otherwise the second row is already zero, so we have nothing to do. 
+ } + else + { + rot.c() = conj(work_matrix.coeff(p,p)) / n; + rot.s() = work_matrix.coeff(q,p) / n; + work_matrix.applyOnTheLeft(p,q,rot); + if(svd.computeU()) svd.m_matrixU.applyOnTheRight(p,q,rot.adjoint()); + if(abs(numext::imag(work_matrix.coeff(p,q)))>considerAsZero) + { + z = abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q); + work_matrix.col(q) *= z; + if(svd.computeV()) svd.m_matrixV.col(q) *= z; + } + if(abs(numext::imag(work_matrix.coeff(q,q)))>considerAsZero) + { + z = abs(work_matrix.coeff(q,q)) / work_matrix.coeff(q,q); + work_matrix.row(q) *= z; + if(svd.computeU()) svd.m_matrixU.col(q) *= conj(z); + } + } + + // update largest diagonal entry + maxDiagEntry = numext::maxi(maxDiagEntry,numext::maxi(abs(work_matrix.coeff(p,p)), abs(work_matrix.coeff(q,q)))); + // and check whether the 2x2 block is already diagonal + RealScalar threshold = numext::maxi(considerAsZero, precision * maxDiagEntry); + return abs(work_matrix.coeff(p,q))>threshold || abs(work_matrix.coeff(q,p)) > threshold; + } +}; + +template +struct traits > + : traits<_MatrixType> +{ + typedef _MatrixType MatrixType; +}; + +} // end namespace internal + +/** \ingroup SVD_Module + * + * + * \class JacobiSVD + * + * \brief Two-sided Jacobi SVD decomposition of a rectangular matrix + * + * \tparam _MatrixType the type of the matrix of which we are computing the SVD decomposition + * \tparam QRPreconditioner this optional parameter allows to specify the type of QR decomposition that will be used internally + * for the R-SVD step for non-square matrices. See discussion of possible values below. 
+ * + * SVD decomposition consists in decomposing any n-by-p matrix \a A as a product + * \f[ A = U S V^* \f] + * where \a U is a n-by-n unitary, \a V is a p-by-p unitary, and \a S is a n-by-p real positive matrix which is zero outside of its main diagonal; + * the diagonal entries of S are known as the \em singular \em values of \a A and the columns of \a U and \a V are known as the left + * and right \em singular \em vectors of \a A respectively. + * + * Singular values are always sorted in decreasing order. + * + * This JacobiSVD decomposition computes only the singular values by default. If you want \a U or \a V, you need to ask for them explicitly. + * + * You can ask for only \em thin \a U or \a V to be computed, meaning the following. In case of a rectangular n-by-p matrix, letting \a m be the + * smaller value among \a n and \a p, there are only \a m singular vectors; the remaining columns of \a U and \a V do not correspond to actual + * singular vectors. Asking for \em thin \a U or \a V means asking for only their \a m first columns to be formed. So \a U is then a n-by-m matrix, + * and \a V is then a p-by-m matrix. Notice that thin \a U and \a V are all you need for (least squares) solving. + * + * Here's an example demonstrating basic usage: + * \include JacobiSVD_basic.cpp + * Output: \verbinclude JacobiSVD_basic.out + * + * This JacobiSVD class is a two-sided Jacobi R-SVD decomposition, ensuring optimal reliability and accuracy. The downside is that it's slower than + * bidiagonalizing SVD algorithms for large square matrices; however its complexity is still \f$ O(n^2p) \f$ where \a n is the smaller dimension and + * \a p is the greater dimension, meaning that it is still of the same order of complexity as the faster bidiagonalizing R-SVD algorithms. + * In particular, like any R-SVD, it takes advantage of non-squareness in that its complexity is only linear in the greater dimension. 
+ * + * If the input matrix has inf or nan coefficients, the result of the computation is undefined, but the computation is guaranteed to + * terminate in finite (and reasonable) time. + * + * The possible values for QRPreconditioner are: + * \li ColPivHouseholderQRPreconditioner is the default. In practice it's very safe. It uses column-pivoting QR. + * \li FullPivHouseholderQRPreconditioner, is the safest and slowest. It uses full-pivoting QR. + * Contrary to other QRs, it doesn't allow computing thin unitaries. + * \li HouseholderQRPreconditioner is the fastest, and less safe and accurate than the pivoting variants. It uses non-pivoting QR. + * This is very similar in safety and accuracy to the bidiagonalization process used by bidiagonalizing SVD algorithms (since bidiagonalization + * is inherently non-pivoting). However the resulting SVD is still more reliable than bidiagonalizing SVDs because the Jacobi-based iterarive + * process is more reliable than the optimized bidiagonal SVD iterations. + * \li NoQRPreconditioner allows not to use a QR preconditioner at all. This is useful if you know that you will only be computing + * JacobiSVD decompositions of square matrices. Non-square matrices require a QR preconditioner. Using this option will result in + * faster compilation and smaller executable code. It won't significantly speed up computation, since JacobiSVD is always checking + * if QR preconditioning is needed before applying it anyway. 
+ * + * \sa MatrixBase::jacobiSvd() + */ +template class JacobiSVD + : public SVDBase > +{ + typedef SVDBase Base; + public: + + typedef _MatrixType MatrixType; + typedef typename MatrixType::Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + enum { + RowsAtCompileTime = MatrixType::RowsAtCompileTime, + ColsAtCompileTime = MatrixType::ColsAtCompileTime, + DiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime,ColsAtCompileTime), + MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, + MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime, + MaxDiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(MaxRowsAtCompileTime,MaxColsAtCompileTime), + MatrixOptions = MatrixType::Options + }; + + typedef typename Base::MatrixUType MatrixUType; + typedef typename Base::MatrixVType MatrixVType; + typedef typename Base::SingularValuesType SingularValuesType; + + typedef typename internal::plain_row_type::type RowType; + typedef typename internal::plain_col_type::type ColType; + typedef Matrix + WorkMatrixType; + + /** \brief Default Constructor. + * + * The default constructor is useful in cases in which the user intends to + * perform decompositions via JacobiSVD::compute(const MatrixType&). + */ + JacobiSVD() + {} + + + /** \brief Default Constructor with memory preallocation + * + * Like the default constructor but with preallocation of the internal data + * according to the specified problem size. + * \sa JacobiSVD() + */ + JacobiSVD(Index rows, Index cols, unsigned int computationOptions = 0) + { + allocate(rows, cols, computationOptions); + } + + /** \brief Constructor performing the decomposition of given matrix. + * + * \param matrix the matrix to decompose + * \param computationOptions optional parameter allowing to specify if you want full or thin U or V unitaries to be computed. + * By default, none is computed. This is a bit-field, the possible bits are #ComputeFullU, #ComputeThinU, + * #ComputeFullV, #ComputeThinV. 
+ * + * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not + * available with the (non-default) FullPivHouseholderQR preconditioner. + */ + explicit JacobiSVD(const MatrixType& matrix, unsigned int computationOptions = 0) + { + compute(matrix, computationOptions); + } + + /** \brief Method performing the decomposition of given matrix using custom options. + * + * \param matrix the matrix to decompose + * \param computationOptions optional parameter allowing to specify if you want full or thin U or V unitaries to be computed. + * By default, none is computed. This is a bit-field, the possible bits are #ComputeFullU, #ComputeThinU, + * #ComputeFullV, #ComputeThinV. + * + * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not + * available with the (non-default) FullPivHouseholderQR preconditioner. + */ + JacobiSVD& compute(const MatrixType& matrix, unsigned int computationOptions); + + /** \brief Method performing the decomposition of given matrix using current options. + * + * \param matrix the matrix to decompose + * + * This method uses the current \a computationOptions, as already passed to the constructor or to compute(const MatrixType&, unsigned int). 
+ */ + JacobiSVD& compute(const MatrixType& matrix) + { + return compute(matrix, m_computationOptions); + } + + using Base::computeU; + using Base::computeV; + using Base::rows; + using Base::cols; + using Base::rank; + + private: + void allocate(Index rows, Index cols, unsigned int computationOptions); + + protected: + using Base::m_matrixU; + using Base::m_matrixV; + using Base::m_singularValues; + using Base::m_info; + using Base::m_isInitialized; + using Base::m_isAllocated; + using Base::m_usePrescribedThreshold; + using Base::m_computeFullU; + using Base::m_computeThinU; + using Base::m_computeFullV; + using Base::m_computeThinV; + using Base::m_computationOptions; + using Base::m_nonzeroSingularValues; + using Base::m_rows; + using Base::m_cols; + using Base::m_diagSize; + using Base::m_prescribedThreshold; + WorkMatrixType m_workMatrix; + + template + friend struct internal::svd_precondition_2x2_block_to_be_real; + template + friend struct internal::qr_preconditioner_impl; + + internal::qr_preconditioner_impl m_qr_precond_morecols; + internal::qr_preconditioner_impl m_qr_precond_morerows; + MatrixType m_scaledMatrix; +}; + +template +void JacobiSVD::allocate(Eigen::Index rows, Eigen::Index cols, unsigned int computationOptions) +{ + eigen_assert(rows >= 0 && cols >= 0); + + if (m_isAllocated && + rows == m_rows && + cols == m_cols && + computationOptions == m_computationOptions) + { + return; + } + + m_rows = rows; + m_cols = cols; + m_info = Success; + m_isInitialized = false; + m_isAllocated = true; + m_computationOptions = computationOptions; + m_computeFullU = (computationOptions & ComputeFullU) != 0; + m_computeThinU = (computationOptions & ComputeThinU) != 0; + m_computeFullV = (computationOptions & ComputeFullV) != 0; + m_computeThinV = (computationOptions & ComputeThinV) != 0; + eigen_assert(!(m_computeFullU && m_computeThinU) && "JacobiSVD: you can't ask for both full and thin U"); + eigen_assert(!(m_computeFullV && m_computeThinV) && "JacobiSVD: 
you can't ask for both full and thin V"); + eigen_assert(EIGEN_IMPLIES(m_computeThinU || m_computeThinV, MatrixType::ColsAtCompileTime==Dynamic) && + "JacobiSVD: thin U and V are only available when your matrix has a dynamic number of columns."); + if (QRPreconditioner == FullPivHouseholderQRPreconditioner) + { + eigen_assert(!(m_computeThinU || m_computeThinV) && + "JacobiSVD: can't compute thin U or thin V with the FullPivHouseholderQR preconditioner. " + "Use the ColPivHouseholderQR preconditioner instead."); + } + m_diagSize = (std::min)(m_rows, m_cols); + m_singularValues.resize(m_diagSize); + if(RowsAtCompileTime==Dynamic) + m_matrixU.resize(m_rows, m_computeFullU ? m_rows + : m_computeThinU ? m_diagSize + : 0); + if(ColsAtCompileTime==Dynamic) + m_matrixV.resize(m_cols, m_computeFullV ? m_cols + : m_computeThinV ? m_diagSize + : 0); + m_workMatrix.resize(m_diagSize, m_diagSize); + + if(m_cols>m_rows) m_qr_precond_morecols.allocate(*this); + if(m_rows>m_cols) m_qr_precond_morerows.allocate(*this); + if(m_rows!=m_cols) m_scaledMatrix.resize(rows,cols); +} + +template +JacobiSVD& +JacobiSVD::compute(const MatrixType& matrix, unsigned int computationOptions) +{ + using std::abs; + allocate(matrix.rows(), matrix.cols(), computationOptions); + + // currently we stop when we reach precision 2*epsilon as the last bit of precision can require an unreasonable number of iterations, + // only worsening the precision of U and V as we accumulate more rotations + const RealScalar precision = RealScalar(2) * NumTraits::epsilon(); + + // limit for denormal numbers to be considered zero in order to avoid infinite loops (see bug 286) + const RealScalar considerAsZero = (std::numeric_limits::min)(); + + // Scaling factor to reduce over/under-flows + RealScalar scale = matrix.cwiseAbs().template maxCoeff(); + if (!(numext::isfinite)(scale)) { + m_isInitialized = true; + m_info = InvalidInput; + m_nonzeroSingularValues = 0; + return *this; + } + if(scale==RealScalar(0)) scale = 
RealScalar(1); + + /*** step 1. The R-SVD step: we use a QR decomposition to reduce to the case of a square matrix */ + + if(m_rows!=m_cols) + { + m_scaledMatrix = matrix / scale; + m_qr_precond_morecols.run(*this, m_scaledMatrix); + m_qr_precond_morerows.run(*this, m_scaledMatrix); + } + else + { + m_workMatrix = matrix.block(0,0,m_diagSize,m_diagSize) / scale; + if(m_computeFullU) m_matrixU.setIdentity(m_rows,m_rows); + if(m_computeThinU) m_matrixU.setIdentity(m_rows,m_diagSize); + if(m_computeFullV) m_matrixV.setIdentity(m_cols,m_cols); + if(m_computeThinV) m_matrixV.setIdentity(m_cols, m_diagSize); + } + + /*** step 2. The main Jacobi SVD iteration. ***/ + RealScalar maxDiagEntry = m_workMatrix.cwiseAbs().diagonal().maxCoeff(); + + bool finished = false; + while(!finished) + { + finished = true; + + // do a sweep: for all index pairs (p,q), perform SVD of the corresponding 2x2 sub-matrix + + for(Index p = 1; p < m_diagSize; ++p) + { + for(Index q = 0; q < p; ++q) + { + // if this 2x2 sub-matrix is not diagonal already... + // notice that this comparison will evaluate to false if any NaN is involved, ensuring that NaN's don't + // keep us iterating forever. Similarly, small denormal numbers are considered zero. 
+ RealScalar threshold = numext::maxi(considerAsZero, precision * maxDiagEntry); + if(abs(m_workMatrix.coeff(p,q))>threshold || abs(m_workMatrix.coeff(q,p)) > threshold) + { + finished = false; + // perform SVD decomposition of 2x2 sub-matrix corresponding to indices p,q to make it diagonal + // the complex to real operation returns true if the updated 2x2 block is not already diagonal + if(internal::svd_precondition_2x2_block_to_be_real::run(m_workMatrix, *this, p, q, maxDiagEntry)) + { + JacobiRotation j_left, j_right; + internal::real_2x2_jacobi_svd(m_workMatrix, p, q, &j_left, &j_right); + + // accumulate resulting Jacobi rotations + m_workMatrix.applyOnTheLeft(p,q,j_left); + if(computeU()) m_matrixU.applyOnTheRight(p,q,j_left.transpose()); + + m_workMatrix.applyOnTheRight(p,q,j_right); + if(computeV()) m_matrixV.applyOnTheRight(p,q,j_right); + + // keep track of the largest diagonal coefficient + maxDiagEntry = numext::maxi(maxDiagEntry,numext::maxi(abs(m_workMatrix.coeff(p,p)), abs(m_workMatrix.coeff(q,q)))); + } + } + } + } + } + + /*** step 3. The work matrix is now diagonal, so ensure it's positive so its diagonal entries are the singular values ***/ + + for(Index i = 0; i < m_diagSize; ++i) + { + // For a complex matrix, some diagonal coefficients might note have been + // treated by svd_precondition_2x2_block_to_be_real, and the imaginary part + // of some diagonal entry might not be null. 
+ if(NumTraits::IsComplex && abs(numext::imag(m_workMatrix.coeff(i,i)))>considerAsZero) + { + RealScalar a = abs(m_workMatrix.coeff(i,i)); + m_singularValues.coeffRef(i) = abs(a); + if(computeU()) m_matrixU.col(i) *= m_workMatrix.coeff(i,i)/a; + } + else + { + // m_workMatrix.coeff(i,i) is already real, no difficulty: + RealScalar a = numext::real(m_workMatrix.coeff(i,i)); + m_singularValues.coeffRef(i) = abs(a); + if(computeU() && (a +JacobiSVD::PlainObject> +MatrixBase::jacobiSvd(unsigned int computationOptions) const +{ + return JacobiSVD(*this, computationOptions); +} + +} // end namespace Eigen + +#endif // EIGEN_JACOBISVD_H diff --git a/include/eigen/Eigen/src/SVD/JacobiSVD_LAPACKE.h b/include/eigen/Eigen/src/SVD/JacobiSVD_LAPACKE.h new file mode 100644 index 0000000000000000000000000000000000000000..ff0516f6116cd1f414388aefc8c14eaa7591100d --- /dev/null +++ b/include/eigen/Eigen/src/SVD/JacobiSVD_LAPACKE.h @@ -0,0 +1,91 @@ +/* + Copyright (c) 2011, Intel Corporation. All rights reserved. + + Redistribution and use in source and binary forms, with or without modification, + are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + ******************************************************************************** + * Content : Eigen bindings to LAPACKe + * Singular Value Decomposition - SVD. + ******************************************************************************** +*/ + +#ifndef EIGEN_JACOBISVD_LAPACKE_H +#define EIGEN_JACOBISVD_LAPACKE_H + +namespace Eigen { + +/** \internal Specialization for the data types supported by LAPACKe */ + +#define EIGEN_LAPACKE_SVD(EIGTYPE, LAPACKE_TYPE, LAPACKE_RTYPE, LAPACKE_PREFIX, EIGCOLROW, LAPACKE_COLROW) \ +template<> inline \ +JacobiSVD, ColPivHouseholderQRPreconditioner>& \ +JacobiSVD, ColPivHouseholderQRPreconditioner>::compute(const Matrix& matrix, unsigned int computationOptions) \ +{ \ + typedef Matrix MatrixType; \ + /*typedef MatrixType::Scalar Scalar;*/ \ + /*typedef MatrixType::RealScalar RealScalar;*/ \ + allocate(matrix.rows(), matrix.cols(), computationOptions); \ +\ + /*const RealScalar precision = RealScalar(2) * NumTraits::epsilon();*/ \ + m_nonzeroSingularValues = m_diagSize; \ +\ + lapack_int lda = internal::convert_index(matrix.outerStride()), ldu, ldvt; \ + lapack_int matrix_order = LAPACKE_COLROW; \ + char jobu, jobvt; \ + LAPACKE_TYPE *u, *vt, dummy; \ + jobu = (m_computeFullU) ? 'A' : (m_computeThinU) ? 'S' : 'N'; \ + jobvt = (m_computeFullV) ? 'A' : (m_computeThinV) ? 
'S' : 'N'; \ + if (computeU()) { \ + ldu = internal::convert_index(m_matrixU.outerStride()); \ + u = (LAPACKE_TYPE*)m_matrixU.data(); \ + } else { ldu=1; u=&dummy; }\ + MatrixType localV; \ + lapack_int vt_rows = (m_computeFullV) ? internal::convert_index(m_cols) : (m_computeThinV) ? internal::convert_index(m_diagSize) : 1; \ + if (computeV()) { \ + localV.resize(vt_rows, m_cols); \ + ldvt = internal::convert_index(localV.outerStride()); \ + vt = (LAPACKE_TYPE*)localV.data(); \ + } else { ldvt=1; vt=&dummy; }\ + Matrix superb; superb.resize(m_diagSize, 1); \ + MatrixType m_temp; m_temp = matrix; \ + LAPACKE_##LAPACKE_PREFIX##gesvd( matrix_order, jobu, jobvt, internal::convert_index(m_rows), internal::convert_index(m_cols), (LAPACKE_TYPE*)m_temp.data(), lda, (LAPACKE_RTYPE*)m_singularValues.data(), u, ldu, vt, ldvt, superb.data()); \ + if (computeV()) m_matrixV = localV.adjoint(); \ + /* for(int i=0;i +// Copyright (C) 2014 Gael Guennebaud +// +// Copyright (C) 2013 Gauthier Brun +// Copyright (C) 2013 Nicolas Carre +// Copyright (C) 2013 Jean Ceccato +// Copyright (C) 2013 Pierre Zoppitelli +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_SVDBASE_H +#define EIGEN_SVDBASE_H + +namespace Eigen { + +namespace internal { +template struct traits > + : traits +{ + typedef MatrixXpr XprKind; + typedef SolverStorage StorageKind; + typedef int StorageIndex; + enum { Flags = 0 }; +}; +} + +/** \ingroup SVD_Module + * + * + * \class SVDBase + * + * \brief Base class of SVD algorithms + * + * \tparam Derived the type of the actual SVD decomposition + * + * SVD decomposition consists in decomposing any n-by-p matrix \a A as a product + * \f[ A = U S V^* \f] + * where \a U is a n-by-n unitary, \a V is a p-by-p unitary, and \a S is a n-by-p real positive matrix which is zero outside of its main diagonal; + * the diagonal entries of S are known as the \em singular \em values of \a A and the columns of \a U and \a V are known as the left + * and right \em singular \em vectors of \a A respectively. + * + * Singular values are always sorted in decreasing order. + * + * + * You can ask for only \em thin \a U or \a V to be computed, meaning the following. In case of a rectangular n-by-p matrix, letting \a m be the + * smaller value among \a n and \a p, there are only \a m singular vectors; the remaining columns of \a U and \a V do not correspond to actual + * singular vectors. Asking for \em thin \a U or \a V means asking for only their \a m first columns to be formed. So \a U is then a n-by-m matrix, + * and \a V is then a p-by-m matrix. Notice that thin \a U and \a V are all you need for (least squares) solving. + * + * The status of the computation can be retrived using the \a info() method. Unless \a info() returns \a Success, the results should be not + * considered well defined. + * + * If the input matrix has inf or nan coefficients, the result of the computation is undefined, and \a info() will return \a InvalidInput, but the computation is guaranteed to + * terminate in finite (and reasonable) time. 
+ * \sa class BDCSVD, class JacobiSVD + */ +template class SVDBase + : public SolverBase > +{ +public: + + template + friend struct internal::solve_assertion; + + typedef typename internal::traits::MatrixType MatrixType; + typedef typename MatrixType::Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + typedef typename Eigen::internal::traits::StorageIndex StorageIndex; + typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3 + enum { + RowsAtCompileTime = MatrixType::RowsAtCompileTime, + ColsAtCompileTime = MatrixType::ColsAtCompileTime, + DiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime,ColsAtCompileTime), + MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, + MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime, + MaxDiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(MaxRowsAtCompileTime,MaxColsAtCompileTime), + MatrixOptions = MatrixType::Options + }; + + typedef Matrix MatrixUType; + typedef Matrix MatrixVType; + typedef typename internal::plain_diag_type::type SingularValuesType; + + Derived& derived() { return *static_cast(this); } + const Derived& derived() const { return *static_cast(this); } + + /** \returns the \a U matrix. + * + * For the SVD decomposition of a n-by-p matrix, letting \a m be the minimum of \a n and \a p, + * the U matrix is n-by-n if you asked for \link Eigen::ComputeFullU ComputeFullU \endlink, and is n-by-m if you asked for \link Eigen::ComputeThinU ComputeThinU \endlink. + * + * The \a m first columns of \a U are the left singular vectors of the matrix being decomposed. + * + * This method asserts that you asked for \a U to be computed. + */ + const MatrixUType& matrixU() const + { + _check_compute_assertions(); + eigen_assert(computeU() && "This SVD decomposition didn't compute U. Did you ask for it?"); + return m_matrixU; + } + + /** \returns the \a V matrix. 
+ * + * For the SVD decomposition of a n-by-p matrix, letting \a m be the minimum of \a n and \a p, + * the V matrix is p-by-p if you asked for \link Eigen::ComputeFullV ComputeFullV \endlink, and is p-by-m if you asked for \link Eigen::ComputeThinV ComputeThinV \endlink. + * + * The \a m first columns of \a V are the right singular vectors of the matrix being decomposed. + * + * This method asserts that you asked for \a V to be computed. + */ + const MatrixVType& matrixV() const + { + _check_compute_assertions(); + eigen_assert(computeV() && "This SVD decomposition didn't compute V. Did you ask for it?"); + return m_matrixV; + } + + /** \returns the vector of singular values. + * + * For the SVD decomposition of a n-by-p matrix, letting \a m be the minimum of \a n and \a p, the + * returned vector has size \a m. Singular values are always sorted in decreasing order. + */ + const SingularValuesType& singularValues() const + { + _check_compute_assertions(); + return m_singularValues; + } + + /** \returns the number of singular values that are not exactly 0 */ + Index nonzeroSingularValues() const + { + _check_compute_assertions(); + return m_nonzeroSingularValues; + } + + /** \returns the rank of the matrix of which \c *this is the SVD. + * + * \note This method has to determine which singular values should be considered nonzero. + * For that, it uses the threshold value that you can control by calling + * setThreshold(const RealScalar&). 
+ */ + inline Index rank() const + { + using std::abs; + _check_compute_assertions(); + if(m_singularValues.size()==0) return 0; + RealScalar premultiplied_threshold = numext::maxi(m_singularValues.coeff(0) * threshold(), (std::numeric_limits::min)()); + Index i = m_nonzeroSingularValues-1; + while(i>=0 && m_singularValues.coeff(i) < premultiplied_threshold) --i; + return i+1; + } + + /** Allows to prescribe a threshold to be used by certain methods, such as rank() and solve(), + * which need to determine when singular values are to be considered nonzero. + * This is not used for the SVD decomposition itself. + * + * When it needs to get the threshold value, Eigen calls threshold(). + * The default is \c NumTraits::epsilon() + * + * \param threshold The new value to use as the threshold. + * + * A singular value will be considered nonzero if its value is strictly greater than + * \f$ \vert singular value \vert \leqslant threshold \times \vert max singular value \vert \f$. + * + * If you want to come back to the default behavior, call setThreshold(Default_t) + */ + Derived& setThreshold(const RealScalar& threshold) + { + m_usePrescribedThreshold = true; + m_prescribedThreshold = threshold; + return derived(); + } + + /** Allows to come back to the default behavior, letting Eigen use its default formula for + * determining the threshold. + * + * You should pass the special object Eigen::Default as parameter here. + * \code svd.setThreshold(Eigen::Default); \endcode + * + * See the documentation of setThreshold(const RealScalar&). + */ + Derived& setThreshold(Default_t) + { + m_usePrescribedThreshold = false; + return derived(); + } + + /** Returns the threshold that will be used by certain methods such as rank(). + * + * See the documentation of setThreshold(const RealScalar&). 
+ */ + RealScalar threshold() const + { + eigen_assert(m_isInitialized || m_usePrescribedThreshold); + // this temporary is needed to workaround a MSVC issue + Index diagSize = (std::max)(1,m_diagSize); + return m_usePrescribedThreshold ? m_prescribedThreshold + : RealScalar(diagSize)*NumTraits::epsilon(); + } + + /** \returns true if \a U (full or thin) is asked for in this SVD decomposition */ + inline bool computeU() const { return m_computeFullU || m_computeThinU; } + /** \returns true if \a V (full or thin) is asked for in this SVD decomposition */ + inline bool computeV() const { return m_computeFullV || m_computeThinV; } + + inline Index rows() const { return m_rows; } + inline Index cols() const { return m_cols; } + + #ifdef EIGEN_PARSED_BY_DOXYGEN + /** \returns a (least squares) solution of \f$ A x = b \f$ using the current SVD decomposition of A. + * + * \param b the right-hand-side of the equation to solve. + * + * \note Solving requires both U and V to be computed. Thin U and V are enough, there is no need for full U or V. + * + * \note SVD solving is implicitly least-squares. Thus, this method serves both purposes of exact solving and least-squares solving. + * In other words, the returned solution is guaranteed to minimize the Euclidean norm \f$ \Vert A x - b \Vert \f$. + */ + template + inline const Solve + solve(const MatrixBase& b) const; + #endif + + + /** \brief Reports whether previous computation was successful. + * + * \returns \c Success if computation was successful. 
+ */ + EIGEN_DEVICE_FUNC + ComputationInfo info() const + { + eigen_assert(m_isInitialized && "SVD is not initialized."); + return m_info; + } + + #ifndef EIGEN_PARSED_BY_DOXYGEN + template + void _solve_impl(const RhsType &rhs, DstType &dst) const; + + template + void _solve_impl_transposed(const RhsType &rhs, DstType &dst) const; + #endif + +protected: + + static void check_template_parameters() + { + EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar); + } + + void _check_compute_assertions() const { + eigen_assert(m_isInitialized && "SVD is not initialized."); + } + + template + void _check_solve_assertion(const Rhs& b) const { + EIGEN_ONLY_USED_FOR_DEBUG(b); + _check_compute_assertions(); + eigen_assert(computeU() && computeV() && "SVDBase::solve(): Both unitaries U and V are required to be computed (thin unitaries suffice)."); + eigen_assert((Transpose_?cols():rows())==b.rows() && "SVDBase::solve(): invalid number of rows of the right hand side matrix b"); + } + + // return true if already allocated + bool allocate(Index rows, Index cols, unsigned int computationOptions) ; + + MatrixUType m_matrixU; + MatrixVType m_matrixV; + SingularValuesType m_singularValues; + ComputationInfo m_info; + bool m_isInitialized, m_isAllocated, m_usePrescribedThreshold; + bool m_computeFullU, m_computeThinU; + bool m_computeFullV, m_computeThinV; + unsigned int m_computationOptions; + Index m_nonzeroSingularValues, m_rows, m_cols, m_diagSize; + RealScalar m_prescribedThreshold; + + /** \brief Default Constructor. 
+ * + * Default constructor of SVDBase + */ + SVDBase() + : m_info(Success), + m_isInitialized(false), + m_isAllocated(false), + m_usePrescribedThreshold(false), + m_computeFullU(false), + m_computeThinU(false), + m_computeFullV(false), + m_computeThinV(false), + m_computationOptions(0), + m_rows(-1), m_cols(-1), m_diagSize(0) + { + check_template_parameters(); + } + + +}; + +#ifndef EIGEN_PARSED_BY_DOXYGEN +template +template +void SVDBase::_solve_impl(const RhsType &rhs, DstType &dst) const +{ + // A = U S V^* + // So A^{-1} = V S^{-1} U^* + + Matrix tmp; + Index l_rank = rank(); + tmp.noalias() = m_matrixU.leftCols(l_rank).adjoint() * rhs; + tmp = m_singularValues.head(l_rank).asDiagonal().inverse() * tmp; + dst = m_matrixV.leftCols(l_rank) * tmp; +} + +template +template +void SVDBase::_solve_impl_transposed(const RhsType &rhs, DstType &dst) const +{ + // A = U S V^* + // So A^{-*} = U S^{-1} V^* + // And A^{-T} = U_conj S^{-1} V^T + Matrix tmp; + Index l_rank = rank(); + + tmp.noalias() = m_matrixV.leftCols(l_rank).transpose().template conjugateIf() * rhs; + tmp = m_singularValues.head(l_rank).asDiagonal().inverse() * tmp; + dst = m_matrixU.template conjugateIf().leftCols(l_rank) * tmp; +} +#endif + +template +bool SVDBase::allocate(Index rows, Index cols, unsigned int computationOptions) +{ + eigen_assert(rows >= 0 && cols >= 0); + + if (m_isAllocated && + rows == m_rows && + cols == m_cols && + computationOptions == m_computationOptions) + { + return true; + } + + m_rows = rows; + m_cols = cols; + m_info = Success; + m_isInitialized = false; + m_isAllocated = true; + m_computationOptions = computationOptions; + m_computeFullU = (computationOptions & ComputeFullU) != 0; + m_computeThinU = (computationOptions & ComputeThinU) != 0; + m_computeFullV = (computationOptions & ComputeFullV) != 0; + m_computeThinV = (computationOptions & ComputeThinV) != 0; + eigen_assert(!(m_computeFullU && m_computeThinU) && "SVDBase: you can't ask for both full and thin U"); + 
eigen_assert(!(m_computeFullV && m_computeThinV) && "SVDBase: you can't ask for both full and thin V"); + eigen_assert(EIGEN_IMPLIES(m_computeThinU || m_computeThinV, MatrixType::ColsAtCompileTime==Dynamic) && + "SVDBase: thin U and V are only available when your matrix has a dynamic number of columns."); + + m_diagSize = (std::min)(m_rows, m_cols); + m_singularValues.resize(m_diagSize); + if(RowsAtCompileTime==Dynamic) + m_matrixU.resize(m_rows, m_computeFullU ? m_rows : m_computeThinU ? m_diagSize : 0); + if(ColsAtCompileTime==Dynamic) + m_matrixV.resize(m_cols, m_computeFullV ? m_cols : m_computeThinV ? m_diagSize : 0); + + return false; +} + +}// end namespace + +#endif // EIGEN_SVDBASE_H diff --git a/include/eigen/Eigen/src/SVD/UpperBidiagonalization.h b/include/eigen/Eigen/src/SVD/UpperBidiagonalization.h new file mode 100644 index 0000000000000000000000000000000000000000..a5b2f60d2122f7fedcc2bc4f1ce28a98828ec2f5 --- /dev/null +++ b/include/eigen/Eigen/src/SVD/UpperBidiagonalization.h @@ -0,0 +1,415 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2010 Benoit Jacob +// Copyright (C) 2013-2014 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_BIDIAGONALIZATION_H +#define EIGEN_BIDIAGONALIZATION_H + +namespace Eigen { + +namespace internal { +// UpperBidiagonalization will probably be replaced by a Bidiagonalization class, don't want to make it stable API. +// At the same time, it's useful to keep for now as it's about the only thing that is testing the BandMatrix class. 
+ +template class UpperBidiagonalization +{ + public: + + typedef _MatrixType MatrixType; + enum { + RowsAtCompileTime = MatrixType::RowsAtCompileTime, + ColsAtCompileTime = MatrixType::ColsAtCompileTime, + ColsAtCompileTimeMinusOne = internal::decrement_size::ret + }; + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::RealScalar RealScalar; + typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3 + typedef Matrix RowVectorType; + typedef Matrix ColVectorType; + typedef BandMatrix BidiagonalType; + typedef Matrix DiagVectorType; + typedef Matrix SuperDiagVectorType; + typedef HouseholderSequence< + const MatrixType, + const typename internal::remove_all::ConjugateReturnType>::type + > HouseholderUSequenceType; + typedef HouseholderSequence< + const typename internal::remove_all::type, + Diagonal, + OnTheRight + > HouseholderVSequenceType; + + /** + * \brief Default Constructor. + * + * The default constructor is useful in cases in which the user intends to + * perform decompositions via Bidiagonalization::compute(const MatrixType&). 
+ */ + UpperBidiagonalization() : m_householder(), m_bidiagonal(), m_isInitialized(false) {} + + explicit UpperBidiagonalization(const MatrixType& matrix) + : m_householder(matrix.rows(), matrix.cols()), + m_bidiagonal(matrix.cols(), matrix.cols()), + m_isInitialized(false) + { + compute(matrix); + } + + UpperBidiagonalization& compute(const MatrixType& matrix); + UpperBidiagonalization& computeUnblocked(const MatrixType& matrix); + + const MatrixType& householder() const { return m_householder; } + const BidiagonalType& bidiagonal() const { return m_bidiagonal; } + + const HouseholderUSequenceType householderU() const + { + eigen_assert(m_isInitialized && "UpperBidiagonalization is not initialized."); + return HouseholderUSequenceType(m_householder, m_householder.diagonal().conjugate()); + } + + const HouseholderVSequenceType householderV() // const here gives nasty errors and i'm lazy + { + eigen_assert(m_isInitialized && "UpperBidiagonalization is not initialized."); + return HouseholderVSequenceType(m_householder.conjugate(), m_householder.const_derived().template diagonal<1>()) + .setLength(m_householder.cols()-1) + .setShift(1); + } + + protected: + MatrixType m_householder; + BidiagonalType m_bidiagonal; + bool m_isInitialized; +}; + +// Standard upper bidiagonalization without fancy optimizations +// This version should be faster for small matrix size +template +void upperbidiagonalization_inplace_unblocked(MatrixType& mat, + typename MatrixType::RealScalar *diagonal, + typename MatrixType::RealScalar *upper_diagonal, + typename MatrixType::Scalar* tempData = 0) +{ + typedef typename MatrixType::Scalar Scalar; + + Index rows = mat.rows(); + Index cols = mat.cols(); + + typedef Matrix TempType; + TempType tempVector; + if(tempData==0) + { + tempVector.resize(rows); + tempData = tempVector.data(); + } + + for (Index k = 0; /* breaks at k==cols-1 below */ ; ++k) + { + Index remainingRows = rows - k; + Index remainingCols = cols - k - 1; + + // construct left 
householder transform in-place in A + mat.col(k).tail(remainingRows) + .makeHouseholderInPlace(mat.coeffRef(k,k), diagonal[k]); + // apply householder transform to remaining part of A on the left + mat.bottomRightCorner(remainingRows, remainingCols) + .applyHouseholderOnTheLeft(mat.col(k).tail(remainingRows-1), mat.coeff(k,k), tempData); + + if(k == cols-1) break; + + // construct right householder transform in-place in mat + mat.row(k).tail(remainingCols) + .makeHouseholderInPlace(mat.coeffRef(k,k+1), upper_diagonal[k]); + // apply householder transform to remaining part of mat on the left + mat.bottomRightCorner(remainingRows-1, remainingCols) + .applyHouseholderOnTheRight(mat.row(k).tail(remainingCols-1).adjoint(), mat.coeff(k,k+1), tempData); + } +} + +/** \internal + * Helper routine for the block reduction to upper bidiagonal form. + * + * Let's partition the matrix A: + * + * | A00 A01 | + * A = | | + * | A10 A11 | + * + * This function reduces to bidiagonal form the left \c rows x \a blockSize vertical panel [A00/A10] + * and the \a blockSize x \c cols horizontal panel [A00 A01] of the matrix \a A. The bottom-right block A11 + * is updated using matrix-matrix products: + * A22 -= V * Y^T - X * U^T + * where V and U contains the left and right Householder vectors. U and V are stored in A10, and A01 + * respectively, and the update matrices X and Y are computed during the reduction. + * + */ +template +void upperbidiagonalization_blocked_helper(MatrixType& A, + typename MatrixType::RealScalar *diagonal, + typename MatrixType::RealScalar *upper_diagonal, + Index bs, + Ref::Flags & RowMajorBit> > X, + Ref::Flags & RowMajorBit> > Y) +{ + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::RealScalar RealScalar; + typedef typename NumTraits::Literal Literal; + static const int StorageOrder = + (traits::Flags & RowMajorBit) ? 
RowMajor : ColMajor; + typedef InnerStride ColInnerStride; + typedef InnerStride RowInnerStride; + typedef Ref, 0, ColInnerStride> SubColumnType; + typedef Ref, 0, RowInnerStride> SubRowType; + typedef Ref > SubMatType; + + Index brows = A.rows(); + Index bcols = A.cols(); + + Scalar tau_u, tau_u_prev(0), tau_v; + + for(Index k = 0; k < bs; ++k) + { + Index remainingRows = brows - k; + Index remainingCols = bcols - k - 1; + + SubMatType X_k1( X.block(k,0, remainingRows,k) ); + SubMatType V_k1( A.block(k,0, remainingRows,k) ); + + // 1 - update the k-th column of A + SubColumnType v_k = A.col(k).tail(remainingRows); + v_k -= V_k1 * Y.row(k).head(k).adjoint(); + if(k) v_k -= X_k1 * A.col(k).head(k); + + // 2 - construct left Householder transform in-place + v_k.makeHouseholderInPlace(tau_v, diagonal[k]); + + if(k+10) A.coeffRef(k-1,k) = tau_u_prev; + tau_u_prev = tau_u; + } + else + A.coeffRef(k-1,k) = tau_u_prev; + + A.coeffRef(k,k) = tau_v; + } + + if(bsbs && brows>bs) + { + SubMatType A11( A.bottomRightCorner(brows-bs,bcols-bs) ); + SubMatType A10( A.block(bs,0, brows-bs,bs) ); + SubMatType A01( A.block(0,bs, bs,bcols-bs) ); + Scalar tmp = A01(bs-1,0); + A01(bs-1,0) = Literal(1); + A11.noalias() -= A10 * Y.topLeftCorner(bcols,bs).bottomRows(bcols-bs).adjoint(); + A11.noalias() -= X.topLeftCorner(brows,bs).bottomRows(brows-bs) * A01; + A01(bs-1,0) = tmp; + } +} + +/** \internal + * + * Implementation of a block-bidiagonal reduction. + * It is based on the following paper: + * The Design of a Parallel Dense Linear Algebra Software Library: Reduction to Hessenberg, Tridiagonal, and Bidiagonal Form. + * by Jaeyoung Choi, Jack J. Dongarra, David W. Walker. 
(1995) + * section 3.3 + */ +template +void upperbidiagonalization_inplace_blocked(MatrixType& A, BidiagType& bidiagonal, + Index maxBlockSize=32, + typename MatrixType::Scalar* /*tempData*/ = 0) +{ + typedef typename MatrixType::Scalar Scalar; + typedef Block BlockType; + + Index rows = A.rows(); + Index cols = A.cols(); + Index size = (std::min)(rows, cols); + + // X and Y are work space + enum { StorageOrder = (traits::Flags & RowMajorBit) ? RowMajor : ColMajor }; + Matrix X(rows,maxBlockSize); + Matrix Y(cols,maxBlockSize); + Index blockSize = (std::min)(maxBlockSize,size); + + Index k = 0; + for(k = 0; k < size; k += blockSize) + { + Index bs = (std::min)(size-k,blockSize); // actual size of the block + Index brows = rows - k; // rows of the block + Index bcols = cols - k; // columns of the block + + // partition the matrix A: + // + // | A00 A01 A02 | + // | | + // A = | A10 A11 A12 | + // | | + // | A20 A21 A22 | + // + // where A11 is a bs x bs diagonal block, + // and let: + // | A11 A12 | + // B = | | + // | A21 A22 | + + BlockType B = A.block(k,k,brows,bcols); + + // This stage performs the bidiagonalization of A11, A21, A12, and updating of A22. + // Finally, the algorithm continue on the updated A22. 
+ // + // However, if B is too small, or A22 empty, then let's use an unblocked strategy + if(k+bs==cols || bcols<48) // somewhat arbitrary threshold + { + upperbidiagonalization_inplace_unblocked(B, + &(bidiagonal.template diagonal<0>().coeffRef(k)), + &(bidiagonal.template diagonal<1>().coeffRef(k)), + X.data() + ); + break; // We're done + } + else + { + upperbidiagonalization_blocked_helper( B, + &(bidiagonal.template diagonal<0>().coeffRef(k)), + &(bidiagonal.template diagonal<1>().coeffRef(k)), + bs, + X.topLeftCorner(brows,bs), + Y.topLeftCorner(bcols,bs) + ); + } + } +} + +template +UpperBidiagonalization<_MatrixType>& UpperBidiagonalization<_MatrixType>::computeUnblocked(const _MatrixType& matrix) +{ + Index rows = matrix.rows(); + Index cols = matrix.cols(); + EIGEN_ONLY_USED_FOR_DEBUG(cols); + + eigen_assert(rows >= cols && "UpperBidiagonalization is only for Arices satisfying rows>=cols."); + + m_householder = matrix; + + ColVectorType temp(rows); + + upperbidiagonalization_inplace_unblocked(m_householder, + &(m_bidiagonal.template diagonal<0>().coeffRef(0)), + &(m_bidiagonal.template diagonal<1>().coeffRef(0)), + temp.data()); + + m_isInitialized = true; + return *this; +} + +template +UpperBidiagonalization<_MatrixType>& UpperBidiagonalization<_MatrixType>::compute(const _MatrixType& matrix) +{ + Index rows = matrix.rows(); + Index cols = matrix.cols(); + EIGEN_ONLY_USED_FOR_DEBUG(rows); + EIGEN_ONLY_USED_FOR_DEBUG(cols); + + eigen_assert(rows >= cols && "UpperBidiagonalization is only for Arices satisfying rows>=cols."); + + m_householder = matrix; + upperbidiagonalization_inplace_blocked(m_householder, m_bidiagonal); + + m_isInitialized = true; + return *this; +} + +#if 0 +/** \return the Householder QR decomposition of \c *this. 
+ * + * \sa class Bidiagonalization + */ +template +const UpperBidiagonalization::PlainObject> +MatrixBase::bidiagonalization() const +{ + return UpperBidiagonalization(eval()); +} +#endif + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_BIDIAGONALIZATION_H diff --git a/include/eigen/Eigen/src/plugins/CommonCwiseBinaryOps.h b/include/eigen/Eigen/src/plugins/CommonCwiseBinaryOps.h new file mode 100644 index 0000000000000000000000000000000000000000..8b6730ede025dd4c9600baf29a7fe6fafc4f79bc --- /dev/null +++ b/include/eigen/Eigen/src/plugins/CommonCwiseBinaryOps.h @@ -0,0 +1,115 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2016 Gael Guennebaud +// Copyright (C) 2006-2008 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +// This file is a base class plugin containing common coefficient wise functions. + +/** \returns an expression of the difference of \c *this and \a other + * + * \note If you want to substract a given scalar from all coefficients, see Cwise::operator-(). + * + * \sa class CwiseBinaryOp, operator-=() + */ +EIGEN_MAKE_CWISE_BINARY_OP(operator-,difference) + +/** \returns an expression of the sum of \c *this and \a other + * + * \note If you want to add a given scalar to all coefficients, see Cwise::operator+(). 
+ * + * \sa class CwiseBinaryOp, operator+=() + */ +EIGEN_MAKE_CWISE_BINARY_OP(operator+,sum) + +/** \returns an expression of a custom coefficient-wise operator \a func of *this and \a other + * + * The template parameter \a CustomBinaryOp is the type of the functor + * of the custom operator (see class CwiseBinaryOp for an example) + * + * Here is an example illustrating the use of custom functors: + * \include class_CwiseBinaryOp.cpp + * Output: \verbinclude class_CwiseBinaryOp.out + * + * \sa class CwiseBinaryOp, operator+(), operator-(), cwiseProduct() + */ +template +EIGEN_DEVICE_FUNC +EIGEN_STRONG_INLINE const CwiseBinaryOp +binaryExpr(const EIGEN_CURRENT_STORAGE_BASE_CLASS &other, const CustomBinaryOp& func = CustomBinaryOp()) const +{ + return CwiseBinaryOp(derived(), other.derived(), func); +} + + +#ifndef EIGEN_PARSED_BY_DOXYGEN +EIGEN_MAKE_SCALAR_BINARY_OP(operator*,product) +#else +/** \returns an expression of \c *this scaled by the scalar factor \a scalar + * + * \tparam T is the scalar type of \a scalar. It must be compatible with the scalar type of the given expression. + */ +template +const CwiseBinaryOp,Derived,Constant > operator*(const T& scalar) const; +/** \returns an expression of \a expr scaled by the scalar factor \a scalar + * + * \tparam T is the scalar type of \a scalar. It must be compatible with the scalar type of the given expression. + */ +template friend +const CwiseBinaryOp,Constant,Derived> operator*(const T& scalar, const StorageBaseType& expr); +#endif + + + +#ifndef EIGEN_PARSED_BY_DOXYGEN +EIGEN_MAKE_SCALAR_BINARY_OP_ONTHERIGHT(operator/,quotient) +#else +/** \returns an expression of \c *this divided by the scalar value \a scalar + * + * \tparam T is the scalar type of \a scalar. It must be compatible with the scalar type of the given expression. 
+ */ +template +const CwiseBinaryOp,Derived,Constant > operator/(const T& scalar) const; +#endif + +/** \returns an expression of the coefficient-wise boolean \b and operator of \c *this and \a other + * + * \warning this operator is for expression of bool only. + * + * Example: \include Cwise_boolean_and.cpp + * Output: \verbinclude Cwise_boolean_and.out + * + * \sa operator||(), select() + */ +template +EIGEN_DEVICE_FUNC +inline const CwiseBinaryOp +operator&&(const EIGEN_CURRENT_STORAGE_BASE_CLASS &other) const +{ + EIGEN_STATIC_ASSERT((internal::is_same::value && internal::is_same::value), + THIS_METHOD_IS_ONLY_FOR_EXPRESSIONS_OF_BOOL); + return CwiseBinaryOp(derived(),other.derived()); +} + +/** \returns an expression of the coefficient-wise boolean \b or operator of \c *this and \a other + * + * \warning this operator is for expression of bool only. + * + * Example: \include Cwise_boolean_or.cpp + * Output: \verbinclude Cwise_boolean_or.out + * + * \sa operator&&(), select() + */ +template +EIGEN_DEVICE_FUNC +inline const CwiseBinaryOp +operator||(const EIGEN_CURRENT_STORAGE_BASE_CLASS &other) const +{ + EIGEN_STATIC_ASSERT((internal::is_same::value && internal::is_same::value), + THIS_METHOD_IS_ONLY_FOR_EXPRESSIONS_OF_BOOL); + return CwiseBinaryOp(derived(),other.derived()); +} diff --git a/include/eigen/Eigen/src/plugins/IndexedViewMethods.h b/include/eigen/Eigen/src/plugins/IndexedViewMethods.h new file mode 100644 index 0000000000000000000000000000000000000000..15c35b0bf886fa964c38e86ce9386adefc01788a --- /dev/null +++ b/include/eigen/Eigen/src/plugins/IndexedViewMethods.h @@ -0,0 +1,262 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2017 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#if !defined(EIGEN_PARSED_BY_DOXYGEN) + +// This file is automatically included twice to generate const and non-const versions + +#ifndef EIGEN_INDEXED_VIEW_METHOD_2ND_PASS +#define EIGEN_INDEXED_VIEW_METHOD_CONST const +#define EIGEN_INDEXED_VIEW_METHOD_TYPE ConstIndexedViewType +#else +#define EIGEN_INDEXED_VIEW_METHOD_CONST +#define EIGEN_INDEXED_VIEW_METHOD_TYPE IndexedViewType +#endif + +#ifndef EIGEN_INDEXED_VIEW_METHOD_2ND_PASS +protected: + +// define some aliases to ease readability + +template +struct IvcRowType : public internal::IndexedViewCompatibleType {}; + +template +struct IvcColType : public internal::IndexedViewCompatibleType {}; + +template +struct IvcType : public internal::IndexedViewCompatibleType {}; + +typedef typename internal::IndexedViewCompatibleType::type IvcIndex; + +template +typename IvcRowType::type +ivcRow(const Indices& indices) const { + return internal::makeIndexedViewCompatible(indices, internal::variable_if_dynamic(derived().rows()),Specialized); +} + +template +typename IvcColType::type +ivcCol(const Indices& indices) const { + return internal::makeIndexedViewCompatible(indices, internal::variable_if_dynamic(derived().cols()),Specialized); +} + +template +typename IvcColType::type +ivcSize(const Indices& indices) const { + return internal::makeIndexedViewCompatible(indices, internal::variable_if_dynamic(derived().size()),Specialized); +} + +public: + +#endif + +template +struct EIGEN_INDEXED_VIEW_METHOD_TYPE { + typedef IndexedView::type, + typename IvcColType::type> type; +}; + +// This is the generic version + +template +typename internal::enable_if::value + && internal::traits::type>::ReturnAsIndexedView, + typename EIGEN_INDEXED_VIEW_METHOD_TYPE::type >::type +operator()(const RowIndices& rowIndices, const ColIndices& colIndices) EIGEN_INDEXED_VIEW_METHOD_CONST +{ + return typename EIGEN_INDEXED_VIEW_METHOD_TYPE::type + (derived(), ivcRow(rowIndices), ivcCol(colIndices)); +} + +// The following overload returns a 
Block<> object + +template +typename internal::enable_if::value + && internal::traits::type>::ReturnAsBlock, + typename internal::traits::type>::BlockType>::type +operator()(const RowIndices& rowIndices, const ColIndices& colIndices) EIGEN_INDEXED_VIEW_METHOD_CONST +{ + typedef typename internal::traits::type>::BlockType BlockType; + typename IvcRowType::type actualRowIndices = ivcRow(rowIndices); + typename IvcColType::type actualColIndices = ivcCol(colIndices); + return BlockType(derived(), + internal::first(actualRowIndices), + internal::first(actualColIndices), + internal::index_list_size(actualRowIndices), + internal::index_list_size(actualColIndices)); +} + +// The following overload returns a Scalar + +template +typename internal::enable_if::value + && internal::traits::type>::ReturnAsScalar, + CoeffReturnType >::type +operator()(const RowIndices& rowIndices, const ColIndices& colIndices) EIGEN_INDEXED_VIEW_METHOD_CONST +{ + return Base::operator()(internal::eval_expr_given_size(rowIndices,rows()),internal::eval_expr_given_size(colIndices,cols())); +} + +#if EIGEN_HAS_STATIC_ARRAY_TEMPLATE + +// The following three overloads are needed to handle raw Index[N] arrays. 
+ +template +IndexedView::type> +operator()(const RowIndicesT (&rowIndices)[RowIndicesN], const ColIndices& colIndices) EIGEN_INDEXED_VIEW_METHOD_CONST +{ + return IndexedView::type> + (derived(), rowIndices, ivcCol(colIndices)); +} + +template +IndexedView::type, const ColIndicesT (&)[ColIndicesN]> +operator()(const RowIndices& rowIndices, const ColIndicesT (&colIndices)[ColIndicesN]) EIGEN_INDEXED_VIEW_METHOD_CONST +{ + return IndexedView::type,const ColIndicesT (&)[ColIndicesN]> + (derived(), ivcRow(rowIndices), colIndices); +} + +template +IndexedView +operator()(const RowIndicesT (&rowIndices)[RowIndicesN], const ColIndicesT (&colIndices)[ColIndicesN]) EIGEN_INDEXED_VIEW_METHOD_CONST +{ + return IndexedView + (derived(), rowIndices, colIndices); +} + +#endif // EIGEN_HAS_STATIC_ARRAY_TEMPLATE + +// Overloads for 1D vectors/arrays + +template +typename internal::enable_if< + IsRowMajor && (!(internal::get_compile_time_incr::type>::value==1 || internal::is_valid_index_type::value)), + IndexedView::type> >::type +operator()(const Indices& indices) EIGEN_INDEXED_VIEW_METHOD_CONST +{ + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + return IndexedView::type> + (derived(), IvcIndex(0), ivcCol(indices)); +} + +template +typename internal::enable_if< + (!IsRowMajor) && (!(internal::get_compile_time_incr::type>::value==1 || internal::is_valid_index_type::value)), + IndexedView::type,IvcIndex> >::type +operator()(const Indices& indices) EIGEN_INDEXED_VIEW_METHOD_CONST +{ + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + return IndexedView::type,IvcIndex> + (derived(), ivcRow(indices), IvcIndex(0)); +} + +template +typename internal::enable_if< + (internal::get_compile_time_incr::type>::value==1) && (!internal::is_valid_index_type::value) && (!symbolic::is_symbolic::value), + VectorBlock::value> >::type +operator()(const Indices& indices) EIGEN_INDEXED_VIEW_METHOD_CONST +{ + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + typename IvcType::type actualIndices = ivcSize(indices); + 
return VectorBlock::value> + (derived(), internal::first(actualIndices), internal::index_list_size(actualIndices)); +} + +template +typename internal::enable_if::value, CoeffReturnType >::type +operator()(const IndexType& id) EIGEN_INDEXED_VIEW_METHOD_CONST +{ + return Base::operator()(internal::eval_expr_given_size(id,size())); +} + +#if EIGEN_HAS_STATIC_ARRAY_TEMPLATE + +template +typename internal::enable_if >::type +operator()(const IndicesT (&indices)[IndicesN]) EIGEN_INDEXED_VIEW_METHOD_CONST +{ + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + return IndexedView + (derived(), IvcIndex(0), indices); +} + +template +typename internal::enable_if >::type +operator()(const IndicesT (&indices)[IndicesN]) EIGEN_INDEXED_VIEW_METHOD_CONST +{ + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + return IndexedView + (derived(), indices, IvcIndex(0)); +} + +#endif // EIGEN_HAS_STATIC_ARRAY_TEMPLATE + +#undef EIGEN_INDEXED_VIEW_METHOD_CONST +#undef EIGEN_INDEXED_VIEW_METHOD_TYPE + +#ifndef EIGEN_INDEXED_VIEW_METHOD_2ND_PASS +#define EIGEN_INDEXED_VIEW_METHOD_2ND_PASS +#include "IndexedViewMethods.h" +#undef EIGEN_INDEXED_VIEW_METHOD_2ND_PASS +#endif + +#else // EIGEN_PARSED_BY_DOXYGEN + +/** + * \returns a generic submatrix view defined by the rows and columns indexed \a rowIndices and \a colIndices respectively. + * + * Each parameter must either be: + * - An integer indexing a single row or column + * - Eigen::all indexing the full set of respective rows or columns in increasing order + * - An ArithmeticSequence as returned by the Eigen::seq and Eigen::seqN functions + * - Any %Eigen's vector/array of integers or expressions + * - Plain C arrays: \c int[N] + * - And more generally any type exposing the following two member functions: + * \code + * operator[]() const; + * size() const; + * \endcode + * where \c stands for any integer type compatible with Eigen::Index (i.e. \c std::ptrdiff_t). 
+ * + * The last statement implies compatibility with \c std::vector, \c std::valarray, \c std::array, many of the Range-v3's ranges, etc. + * + * If the submatrix can be represented using a starting position \c (i,j) and positive sizes \c (rows,columns), then this + * method will returns a Block object after extraction of the relevant information from the passed arguments. This is the case + * when all arguments are either: + * - An integer + * - Eigen::all + * - An ArithmeticSequence with compile-time increment strictly equal to 1, as returned by Eigen::seq(a,b), and Eigen::seqN(a,N). + * + * Otherwise a more general IndexedView object will be returned, after conversion of the inputs + * to more suitable types \c RowIndices' and \c ColIndices'. + * + * For 1D vectors and arrays, you better use the operator()(const Indices&) overload, which behave the same way but taking a single parameter. + * + * See also this question and its answer for an example of how to duplicate coefficients. + * + * \sa operator()(const Indices&), class Block, class IndexedView, DenseBase::block(Index,Index,Index,Index) + */ +template +IndexedView_or_Block +operator()(const RowIndices& rowIndices, const ColIndices& colIndices); + +/** This is an overload of operator()(const RowIndices&, const ColIndices&) for 1D vectors or arrays + * + * \only_for_vectors + */ +template +IndexedView_or_VectorBlock +operator()(const Indices& indices); + +#endif // EIGEN_PARSED_BY_DOXYGEN diff --git a/include/eigen/doc/CoeffwiseMathFunctionsTable.dox b/include/eigen/doc/CoeffwiseMathFunctionsTable.dox new file mode 100644 index 0000000000000000000000000000000000000000..48fd1a2c071ea94d4f79d313175e594fdd9eb6ca --- /dev/null +++ b/include/eigen/doc/CoeffwiseMathFunctionsTable.dox @@ -0,0 +1,600 @@ +namespace Eigen { + +/** \eigenManualPage CoeffwiseMathFunctions Catalog of coefficient-wise math functions + + + + +This table presents a catalog of the coefficient-wise math functions supported by %Eigen. 
+In this table, \c a, \c b, refer to Array objects or expressions, and \c m refers to a linear algebra Matrix/Vector object. Standard scalar types are abbreviated as follows: + - \c int: \c i32 + - \c float: \c f + - \c double: \c d + - \c std::complex: \c cf + - \c std::complex: \c cd + +For each row, the first column lists the equivalent calls for arrays, and matrices when supported. Of course, all functions are available for matrices by first casting them as arrays: \c m.array(). + +The third column gives some hints about the underlying scalar implementation. In most cases, %Eigen does not implement the math function itself but relies on the STL for standard scalar types, or user-provided functions for custom scalar types. +For instance, some simply call the respective function of the STL while preserving argument-dependent lookup for custom types. +The following: +\code +using std::foo; +foo(a[i]); +\endcode +means that the STL's function \c std::foo will be potentially called if it is compatible with the underlying scalar type. If not, then the user must ensure that an overload of the function foo is available for the given scalar type (usually defined in the same namespace as the given scalar type). +This also means that, unless specified, if the function \c std::foo is available only in some recent c++ versions (e.g., c++11), then the respective %Eigen's function/method will be usable on standard types only if the compiler supports the required c++ version. 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
APIDescriptionDefault scalar implementationSIMD
Basic operations
+ \anchor cwisetable_abs + a.\link ArrayBase::abs abs\endlink(); \n + \link Eigen::abs abs\endlink(a); \n + m.\link MatrixBase::cwiseAbs cwiseAbs\endlink(); + absolute value (\f$ |a_i| \f$) + using std::abs; \n + abs(a[i]); + SSE2, AVX (i32,f,d)
+ \anchor cwisetable_inverse + a.\link ArrayBase::inverse inverse\endlink(); \n + \link Eigen::inverse inverse\endlink(a); \n + m.\link MatrixBase::cwiseInverse cwiseInverse\endlink(); + inverse value (\f$ 1/a_i \f$) + 1/a[i]; + All engines (f,d,fc,fd)
+ \anchor cwisetable_conj + a.\link ArrayBase::conjugate conjugate\endlink(); \n + \link Eigen::conj conj\endlink(a); \n + m.\link MatrixBase::conjugate conjugate\endlink(); + complex conjugate (\f$ \bar{a_i} \f$),\n + no-op for real + using std::conj; \n + conj(a[i]); + All engines (fc,fd)
+ \anchor cwisetable_arg + a.\link ArrayBase::arg arg\endlink(); \n + \link Eigen::arg arg\endlink(a); \n + m.\link MatrixBase::cwiseArg cwiseArg\endlink(); + phase angle of complex number + using std::arg; \n + arg(a[i]); + All engines (fc,fd)
Exponential functions
+ \anchor cwisetable_exp + a.\link ArrayBase::exp exp\endlink(); \n + \link Eigen::exp exp\endlink(a); + \f$ e \f$ raised to the given power (\f$ e^{a_i} \f$) + using std::exp; \n + exp(a[i]); + SSE2, AVX (f,d)
+ \anchor cwisetable_log + a.\link ArrayBase::log log\endlink(); \n + \link Eigen::log log\endlink(a); + natural (base \f$ e \f$) logarithm (\f$ \ln({a_i}) \f$) + using std::log; \n + log(a[i]); + SSE2, AVX (f)
+ \anchor cwisetable_log1p + a.\link ArrayBase::log1p log1p\endlink(); \n + \link Eigen::log1p log1p\endlink(a); + natural (base \f$ e \f$) logarithm of 1 plus \n the given number (\f$ \ln({1+a_i}) \f$)built-in generic implementation based on \c log,\n + plus \c using \c std::log1p ; \cpp11
+ \anchor cwisetable_log10 + a.\link ArrayBase::log10 log10\endlink(); \n + \link Eigen::log10 log10\endlink(a); + base 10 logarithm (\f$ \log_{10}({a_i}) \f$) + using std::log10; \n + log10(a[i]); +
Power functions
+ \anchor cwisetable_pow + a.\link ArrayBase::pow pow\endlink(b); \n + \link ArrayBase::pow(const Eigen::ArrayBase< Derived > &x, const Eigen::ArrayBase< ExponentDerived > &exponents) pow\endlink(a,b); + raises a number to the given power (\f$ a_i ^ {b_i} \f$) \n \c a and \c b can be either an array or scalar. + using std::pow; \n + pow(a[i],b[i]);\n + (plus builtin for integer types)
+ \anchor cwisetable_sqrt + a.\link ArrayBase::sqrt sqrt\endlink(); \n + \link Eigen::sqrt sqrt\endlink(a);\n + m.\link MatrixBase::cwiseSqrt cwiseSqrt\endlink(); + computes square root (\f$ \sqrt a_i \f$) + using std::sqrt; \n + sqrt(a[i]);SSE2, AVX (f,d)
+ \anchor cwisetable_rsqrt + a.\link ArrayBase::rsqrt rsqrt\endlink(); \n + \link Eigen::rsqrt rsqrt\endlink(a); + reciprocal square root (\f$ 1/{\sqrt a_i} \f$) + using std::sqrt; \n + 1/sqrt(a[i]); \n + SSE2, AVX, AltiVec, ZVector (f,d)\n + (approx + 1 Newton iteration)
+ \anchor cwisetable_square + a.\link ArrayBase::square square\endlink(); \n + \link Eigen::square square\endlink(a); + computes square power (\f$ a_i^2 \f$) + a[i]*a[i]All (i32,f,d,cf,cd)
+ \anchor cwisetable_cube + a.\link ArrayBase::cube cube\endlink(); \n + \link Eigen::cube cube\endlink(a); + computes cubic power (\f$ a_i^3 \f$) + a[i]*a[i]*a[i]All (i32,f,d,cf,cd)
+ \anchor cwisetable_abs2 + a.\link ArrayBase::abs2 abs2\endlink(); \n + \link Eigen::abs2 abs2\endlink(a);\n + m.\link MatrixBase::cwiseAbs2 cwiseAbs2\endlink(); + computes the squared absolute value (\f$ |a_i|^2 \f$) + real: a[i]*a[i] \n + complex: real(a[i])*real(a[i]) \n +        + imag(a[i])*imag(a[i])All (i32,f,d)
Trigonometric functions
+ \anchor cwisetable_sin + a.\link ArrayBase::sin sin\endlink(); \n + \link Eigen::sin sin\endlink(a); + computes sine + using std::sin; \n + sin(a[i]);SSE2, AVX (f)
+ \anchor cwisetable_cos + a.\link ArrayBase::cos cos\endlink(); \n + \link Eigen::cos cos\endlink(a); + computes cosine + using std::cos; \n + cos(a[i]);SSE2, AVX (f)
+ \anchor cwisetable_tan + a.\link ArrayBase::tan tan\endlink(); \n + \link Eigen::tan tan\endlink(a); + computes tangent + using std::tan; \n + tan(a[i]);
+ \anchor cwisetable_asin + a.\link ArrayBase::asin asin\endlink(); \n + \link Eigen::asin asin\endlink(a); + computes arc sine (\f$ \sin^{-1} a_i \f$) + using std::asin; \n + asin(a[i]);
+ \anchor cwisetable_acos + a.\link ArrayBase::acos acos\endlink(); \n + \link Eigen::acos acos\endlink(a); + computes arc cosine (\f$ \cos^{-1} a_i \f$) + using std::acos; \n + acos(a[i]);
+ \anchor cwisetable_atan + a.\link ArrayBase::atan atan\endlink(); \n + \link Eigen::atan atan\endlink(a); + computes arc tangent (\f$ \tan^{-1} a_i \f$) + using std::atan; \n + atan(a[i]);
Hyperbolic functions
+ \anchor cwisetable_sinh + a.\link ArrayBase::sinh sinh\endlink(); \n + \link Eigen::sinh sinh\endlink(a); + computes hyperbolic sine + using std::sinh; \n + sinh(a[i]);
+ \anchor cwisetable_cosh + a.\link ArrayBase::cosh cosh\endlink(); \n + \link Eigen::cosh cosh\endlink(a); + computes hyperbolic cosine + using std::cosh; \n + cosh(a[i]);
+ \anchor cwisetable_tanh + a.\link ArrayBase::tanh tanh\endlink(); \n + \link Eigen::tanh tanh\endlink(a); + computes hyperbolic tangent + using std::tanh; \n + tanh(a[i]);
+ \anchor cwisetable_asinh + a.\link ArrayBase::asinh asinh\endlink(); \n + \link Eigen::asinh asinh\endlink(a); + computes inverse hyperbolic sine + using std::asinh; \n + asinh(a[i]);
+ \anchor cwisetable_acosh + a.\link ArrayBase::acosh acosh\endlink(); \n + \link Eigen::acosh acosh\endlink(a); + computes inverse hyperbolic cosine + using std::acosh; \n + acosh(a[i]);
+ \anchor cwisetable_atanh + a.\link ArrayBase::atanh atanh\endlink(); \n + \link Eigen::atanh atanh\endlink(a); + computes inverse hyperbolic tangent + using std::atanh; \n + atanh(a[i]);
Nearest integer floating point operations
+ \anchor cwisetable_ceil + a.\link ArrayBase::ceil ceil\endlink(); \n + \link Eigen::ceil ceil\endlink(a); + nearest integer not less than the given value + using std::ceil; \n + ceil(a[i]);SSE4,AVX,ZVector (f,d)
+ \anchor cwisetable_floor + a.\link ArrayBase::floor floor\endlink(); \n + \link Eigen::floor floor\endlink(a); + nearest integer not greater than the given value + using std::floor; \n + floor(a[i]);SSE4,AVX,ZVector (f,d)
+ \anchor cwisetable_round + a.\link ArrayBase::round round\endlink(); \n + \link Eigen::round round\endlink(a); + nearest integer, \n rounding away from zero in halfway casesbuilt-in generic implementation \n based on \c floor and \c ceil,\n + plus \c using \c std::round ; \cpp11SSE4,AVX,ZVector (f,d)
+ \anchor cwisetable_rint + a.\link ArrayBase::rint rint\endlink(); \n + \link Eigen::rint rint\endlink(a); + nearest integer, \n rounding to nearest even in halfway casesbuilt-in generic implementation using \c std::rint ; \cpp11 + or \c rintf ; SSE4,AVX (f,d)
Floating point manipulation functions
Classification and comparison
+ \anchor cwisetable_isfinite + a.\link ArrayBase::isFinite isFinite\endlink(); \n + \link Eigen::isfinite isfinite\endlink(a); + checks if the given number has finite valuebuilt-in generic implementation,\n + plus \c using \c std::isfinite ; \cpp11
+ \anchor cwisetable_isinf + a.\link ArrayBase::isInf isInf\endlink(); \n + \link Eigen::isinf isinf\endlink(a); + checks if the given number is infinitebuilt-in generic implementation,\n + plus \c using \c std::isinf ; \cpp11
+ \anchor cwisetable_isnan + a.\link ArrayBase::isNaN isNaN\endlink(); \n + \link Eigen::isnan isnan\endlink(a); + checks if the given number is not a numberbuilt-in generic implementation,\n + plus \c using \c std::isnan ; \cpp11
Error and gamma functions
Require \c \#include \c
+ \anchor cwisetable_erf + a.\link ArrayBase::erf erf\endlink(); \n + \link Eigen::erf erf\endlink(a); + error function + using std::erf; \cpp11 \n + erf(a[i]); +
+ \anchor cwisetable_erfc + a.\link ArrayBase::erfc erfc\endlink(); \n + \link Eigen::erfc erfc\endlink(a); + complementary error function + using std::erfc; \cpp11 \n + erfc(a[i]); +
+ \anchor cwisetable_lgamma + a.\link ArrayBase::lgamma lgamma\endlink(); \n + \link Eigen::lgamma lgamma\endlink(a); + natural logarithm of the gamma function + using std::lgamma; \cpp11 \n + lgamma(a[i]); +
+ \anchor cwisetable_digamma + a.\link ArrayBase::digamma digamma\endlink(); \n + digamma(a); + logarithmic derivative of the gamma function + built-in for float and double +
+ \anchor cwisetable_igamma + igamma(a,x); + lower incomplete gamma integral + \n \f$ \gamma(a_i,x_i)= \frac{1}{|a_i|} \int_{0}^{x_i}e^{\text{-}t} t^{a_i-1} \mathrm{d} t \f$ + built-in for float and double,\n but requires \cpp11 +
+ \anchor cwisetable_igammac + igammac(a,x); + upper incomplete gamma integral + \n \f$ \Gamma(a_i,x_i) = \frac{1}{|a_i|} \int_{x_i}^{\infty}e^{\text{-}t} t^{a_i-1} \mathrm{d} t \f$ + built-in for float and double,\n but requires \cpp11 +
Special functions
Require \c \#include \c
+ \anchor cwisetable_polygamma + polygamma(n,x); + n-th derivative of digamma at x + built-in generic based on\n \c lgamma , + \c digamma + and \c zeta . +
+ \anchor cwisetable_betainc + betainc(a,b,x); + Incomplete beta function + built-in for float and double,\n but requires \cpp11 +
+ \anchor cwisetable_zeta + zeta(a,b); \n + a.\link ArrayBase::zeta zeta\endlink(b); + Hurwitz zeta function + \n \f$ \zeta(a_i,b_i)=\sum_{k=0}^{\infty}(b_i+k)^{\text{-}a_i} \f$ + built-in for float and double +
+ \anchor cwisetable_ndtri + a.\link ArrayBase::ndtri ndtri\endlink(); \n + \link Eigen::ndtri ndtri\endlink(a); + Inverse of the CDF of the Normal distribution function + built-in for float and double +
+ +\n + +*/ + +} diff --git a/include/eigen/doc/CustomizingEigen_CustomScalar.dox b/include/eigen/doc/CustomizingEigen_CustomScalar.dox new file mode 100644 index 0000000000000000000000000000000000000000..24e5f563b8cd554fa496d8281de87378aba9bb4f --- /dev/null +++ b/include/eigen/doc/CustomizingEigen_CustomScalar.dox @@ -0,0 +1,120 @@ +namespace Eigen { + +/** \page TopicCustomizing_CustomScalar Using custom scalar types +\anchor user_defined_scalars + +By default, Eigen currently supports standard floating-point types (\c float, \c double, \c std::complex, \c std::complex, \c long \c double), as well as all native integer types (e.g., \c int, \c unsigned \c int, \c short, etc.), and \c bool. +On x86-64 systems, \c long \c double permits to locally enforces the use of x87 registers with extended accuracy (in comparison to SSE). + +In order to add support for a custom type \c T you need: +-# make sure the common operator (+,-,*,/,etc.) are supported by the type \c T +-# add a specialization of struct Eigen::NumTraits (see \ref NumTraits) +-# define the math functions that makes sense for your type. This includes standard ones like sqrt, pow, sin, tan, conj, real, imag, etc, as well as abs2 which is Eigen specific. + (see the file Eigen/src/Core/MathFunctions.h) + +The math function should be defined in the same namespace than \c T, or in the \c std namespace though that second approach is not recommended. + +Here is a concrete example adding support for the Adolc's \c adouble type. Adolc is an automatic differentiation library. The type \c adouble is basically a real value tracking the values of any number of partial derivatives. 
+ +\code +#ifndef ADOLCSUPPORT_H +#define ADOLCSUPPORT_H + +#define ADOLC_TAPELESS +#include +#include + +namespace Eigen { + +template<> struct NumTraits + : NumTraits // permits to get the epsilon, dummy_precision, lowest, highest functions +{ + typedef adtl::adouble Real; + typedef adtl::adouble NonInteger; + typedef adtl::adouble Nested; + + enum { + IsComplex = 0, + IsInteger = 0, + IsSigned = 1, + RequireInitialization = 1, + ReadCost = 1, + AddCost = 3, + MulCost = 3 + }; +}; + +} + +namespace adtl { + +inline const adouble& conj(const adouble& x) { return x; } +inline const adouble& real(const adouble& x) { return x; } +inline adouble imag(const adouble&) { return 0.; } +inline adouble abs(const adouble& x) { return fabs(x); } +inline adouble abs2(const adouble& x) { return x*x; } + +} + +#endif // ADOLCSUPPORT_H +\endcode + +This other example adds support for the \c mpq_class type from GMP. It shows in particular how to change the way Eigen picks the best pivot during LU factorization. It selects the coefficient with the highest score, where the score is by default the absolute value of a number, but we can define a different score, for instance to prefer pivots with a more compact representation (this is an example, not a recommendation). Note that the scores should always be non-negative and only zero is allowed to have a score of zero. Also, this can interact badly with thresholds for inexact scalar types. 
+ +\code +#include +#include +#include + +namespace Eigen { + template<> struct NumTraits : GenericNumTraits + { + typedef mpq_class Real; + typedef mpq_class NonInteger; + typedef mpq_class Nested; + + static inline Real epsilon() { return 0; } + static inline Real dummy_precision() { return 0; } + static inline int digits10() { return 0; } + + enum { + IsInteger = 0, + IsSigned = 1, + IsComplex = 0, + RequireInitialization = 1, + ReadCost = 6, + AddCost = 150, + MulCost = 100 + }; + }; + + namespace internal { + + template<> struct scalar_score_coeff_op { + struct result_type : boost::totally_ordered1 { + std::size_t len; + result_type(int i = 0) : len(i) {} // Eigen uses Score(0) and Score() + result_type(mpq_class const& q) : + len(mpz_size(q.get_num_mpz_t())+ + mpz_size(q.get_den_mpz_t())-1) {} + friend bool operator<(result_type x, result_type y) { + // 0 is the worst possible pivot + if (x.len == 0) return y.len > 0; + if (y.len == 0) return false; + // Prefer a pivot with a small representation + return x.len > y.len; + } + friend bool operator==(result_type x, result_type y) { + // Only used to test if the score is 0 + return x.len == y.len; + } + }; + result_type operator()(mpq_class const& x) const { return x; } + }; + } +} +\endcode + +*/ + +} diff --git a/include/eigen/doc/CustomizingEigen_InheritingMatrix.dox b/include/eigen/doc/CustomizingEigen_InheritingMatrix.dox new file mode 100644 index 0000000000000000000000000000000000000000..b21e554337bc79f1cd7998e76d31d2fbf50341a8 --- /dev/null +++ b/include/eigen/doc/CustomizingEigen_InheritingMatrix.dox @@ -0,0 +1,34 @@ +namespace Eigen { + +/** \page TopicCustomizing_InheritingMatrix Inheriting from Matrix + +Before inheriting from Matrix, be really, I mean REALLY, sure that using +EIGEN_MATRIX_PLUGIN is not what you really want (see previous section). +If you just need to add few members to Matrix, this is the way to go. 
+ +An example of when you actually need to inherit Matrix, is when you +have several layers of heritage such as +MyVerySpecificVector1, MyVerySpecificVector2 -> MyVector1 -> Matrix and +MyVerySpecificVector3, MyVerySpecificVector4 -> MyVector2 -> Matrix. + +In order for your object to work within the %Eigen framework, you need to +define a few members in your inherited class. + +Here is a minimalistic example: + +\include CustomizingEigen_Inheritance.cpp + +Output: \verbinclude CustomizingEigen_Inheritance.out + +This is the kind of error you can get if you don't provide those methods +\verbatim +error: no match for ‘operator=’ in ‘v = Eigen::operator*( +const Eigen::MatrixBase >::Scalar&, +const Eigen::MatrixBase >::StorageBaseType&) +(((const Eigen::MatrixBase >::StorageBaseType&) +((const Eigen::MatrixBase >::StorageBaseType*)(& v))))’ +\endverbatim + +*/ + +} diff --git a/include/eigen/doc/CustomizingEigen_Plugins.dox b/include/eigen/doc/CustomizingEigen_Plugins.dox new file mode 100644 index 0000000000000000000000000000000000000000..9ab0200ff946dc1be9e0d6465ddff920a83a2314 --- /dev/null +++ b/include/eigen/doc/CustomizingEigen_Plugins.dox @@ -0,0 +1,69 @@ +namespace Eigen { + +/** \page TopicCustomizing_Plugins Extending MatrixBase (and other classes) + +In this section we will see how to add custom methods to MatrixBase. Since all expressions and matrix types inherit MatrixBase, adding a method to MatrixBase make it immediately available to all expressions ! A typical use case is, for instance, to make Eigen compatible with another API. + +You certainly know that in C++ it is not possible to add methods to an existing class. So how that's possible ? Here the trick is to include in the declaration of MatrixBase a file defined by the preprocessor token \c EIGEN_MATRIXBASE_PLUGIN: +\code +class MatrixBase { + // ... 
+ #ifdef EIGEN_MATRIXBASE_PLUGIN + #include EIGEN_MATRIXBASE_PLUGIN + #endif +}; +\endcode +Therefore to extend MatrixBase with your own methods you just have to create a file with your method declaration and define EIGEN_MATRIXBASE_PLUGIN before you include any Eigen's header file. + +You can extend many of the other classes used in Eigen by defining similarly named preprocessor symbols. For instance, define \c EIGEN_ARRAYBASE_PLUGIN if you want to extend the ArrayBase class. A full list of classes that can be extended in this way and the corresponding preprocessor symbols can be found on our page \ref TopicPreprocessorDirectives. + +Here is an example of an extension file for adding methods to MatrixBase: \n +\b MatrixBaseAddons.h +\code +inline Scalar at(uint i, uint j) const { return this->operator()(i,j); } +inline Scalar& at(uint i, uint j) { return this->operator()(i,j); } +inline Scalar at(uint i) const { return this->operator[](i); } +inline Scalar& at(uint i) { return this->operator[](i); } + +inline RealScalar squaredLength() const { return squaredNorm(); } +inline RealScalar length() const { return norm(); } +inline RealScalar invLength(void) const { return fast_inv_sqrt(squaredNorm()); } + +template +inline Scalar squaredDistanceTo(const MatrixBase& other) const +{ return (derived() - other.derived()).squaredNorm(); } + +template +inline RealScalar distanceTo(const MatrixBase& other) const +{ return internal::sqrt(derived().squaredDistanceTo(other)); } + +inline void scaleTo(RealScalar l) { RealScalar vl = norm(); if (vl>1e-9) derived() *= (l/vl); } + +inline Transpose transposed() {return this->transpose();} +inline const Transpose transposed() const {return this->transpose();} + +inline uint minComponentId(void) const { int i; this->minCoeff(&i); return i; } +inline uint maxComponentId(void) const { int i; this->maxCoeff(&i); return i; } + +template +void makeFloor(const MatrixBase& other) { derived() = derived().cwiseMin(other.derived()); } 
+template +void makeCeil(const MatrixBase& other) { derived() = derived().cwiseMax(other.derived()); } + +const CwiseBinaryOp, const Derived, const ConstantReturnType> +operator+(const Scalar& scalar) const +{ return CwiseBinaryOp, const Derived, const ConstantReturnType>(derived(), Constant(rows(),cols(),scalar)); } + +friend const CwiseBinaryOp, const ConstantReturnType, Derived> +operator+(const Scalar& scalar, const MatrixBase& mat) +{ return CwiseBinaryOp, const ConstantReturnType, Derived>(Constant(rows(),cols(),scalar), mat.derived()); } +\endcode + +Then one can add the following declaration in the config.h or whatever prerequisites header file of his project: +\code +#define EIGEN_MATRIXBASE_PLUGIN "MatrixBaseAddons.h" +\endcode + +*/ + +} diff --git a/include/eigen/doc/Doxyfile.in b/include/eigen/doc/Doxyfile.in new file mode 100644 index 0000000000000000000000000000000000000000..3e85cfeb9e4804ba2b0524c2252f9ecea35b6fb0 --- /dev/null +++ b/include/eigen/doc/Doxyfile.in @@ -0,0 +1,180 @@ +# Doxyfile 1.13.0 +PROJECT_NAME = ${EIGEN_DOXY_PROJECT_NAME} +PROJECT_NUMBER = ${EIGEN_VERSION} +PROJECT_LOGO = ${Eigen_SOURCE_DIR}/doc/Eigen_Silly_Professor_64x64.png +OUTPUT_DIRECTORY = ${Eigen_BINARY_DIR}/doc${EIGEN_DOXY_OUTPUT_DIRECTORY_SUFFIX} +FULL_PATH_NAMES = YES +STRIP_FROM_INC_PATH = ${Eigen_SOURCE_DIR}/ +TAB_SIZE = 8 +ALIASES = "only_for_vectors=This is only for vectors (either row-vectors or column-vectors), i.e. matrices which are known at compile-time to have either one row or one column." \ + "not_reentrant=\warning This function is not re-entrant." \ + "array_module=This is defined in the %Array module. \code #include \endcode" \ + "cholesky_module=This is defined in the %Cholesky module. \code #include \endcode" \ + "eigenvalues_module=This is defined in the %Eigenvalues module. \code #include \endcode" \ + "geometry_module=This is defined in the %Geometry module. \code #include \endcode" \ + "householder_module=This is defined in the %Householder module. 
\code #include \endcode" \ + "jacobi_module=This is defined in the %Jacobi module. \code #include \endcode" \ + "lu_module=This is defined in the %LU module. \code #include \endcode" \ + "qr_module=This is defined in the %QR module. \code #include \endcode" \ + "svd_module=This is defined in the %SVD module. \code #include \endcode" \ + "specialfunctions_module=This is defined in the \b unsupported SpecialFunctions module. \code #include \endcode" \ + label=\bug \ + "matrixworld=*" \ + "arrayworld=*" \ + "note_about_arbitrary_choice_of_solution=If there exists more than one solution, this method will arbitrarily choose one." \ + "note_about_using_kernel_to_study_multiple_solutions=If you need a complete analysis of the space of solutions, take the one solution obtained by this method and add to it elements of the kernel, as determined by kernel()." \ + "note_about_checking_solutions=This method just tries to find as good a solution as possible. If you want to check whether a solution exists or if it is accurate, just call this function to get a result and then compute the error of this result, or use MatrixBase::isApprox() directly, for instance like this: \code bool a_solution_exists = (A*result).isApprox(b, precision); \endcode This method avoids dividing by zero, so that the non-existence of a solution doesn't by itself mean that you'll get \c inf or \c nan values." \ + "note_try_to_help_rvo=This function returns the result by value. In order to make that efficient, it is implemented as just a return statement using a special constructor, hopefully allowing the compiler to perform a RVO (return value optimization)." \ + "nonstableyet=\warning This is not considered to be part of the stable public API yet. Changes may happen in future releases. See \ref Experimental \"Experimental parts of Eigen\"" \ + "implsparsesolverconcept=This class follows the \link TutorialSparseSolverConcept sparse solver concept \endlink." 
\ + blank= \ + "cpp11=[c++11]" \ + "cpp14=[c++14]" \ + "cpp17=[c++17]" \ + "newin{1}=New in %Eigen \1." \ + eigenAutoToc= \ + eigenManualPage=\defgroup +EXTENSION_MAPPING = .h=C++ \ + no_extension=C++ +DISTRIBUTE_GROUP_DOC = YES +NUM_PROC_THREADS = 0 +EXTRACT_ALL = NO +EXTRACT_PRIVATE = NO +EXTRACT_PRIV_VIRTUAL = NO +EXTRACT_PACKAGE = NO +EXTRACT_STATIC = YES +EXTRACT_LOCAL_CLASSES = NO +EXTRACT_LOCAL_METHODS = NO +EXTRACT_ANON_NSPACES = NO +HIDE_UNDOC_MEMBERS = YES +HIDE_UNDOC_CLASSES = YES +HIDE_FRIEND_COMPOUNDS = YES +CASE_SENSE_NAMES = YES +SORT_BRIEF_DOCS = YES +GENERATE_TESTLIST = NO +MAX_INITIALIZER_LINES = 0 +SHOW_NAMESPACES = NO +LAYOUT_FILE = ${Eigen_BINARY_DIR}/doc${EIGEN_DOXY_OUTPUT_DIRECTORY_SUFFIX}/eigendoxy_layout.xml +WARN_IF_UNDOCUMENTED = NO +INPUT = ${EIGEN_DOXY_INPUT} +FILE_PATTERNS = * +RECURSIVE = YES +EXCLUDE = ${Eigen_SOURCE_DIR}/Eigen/Eigen2Support \ + ${Eigen_SOURCE_DIR}/Eigen/src/Eigen2Support \ + ${Eigen_SOURCE_DIR}/doc/examples \ + ${Eigen_SOURCE_DIR}/doc/special_examples \ + ${Eigen_SOURCE_DIR}/doc/snippets \ + ${Eigen_SOURCE_DIR}/unsupported/doc/examples \ + ${Eigen_SOURCE_DIR}/unsupported/doc/snippets + +# ${Eigen_SOURCE_DIR}/Eigen/src/Core/products \ +# ${Eigen_SOURCE_DIR}/Eigen/src/Core/util/ForwardDeclarations.h \ + +EXCLUDE_PATTERNS = CMake* \ + *.txt \ + *.sh \ + *.orig \ + *.diff \ + diff \ + *~ \ + *. \ + *.sln \ + *.sdf \ + *.tmp \ + *.vcxproj \ + *.filters \ + *.user \ + *.suo +# The following are pseudo template bases, and not real classes. 
+# https://github.com/doxygen/doxygen/issues/11289 +EXCLUDE_SYMBOLS = Kernel \ + BinaryOp +EXAMPLE_PATH = ${Eigen_SOURCE_DIR}/doc/snippets \ + ${Eigen_BINARY_DIR}/doc/snippets \ + ${Eigen_SOURCE_DIR}/doc/examples \ + ${Eigen_BINARY_DIR}/doc/examples \ + ${Eigen_SOURCE_DIR}/doc/special_examples \ + ${Eigen_BINARY_DIR}/doc/special_examples \ + ${Eigen_SOURCE_DIR}/unsupported/doc/snippets \ + ${Eigen_BINARY_DIR}/unsupported/doc/snippets \ + ${Eigen_SOURCE_DIR}/unsupported/doc/examples \ + ${Eigen_BINARY_DIR}/unsupported/doc/examples +IMAGE_PATH = ${Eigen_BINARY_DIR}/doc/html +# Prevent README.md from being considered a directory description (i.e. for Tensor). +IMPLICIT_DIR_DOCS = NO +ALPHABETICAL_INDEX = NO +HTML_OUTPUT = ${Eigen_BINARY_DIR}/doc/html${EIGEN_DOXY_OUTPUT_DIRECTORY_SUFFIX} +HTML_HEADER = ${Eigen_BINARY_DIR}/doc/eigendoxy_header.html +HTML_FOOTER = ${Eigen_BINARY_DIR}/doc/eigendoxy_footer.html +HTML_EXTRA_FILES = ${Eigen_SOURCE_DIR}/doc/eigendoxy.css +HTML_COLORSTYLE_HUE = ${EIGEN_DOXY_HTML_COLORSTYLE_HUE} +HTML_DYNAMIC_SECTIONS = YES +DISABLE_INDEX = YES +FULL_SIDEBAR = NO +ENUM_VALUES_PER_LINE = 1 +USE_MATHJAX = @EIGEN_DOXY_USE_MATHJAX@ +MATHJAX_RELPATH = https://cdn.jsdelivr.net/npm/mathjax@2 +MATHJAX_EXTENSIONS = TeX/AMSmath \ + TeX/AMSsymbols +GENERATE_LATEX = NO +EXTRA_PACKAGES = amssymb \ + amsmath +MACRO_EXPANSION = YES +EXPAND_ONLY_PREDEF = YES +PREDEFINED = EIGEN_EMPTY_STRUCT \ + EIGEN_PARSED_BY_DOXYGEN \ + EIGEN_VECTORIZE \ + EIGEN_QT_SUPPORT \ + EIGEN_STRONG_INLINE=inline \ + EIGEN_DEVICE_FUNC= \ + "EIGEN_MAKE_CWISE_BINARY_OP(METHOD,FUNCTOR)=template const CwiseBinaryOp, const Derived, const OtherDerived> METHOD(const EIGEN_CURRENT_STORAGE_BASE_CLASS &other) const;" \ + "EIGEN_CWISE_PRODUCT_RETURN_TYPE(LHS,RHS)=CwiseBinaryOp, const LHS, const RHS>" \ + "EIGEN_CAT2(a,b)= a ## b" \ + "EIGEN_CAT(a,b)=EIGEN_CAT2(a,b)" \ + "EIGEN_CWISE_BINARY_RETURN_TYPE(LHS,RHS,OPNAME)=CwiseBinaryOp, const LHS, const RHS>" \ + EIGEN_ALIGN_TO_BOUNDARY(x)= \ + 
"DOXCOMMA=," \ + "EIGEN_STATIC_ASSERT(COND,MSG)=" \ + EIGEN_HAS_CXX11_MATH=1 \ + EIGEN_HAS_CXX11=1 +EXPAND_AS_DEFINED = EIGEN_MAKE_TYPEDEFS \ + EIGEN_MAKE_FIXED_TYPEDEFS \ + EIGEN_MAKE_TYPEDEFS_ALL_SIZES \ + EIGEN_MAKE_ARRAY_TYPEDEFS \ + EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS \ + EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES \ + EIGEN_CWISE_UNOP_RETURN_TYPE \ + EIGEN_CWISE_BINOP_RETURN_TYPE \ + EIGEN_CURRENT_STORAGE_BASE_CLASS \ + EIGEN_MATHFUNC_IMPL \ + _EIGEN_GENERIC_PUBLIC_INTERFACE \ + EIGEN_ARRAY_DECLARE_GLOBAL_UNARY \ + EIGEN_EMPTY \ + EIGEN_EULER_ANGLES_TYPEDEFS \ + EIGEN_EULER_ANGLES_SINGLE_TYPEDEF \ + EIGEN_EULER_SYSTEM_TYPEDEF \ + EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY \ + EIGEN_MATRIX_FUNCTION \ + EIGEN_MATRIX_FUNCTION_1 \ + EIGEN_DOC_UNARY_ADDONS \ + EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL \ + EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF \ + EIGEN_MAKE_SCALAR_BINARY_OP \ + EIGEN_MAKE_SCALAR_BINARY_OP_ONTHERIGHT +TAGFILES = ${EIGEN_DOXY_TAGFILES} +GENERATE_TAGFILE = ${Eigen_BINARY_DIR}/doc/${EIGEN_DOXY_PROJECT_NAME}.doxytags +EXTERNAL_GROUPS = NO +EXTERNAL_PAGES = NO +HIDE_UNDOC_RELATIONS = NO +HAVE_DOT = YES +COLLABORATION_GRAPH = NO +GROUP_GRAPHS = NO +UML_LOOK = YES +INCLUDE_GRAPH = NO +INCLUDED_BY_GRAPH = NO +GRAPHICAL_HIERARCHY = NO +DIRECTORY_GRAPH = NO +DOT_GRAPH_MAX_NODES = 300 +GENERATE_DEPRECATEDLIST = NO +GENERATE_TODOLIST = NO +WARN_AS_ERROR = FAIL_ON_WARNINGS_PRINT diff --git a/include/eigen/doc/FixedSizeVectorizable.dox b/include/eigen/doc/FixedSizeVectorizable.dox new file mode 100644 index 0000000000000000000000000000000000000000..0012465cae603be78f73ceb9cebf9b6d0f2c6064 --- /dev/null +++ b/include/eigen/doc/FixedSizeVectorizable.dox @@ -0,0 +1,38 @@ +namespace Eigen { + +/** \eigenManualPage TopicFixedSizeVectorizable Fixed-size vectorizable %Eigen objects + +The goal of this page is to explain what we mean by "fixed-size vectorizable". 
+ +\section FixedSizeVectorizable_summary Executive Summary + +An Eigen object is called "fixed-size vectorizable" if it has fixed size and that size is a multiple of 16 bytes. + +Examples include: +\li Eigen::Vector2d +\li Eigen::Vector4d +\li Eigen::Vector4f +\li Eigen::Matrix2d +\li Eigen::Matrix2f +\li Eigen::Matrix4d +\li Eigen::Matrix4f +\li Eigen::Affine3d +\li Eigen::Affine3f +\li Eigen::Quaterniond +\li Eigen::Quaternionf + +\section FixedSizeVectorizable_explanation Explanation + +First, "fixed-size" should be clear: an %Eigen object has fixed size if its number of rows and its number of columns are fixed at compile-time. So for example \ref Matrix3f has fixed size, but \ref MatrixXf doesn't (the opposite of fixed-size is dynamic-size). + +The array of coefficients of a fixed-size %Eigen object is a plain "static array", it is not dynamically allocated. For example, the data behind a \ref Matrix4f is just a "float array[16]". + +Fixed-size objects are typically very small, which means that we want to handle them with zero runtime overhead -- both in terms of memory usage and of speed. + +Now, vectorization works with 128-bit packets (e.g., SSE, AltiVec, NEON), 256-bit packets (e.g., AVX), or 512-bit packets (e.g., AVX512). Moreover, for performance reasons, these packets are most efficiently read and written if they have the same alignment as the packet size, that is 16 bytes, 32 bytes, and 64 bytes respectively. + +So it turns out that the best way that fixed-size %Eigen objects can be vectorized, is if their size is a multiple of 16 bytes (or more). %Eigen will then request 16-byte alignment (or more) for these objects, and henceforth rely on these objects being aligned to achieve maximal efficiency. 
+ +*/ + +} diff --git a/include/eigen/doc/HiPerformance.dox b/include/eigen/doc/HiPerformance.dox new file mode 100644 index 0000000000000000000000000000000000000000..9cee3351c21cae07130bf3f75b9262a6f6c7c528 --- /dev/null +++ b/include/eigen/doc/HiPerformance.dox @@ -0,0 +1,128 @@ + +namespace Eigen { + +/** \page TopicWritingEfficientProductExpression Writing efficient matrix product expressions + +In general achieving good performance with Eigen does not require any special effort: +simply write your expressions in the most high level way. This is especially true +for small fixed size matrices. For large matrices, however, it might be useful to +take some care when writing your expressions in order to minimize useless evaluations +and optimize the performance. +In this page we will give a brief overview of Eigen's internal mechanism to simplify +and evaluate complex product expressions, and discuss the current limitations. +In particular we will focus on expressions matching level 2 and 3 BLAS routines, i.e., +all kinds of matrix products and triangular solvers. + +Indeed, in Eigen we have implemented a set of highly optimized routines which are very similar +to BLAS's ones. Unlike BLAS, those routines are made available to the user via a high level and +natural API. Each of these routines can compute in a single evaluation a wide variety of expressions. +Given an expression, the challenge is then to map it to a minimal set of routines. +As explained later, this mechanism has some limitations, and knowing them will allow +you to write faster code by making your expressions more Eigen friendly. + +\section GEMM General Matrix-Matrix product (GEMM) + +Let's start with the most common primitive: the matrix product of general dense matrices. +In the BLAS world this corresponds to the GEMM routine.
Our equivalent primitive can +perform the following operation: +\f$ C.noalias() += \alpha op1(A) op2(B) \f$ +where A, B, and C are column and/or row major matrices (or sub-matrices), +alpha is a scalar value, and op1, op2 can be transpose, adjoint, conjugate, or the identity. +When Eigen detects a matrix product, it analyzes both sides of the product to extract a +unique scalar factor alpha, and for each side, its effective storage order, shape, and conjugation states. +More precisely each side is simplified by iteratively removing trivial expressions such as scalar multiple, +negation and conjugation. Transpose and Block expressions are not evaluated and they only modify the storage order +and shape. All other expressions are immediately evaluated. +For instance, the following expression: +\code m1.noalias() -= s4 * (s1 * m2.adjoint() * (-(s3*m3).conjugate()*s2)) \endcode +is automatically simplified to: +\code m1.noalias() += (s1*s2*conj(s3)*s4) * m2.adjoint() * m3.conjugate() \endcode +which exactly matches our GEMM routine. + +\subsection GEMM_Limitations Limitations +Unfortunately, this simplification mechanism is not perfect yet and not all expressions which could be +handled by a single GEMM-like call are correctly detected. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Not optimal expressionEvaluated asOptimal version (single evaluation)Comments
\code +m1 += m2 * m3; \endcode\code +temp = m2 * m3; +m1 += temp; \endcode\code +m1.noalias() += m2 * m3; \endcodeUse .noalias() to tell Eigen the result and right-hand-sides do not alias. + Otherwise the product m2 * m3 is evaluated into a temporary.
\code +m1.noalias() += s1 * (m2 * m3); \endcodeThis is a special feature of Eigen. Here the product between a scalar + and a matrix product does not evaluate the matrix product but instead it + returns a matrix product expression tracking the scalar scaling factor.
+ Without this optimization, the matrix product would be evaluated into a + temporary as in the next example.
\code +m1.noalias() += (m2 * m3).adjoint(); \endcode\code +temp = m2 * m3; +m1 += temp.adjoint(); \endcode\code +m1.noalias() += m3.adjoint() * m2.adjoint(); \endcodeThis is because the product expression has the EvalBeforeNesting bit which + enforces the evaluation of the product by the Transpose expression.
\code +m1 = m1 + m2 * m3; \endcode\code +temp = m2 * m3; +m1 = m1 + temp; \endcode\code m1.noalias() += m2 * m3; \endcodeHere there is no way to detect at compile time that the two m1 are the same, + and so the matrix product will be immediately evaluated.
\code +m1.noalias() = m4 + m2 * m3; \endcode\code +temp = m2 * m3; +m1 = m4 + temp; \endcode\code +m1 = m4; +m1.noalias() += m2 * m3; \endcodeFirst of all, here the .noalias() in the first expression is useless because + m2*m3 will be evaluated anyway. However, note how this expression can be rewritten + so that no temporary is required. (tip: for very small fixed-size matrices, + it is slightly better to rewrite it like this: m1.noalias() = m2 * m3; m1 += m4;)
\code +m1.noalias() += (s1*m2).block(..) * m3; \endcode\code +temp = (s1*m2).block(..); +m1 += temp * m3; \endcode\code +m1.noalias() += s1 * m2.block(..) * m3; \endcodeThis is because our expression analyzer is currently not able to extract trivial + expressions nested in a Block expression. Therefore the nested scalar + multiple cannot be properly extracted.
+ +Of course all these remarks hold for all other kind of products involving triangular or selfadjoint matrices. + +*/ + +} diff --git a/include/eigen/doc/InplaceDecomposition.dox b/include/eigen/doc/InplaceDecomposition.dox new file mode 100644 index 0000000000000000000000000000000000000000..cb1c6d413831809cc27306580db6aeb925e61f54 --- /dev/null +++ b/include/eigen/doc/InplaceDecomposition.dox @@ -0,0 +1,115 @@ +namespace Eigen { + +/** \eigenManualPage InplaceDecomposition Inplace matrix decompositions + +Starting from %Eigen 3.3, the LU, Cholesky, and QR decompositions can operate \em inplace, that is, directly within the given input matrix. +This feature is especially useful when dealing with huge matrices, and or when the available memory is very limited (embedded systems). + +To this end, the respective decomposition class must be instantiated with a Ref<> matrix type, and the decomposition object must be constructed with the input matrix as argument. As an example, let us consider an inplace LU decomposition with partial pivoting. + +Let's start with the basic inclusions, and declaration of a 2x2 matrix \c A: + + + + + + + +
codeoutput
\snippet TutorialInplaceLU.cpp init + \snippet TutorialInplaceLU.out init +
+ +No surprise here! Then, let's declare our inplace LU object \c lu, and check the content of the matrix \c A: + + + + + + +
\snippet TutorialInplaceLU.cpp declaration + \snippet TutorialInplaceLU.out declaration +
+ +Here, the \c lu object computes and stores the \c L and \c U factors within the memory held by the matrix \c A. +The coefficients of \c A have thus been destroyed during the factorization, and replaced by the L and U factors as one can verify: + + + + + + +
\snippet TutorialInplaceLU.cpp matrixLU + \snippet TutorialInplaceLU.out matrixLU +
+ +Then, one can use the \c lu object as usual, for instance to solve the Ax=b problem: + + + + + +
\snippet TutorialInplaceLU.cpp solve + \snippet TutorialInplaceLU.out solve +
+ +Here, since the content of the original matrix \c A has been lost, we had to declare a new matrix \c A0 to verify the result. + +Since the memory is shared between \c A and \c lu, modifying the matrix \c A will make \c lu invalid. +This can easily be verified by modifying the content of \c A and trying to solve the initial problem again: + + + + + + 
\snippet TutorialInplaceLU.cpp modifyA + \snippet TutorialInplaceLU.out modifyA +
+ +Note that there is no shared pointer under the hood; it is the \b responsibility \b of \b the \b user to keep the input matrix \c A alive for as long as \c lu is living. + +If one wants to update the factorization with the modified A, one has to call the compute method as usual: + + + + + 
\snippet TutorialInplaceLU.cpp recompute + \snippet TutorialInplaceLU.out recompute +
+ +Note that calling compute does not change the memory which is referenced by the \c lu object. Therefore, if the compute method is called with another matrix \c A1 different than \c A, then the content of \c A1 won't be modified. This is still the content of \c A that will be used to store the L and U factors of the matrix \c A1. +This can easily be verified as follows: + + + + + +
\snippet TutorialInplaceLU.cpp recompute_bis0 + \snippet TutorialInplaceLU.out recompute_bis0 +
+The matrix \c A1 is unchanged, and one can thus solve A1*x=b, and directly check the residual without any copy of \c A1: + + + + + +
\snippet TutorialInplaceLU.cpp recompute_bis1 + \snippet TutorialInplaceLU.out recompute_bis1 +
+ + +Here is the list of matrix decompositions supporting this inplace mechanism: + +- class LLT +- class LDLT +- class PartialPivLU +- class FullPivLU +- class HouseholderQR +- class ColPivHouseholderQR +- class FullPivHouseholderQR +- class CompleteOrthogonalDecomposition + +*/ + +} \ No newline at end of file diff --git a/include/eigen/doc/LeastSquares.dox b/include/eigen/doc/LeastSquares.dox new file mode 100644 index 0000000000000000000000000000000000000000..ddbf38dec9b27f7fd5bac0d74efc5af9dfe77ca8 --- /dev/null +++ b/include/eigen/doc/LeastSquares.dox @@ -0,0 +1,75 @@ +namespace Eigen { + +/** \eigenManualPage LeastSquares Solving linear least squares systems + +This page describes how to solve linear least squares systems using %Eigen. An overdetermined system +of equations, say \a Ax = \a b, generally has no exact solution. In this case, it makes sense to search for the +vector \a x which is closest to being a solution, in the sense that the difference \a Ax - \a b is +as small as possible. This \a x is called the least squares solution (if the Euclidean norm is used). + +The three methods discussed on this page are the SVD decomposition, the QR decomposition and normal +equations. Of these, the SVD decomposition is generally the most accurate but the slowest, normal +equations is the fastest but least accurate, and the QR decomposition is in between. + +\eigenAutoToc + + +\section LeastSquaresSVD Using the SVD decomposition + +The \link BDCSVD::solve() solve() \endlink method in the BDCSVD class can be directly used to +solve linear least squares systems. It is not enough to compute only the singular values (the default for +this class); you also need the singular vectors, but the thin SVD decomposition suffices for +computing least squares solutions: + + + + + + + 
Example:Output:
\include TutorialLinAlgSVDSolve.cpp \verbinclude TutorialLinAlgSVDSolve.out
+ +This is an example from the page \link TutorialLinearAlgebra Linear algebra and decompositions \endlink. +If you just need to solve the least squares problem, but are not interested in the SVD per se, a +faster alternative method is CompleteOrthogonalDecomposition. + + +\section LeastSquaresQR Using the QR decomposition + +The solve() method in QR decomposition classes also computes the least squares solution. There are +three QR decomposition classes: HouseholderQR (no pivoting, fast but unstable if your matrix is +not full rank), ColPivHouseholderQR (column pivoting, thus a bit slower but more stable) and +FullPivHouseholderQR (full pivoting, so slowest and slightly more stable than ColPivHouseholderQR). +Here is an example with column pivoting: + + + + + + + 
Example:Output:
\include LeastSquaresQR.cpp \verbinclude LeastSquaresQR.out
+ + +\section LeastSquaresNormalEquations Using normal equations + +Finding the least squares solution of \a Ax = \a b is equivalent to solving the normal equation +ATAx = ATb. This leads to the following code + + + + + + + +
Example:Output:
\include LeastSquaresNormalEquations.cpp \verbinclude LeastSquaresNormalEquations.out
+ +This method is usually the fastest, especially when \a A is "tall and skinny". However, if the +matrix \a A is even mildly ill-conditioned, this is not a good method, because the condition number +of ATA is the square of the condition number of \a A. This means that you +lose roughly twice as many digits of accuracy using the normal equation, compared to the more stable +methods mentioned above. + +*/ + +} \ No newline at end of file diff --git a/include/eigen/doc/Manual.dox b/include/eigen/doc/Manual.dox new file mode 100644 index 0000000000000000000000000000000000000000..65ae778fcffe1bf529dd59af6a77936bc33f2cbb --- /dev/null +++ b/include/eigen/doc/Manual.dox @@ -0,0 +1,191 @@ + +// This file structures pages and modules into a convenient hierarchical structure. + +namespace Eigen { + +/** \page UserManual_CustomizingEigen Extending/Customizing Eigen + %Eigen can be extended in several ways, for instance, by defining global methods, by inserting custom methods within main %Eigen's classes through the \ref TopicCustomizing_Plugins "plugin" mechanism, by adding support to \ref TopicCustomizing_CustomScalar "custom scalar types" etc. See below for the respective sub-topics. 
+ - \subpage TopicCustomizing_Plugins + - \subpage TopicCustomizing_InheritingMatrix + - \subpage TopicCustomizing_CustomScalar + - \subpage TopicCustomizing_NullaryExpr + - \subpage TopicNewExpressionType + \sa \ref TopicPreprocessorDirectives +*/ + + +/** \page UserManual_Generalities General topics + - \subpage TopicFunctionTakingEigenTypes + - \subpage TopicPreprocessorDirectives + - \subpage TopicAssertions + - \subpage TopicMultiThreading + - \subpage TopicUsingBlasLapack + - \subpage TopicUsingIntelMKL + - \subpage TopicCUDA + - \subpage TopicPitfalls + - \subpage TopicTemplateKeyword + - \subpage UserManual_UnderstandingEigen + - \subpage TopicCMakeGuide +*/ + +/** \page UserManual_UnderstandingEigen Understanding Eigen + - \subpage TopicInsideEigenExample + - \subpage TopicClassHierarchy + - \subpage TopicLazyEvaluation +*/ + +/** \page UnclassifiedPages Unclassified pages + - \subpage TopicResizing + - \subpage TopicVectorization + - \subpage TopicEigenExpressionTemplates + - \subpage TopicScalarTypes + - \subpage TutorialSparse_example_details + - \subpage TopicWritingEfficientProductExpression + - \subpage Experimental +*/ + + +/** \defgroup Support_modules Support modules + * Category of modules which add support for external libraries. 
+ */ + + +/** \defgroup DenseMatrixManipulation_chapter Dense matrix and array manipulation */ +/** \defgroup DenseMatrixManipulation_Alignement Alignment issues */ +/** \defgroup DenseMatrixManipulation_Reference Reference */ + +/** \addtogroup TutorialMatrixClass + \ingroup DenseMatrixManipulation_chapter */ +/** \addtogroup TutorialMatrixArithmetic + \ingroup DenseMatrixManipulation_chapter */ +/** \addtogroup TutorialArrayClass + \ingroup DenseMatrixManipulation_chapter */ +/** \addtogroup TutorialBlockOperations + \ingroup DenseMatrixManipulation_chapter */ +/** \addtogroup TutorialSlicingIndexing + \ingroup DenseMatrixManipulation_chapter */ +/** \addtogroup TutorialAdvancedInitialization + \ingroup DenseMatrixManipulation_chapter */ +/** \addtogroup TutorialReductionsVisitorsBroadcasting + \ingroup DenseMatrixManipulation_chapter */ +/** \addtogroup TutorialReshape + \ingroup DenseMatrixManipulation_chapter */ +/** \addtogroup TutorialSTL + \ingroup DenseMatrixManipulation_chapter */ +/** \addtogroup TutorialMapClass + \ingroup DenseMatrixManipulation_chapter */ +/** \addtogroup TopicAliasing + \ingroup DenseMatrixManipulation_chapter */ +/** \addtogroup TopicStorageOrders + \ingroup DenseMatrixManipulation_chapter */ + +/** \addtogroup DenseMatrixManipulation_Alignement + \ingroup DenseMatrixManipulation_chapter */ +/** \addtogroup TopicUnalignedArrayAssert + \ingroup DenseMatrixManipulation_Alignement */ +/** \addtogroup TopicFixedSizeVectorizable + \ingroup DenseMatrixManipulation_Alignement */ +/** \addtogroup TopicStructHavingEigenMembers + \ingroup DenseMatrixManipulation_Alignement */ +/** \addtogroup TopicStlContainers + \ingroup DenseMatrixManipulation_Alignement */ +/** \addtogroup TopicPassingByValue + \ingroup DenseMatrixManipulation_Alignement */ +/** \addtogroup TopicWrongStackAlignment + \ingroup DenseMatrixManipulation_Alignement */ + +/** \addtogroup DenseMatrixManipulation_Reference + \ingroup DenseMatrixManipulation_chapter */ +/** 
\addtogroup Core_Module + \ingroup DenseMatrixManipulation_Reference */ +/** \addtogroup Jacobi_Module + \ingroup DenseMatrixManipulation_Reference */ +/** \addtogroup Householder_Module + \ingroup DenseMatrixManipulation_Reference */ + +/** \addtogroup CoeffwiseMathFunctions + \ingroup DenseMatrixManipulation_chapter */ + +/** \addtogroup QuickRefPage + \ingroup DenseMatrixManipulation_chapter */ + + +/** \defgroup DenseLinearSolvers_chapter Dense linear problems and decompositions */ +/** \defgroup DenseLinearSolvers_Reference Reference */ + +/** \addtogroup TutorialLinearAlgebra + \ingroup DenseLinearSolvers_chapter */ +/** \addtogroup TopicLinearAlgebraDecompositions + \ingroup DenseLinearSolvers_chapter */ +/** \addtogroup LeastSquares + \ingroup DenseLinearSolvers_chapter */ +/** \addtogroup InplaceDecomposition + \ingroup DenseLinearSolvers_chapter */ +/** \addtogroup DenseDecompositionBenchmark + \ingroup DenseLinearSolvers_chapter */ + +/** \addtogroup DenseLinearSolvers_Reference + \ingroup DenseLinearSolvers_chapter */ +/** \addtogroup Cholesky_Module + \ingroup DenseLinearSolvers_Reference */ +/** \addtogroup LU_Module + \ingroup DenseLinearSolvers_Reference */ +/** \addtogroup QR_Module + \ingroup DenseLinearSolvers_Reference */ +/** \addtogroup SVD_Module + \ingroup DenseLinearSolvers_Reference*/ +/** \addtogroup Eigenvalues_Module + \ingroup DenseLinearSolvers_Reference */ + + + + +/** \defgroup Sparse_chapter Sparse linear algebra */ +/** \defgroup Sparse_Reference Reference */ + +/** \addtogroup TutorialSparse + \ingroup Sparse_chapter */ +/** \addtogroup TopicSparseSystems + \ingroup Sparse_chapter */ +/** \addtogroup MatrixfreeSolverExample + \ingroup Sparse_chapter */ + +/** \addtogroup Sparse_Reference + \ingroup Sparse_chapter */ +/** \addtogroup SparseCore_Module + \ingroup Sparse_Reference */ +/** \addtogroup OrderingMethods_Module + \ingroup Sparse_Reference */ +/** \addtogroup SparseCholesky_Module + \ingroup Sparse_Reference */ +/** 
\addtogroup SparseLU_Module + \ingroup Sparse_Reference */ +/** \addtogroup SparseQR_Module + \ingroup Sparse_Reference */ +/** \addtogroup IterativeLinearSolvers_Module + \ingroup Sparse_Reference */ +/** \addtogroup Sparse_Module + \ingroup Sparse_Reference */ +/** \addtogroup Support_modules + \ingroup Sparse_Reference */ + +/** \addtogroup SparseQuickRefPage + \ingroup Sparse_chapter */ + + +/** \defgroup Geometry_chapter Geometry */ +/** \defgroup Geometry_Reference Reference */ + +/** \addtogroup TutorialGeometry + \ingroup Geometry_chapter */ + +/** \addtogroup Geometry_Reference + \ingroup Geometry_chapter */ +/** \addtogroup Geometry_Module + \ingroup Geometry_Reference */ +/** \addtogroup Splines_Module + \ingroup Geometry_Reference */ + +/** \internal \brief Namespace containing low-level routines from the %Eigen library. */ +namespace internal {} +} diff --git a/include/eigen/doc/Overview.dox b/include/eigen/doc/Overview.dox new file mode 100644 index 0000000000000000000000000000000000000000..3bca80656dbdfb4f0ae87f8664cbacc48f5ec717 --- /dev/null +++ b/include/eigen/doc/Overview.dox @@ -0,0 +1,32 @@ +namespace Eigen { + +/** \mainpage notitle + +This is the API documentation for Eigen3. You can download it as a tgz archive for offline reading. + +For a first contact with Eigen, the best place is to have a look at the \subpage GettingStarted page that show you how to write and compile your first program with Eigen. + +Then, the \b quick \b reference \b pages give you a quite complete description of the API in a very condensed format that is specially useful to recall the syntax of a particular feature, or to have a quick look at the API. They currently cover the two following feature sets, and more will come in the future: + - \link QuickRefPage [QuickRef] Dense matrix and array manipulations \endlink + - \link SparseQuickRefPage [QuickRef] Sparse linear algebra \endlink + +You're a MatLab user? 
There is also a short ASCII reference with Matlab translations. + +The \b main \b documentation is organized into \em chapters covering different domains of features. +They are themselves composed of \em user \em manual pages describing the different features in a comprehensive way, and \em reference pages that gives you access to the API documentation through the related Eigen's \em modules and \em classes. + +Under the \subpage UserManual_CustomizingEigen section, you will find discussions and examples on extending %Eigen's features and supporting custom scalar types. + +Under the \subpage UserManual_Generalities section, you will find documentation on more general topics such as preprocessor directives, controlling assertions, multi-threading, MKL support, some Eigen's internal insights, and much more... + +For details regarding Eigen's inner-workings, see the \subpage UserManual_UnderstandingEigen section. + +Some random topics can be found under the \subpage UnclassifiedPages section. + +Finally, do not miss the search engine, useful to quickly get to the documentation of a given class or function. + +Want more? Checkout the \em unsupported \em modules documentation. + +*/ + +} diff --git a/include/eigen/doc/PassingByValue.dox b/include/eigen/doc/PassingByValue.dox new file mode 100644 index 0000000000000000000000000000000000000000..9254fe6d88e6da635a2a0f06a23315ed1b587cee --- /dev/null +++ b/include/eigen/doc/PassingByValue.dox @@ -0,0 +1,40 @@ +namespace Eigen { + +/** \eigenManualPage TopicPassingByValue Passing Eigen objects by value to functions + +Passing objects by value is almost always a very bad idea in C++, as this means useless copies, and one should pass them by reference instead. + +With %Eigen, this is even more important: passing \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen objects" by value is not only inefficient, it can be illegal or make your program crash! 
And the reason is that these %Eigen objects have alignment modifiers that aren't respected when they are passed by value. + +For example, a function like this, where \c v is passed by value: + +\code +void my_function(Eigen::Vector2d v); +\endcode + +needs to be rewritten as follows, passing \c v by const reference: + +\code +void my_function(const Eigen::Vector2d& v); +\endcode + +Likewise if you have a class having an %Eigen object as member: + +\code +struct Foo +{ + Eigen::Vector2d v; +}; +void my_function(Foo v); +\endcode + +This function also needs to be rewritten like this: +\code +void my_function(const Foo& v); +\endcode + +Note that on the other hand, there is no problem with functions that return objects by value. + +*/ + +} diff --git a/include/eigen/doc/Pitfalls.dox b/include/eigen/doc/Pitfalls.dox new file mode 100644 index 0000000000000000000000000000000000000000..85282bd6f544e02bf7c4330837adde410e2771ff --- /dev/null +++ b/include/eigen/doc/Pitfalls.dox @@ -0,0 +1,149 @@ +namespace Eigen { + +/** \page TopicPitfalls Common pitfalls + + +\section TopicPitfalls_template_keyword Compilation error with template methods + +See this \link TopicTemplateKeyword page \endlink. + + +\section TopicPitfalls_aliasing Aliasing + +Don't miss this \link TopicAliasing page \endlink on aliasing, +especially if you got wrong results in statements where the destination appears on the right hand side of the expression. + + +\section TopicPitfalls_alignment_issue Alignment Issues (runtime assertion) + +%Eigen does explicit vectorization, and while that is appreciated by many users, that also leads to some issues in special situations where data alignment is compromised. +Indeed, prior to C++17, C++ does not have quite good enough support for explicit data alignment. 
+In that case your program hits an assertion failure (that is, a "controlled crash") with a message that tells you to consult this page: +\code +http://eigen.tuxfamily.org/dox/group__TopicUnalignedArrayAssert.html +\endcode +Have a look at \link TopicUnalignedArrayAssert it \endlink and see for yourself if that's something that you can cope with. +It contains detailed information about how to deal with each known cause for that issue. + +Now what if you don't care about vectorization and so don't want to be annoyed with these alignment issues? Then read \link getrid how to get rid of them \endlink. + + +\section TopicPitfalls_auto_keyword C++11 and the auto keyword + +In short: do not use the auto keyword with %Eigen's expressions, unless you are 100% sure about what you are doing. In particular, do not use the auto keyword as a replacement for a \c Matrix<> type. Here is an example: + +\code +MatrixXd A, B; +auto C = A*B; +for(...) { ... w = C * v; ...} +\endcode + +In this example, the type of C is not a \c MatrixXd but an abstract expression representing a matrix product and storing references to \c A and \c B. +Therefore, the product of \c A*B will be carried out multiple times, once per iteration of the for loop. +Moreover, if the coefficients of `A` or `B` change during the iteration, then `C` will evaluate to different values as in the following example: + +\code +MatrixXd A = ..., B = ...; +auto C = A*B; +MatrixXd R1 = C; +A = ...; +MatrixXd R2 = C; +\endcode +for which we end up with `R1` ≠ `R2`. + + +Here is another example leading to a segfault: +\code +auto C = ((A+B).eval()).transpose(); +// do something with C +\endcode +The problem is that \c eval() returns a temporary object (in this case a \c MatrixXd) which is then referenced by the \c Transpose<> expression. +However, this temporary is deleted right after the first line, and then the \c C expression references a dead object. 
+One possible fix consists in applying \c eval() on the whole expression: +\code +auto C = (A+B).transpose().eval(); +\endcode + +The same issue might occur when sub expressions are automatically evaluated by %Eigen as in the following example: +\code +VectorXd u, v; +auto C = u + (A*v).normalized(); +// do something with C +\endcode +Here the \c normalized() method has to evaluate the expensive product \c A*v to avoid evaluating it twice. +Again, one possible fix is to call \c .eval() on the whole expression: +\code +auto C = (u + (A*v).normalized()).eval(); +\endcode +In this case, \c C will be a regular \c VectorXd object. +Note that DenseBase::eval() is smart enough to avoid copies when the underlying expression is already a plain \c Matrix<>. + + +\section TopicPitfalls_header_issues Header Issues (failure to compile) + +With all libraries, one must check the documentation for which header to include. +The same is true with %Eigen, but slightly worse: with %Eigen, a method in a class may require an additional \c \#include over what the class itself requires! +For example, if you want to use the \c cross() method on a vector (it computes a cross-product) then you need to: +\code +#include +\endcode +We try to always document this, but do tell us if we forgot an occurrence. + + +\section TopicPitfalls_ternary_operator Ternary operator + +In short: avoid the use of the ternary operator (COND ? THEN : ELSE) with %Eigen's expressions for the \c THEN and \c ELSE statements. +To see why, let's consider the following example: +\code +Vector3f A; +A << 1, 2, 3; +Vector3f B = ((1 < 0) ? (A.reverse()) : A); +\endcode +This example will return B = 3, 2, 1. Do you see why? +The reason is that in c++ the type of the \c ELSE statement is inferred from the type of the \c THEN expression such that both match. +Since \c THEN is a Reverse, the \c ELSE statement A is converted to a Reverse, and the compiler thus generates: +\code +Vector3f B = ((1 < 0) ? 
(A.reverse()) : Reverse(A)); +\endcode +In this very particular case, a workaround would be to call A.reverse().eval() for the \c THEN statement, but the safest and fastest is really to avoid this ternary operator with %Eigen's expressions and use a if/else construct. + + +\section TopicPitfalls_pass_by_value Pass-by-value + +If you don't know why passing-by-value is wrong with %Eigen, read this \link TopicPassingByValue page \endlink first. + +While you may be extremely careful and use care to make sure that all of your code that explicitly uses %Eigen types is pass-by-reference you have to watch out for templates which define the argument types at compile time. + +If a template has a function that takes arguments pass-by-value, and the relevant template parameter ends up being an %Eigen type, then you will of course have the same alignment problems that you would in an explicitly defined function passing %Eigen types by reference. + +Using %Eigen types with other third party libraries or even the STL can present the same problem. +boost::bind for example uses pass-by-value to store arguments in the returned functor. +This will of course be a problem. + +There are at least two ways around this: + - If the value you are passing is guaranteed to be around for the life of the functor, you can use boost::ref() to wrap the value as you pass it to boost::bind. Generally this is not a solution for values on the stack as if the functor ever gets passed to a lower or independent scope, the object may be gone by the time it's attempted to be used. + - The other option is to make your functions take a reference counted pointer like boost::shared_ptr as the argument. This avoids needing to worry about managing the lifetime of the object being passed. 
+ + +\section TopicPitfalls_matrix_bool Matrices with boolean coefficients + +The current behaviour of using \c Matrix with boolean coefficients is inconsistent and likely to change in future versions of Eigen, so please use it carefully! + +A simple example for such an inconsistency is + +\code +template +void foo() { + Eigen::Matrix A, B, C; + A.setOnes(); + B.setOnes(); + + C = A * B - A * B; + std::cout << C << "\n"; +} +\endcode + +since calling \c foo<3>() prints the zero matrix while calling \c foo<10>() prints the identity matrix. + +*/ +} diff --git a/include/eigen/doc/QuickStartGuide.dox b/include/eigen/doc/QuickStartGuide.dox new file mode 100644 index 0000000000000000000000000000000000000000..6042acaf929ee7dda4cfb32f435e8c91d285e51d --- /dev/null +++ b/include/eigen/doc/QuickStartGuide.dox @@ -0,0 +1,100 @@ +namespace Eigen { + +/** \page GettingStarted Getting started + +\eigenAutoToc + +This is a very short guide on how to get started with Eigen. It has a dual purpose. It serves as a minimal introduction to the Eigen library for people who want to start coding as soon as possible. You can also read this page as the first part of the Tutorial, which explains the library in more detail; in this case you will continue with \ref TutorialMatrixClass. + +\section GettingStartedInstallation How to "install" Eigen? + +In order to use Eigen, you just need to download and extract Eigen's source code (see the wiki for download instructions). In fact, the header files in the \c Eigen subdirectory are the only files required to compile programs using Eigen. The header files are the same for all platforms. It is not necessary to use CMake or install anything. + + +\section GettingStartedFirstProgram A simple first program + +Here is a rather simple program to get you started. + +\include QuickStart_example.cpp + +We will explain the program after telling you how to compile it. 
+ + +\section GettingStartedCompiling Compiling and running your first program + +There is no library to link to. The only thing that you need to keep in mind when compiling the above program is that the compiler must be able to find the Eigen header files. The directory in which you placed Eigen's source code must be in the include path. With GCC you use the \c -I option to achieve this, so you can compile the program with a command like this: + +\code g++ -I /path/to/eigen/ my_program.cpp -o my_program \endcode + +On Linux or Mac OS X, another option is to symlink or copy the Eigen folder into \c /usr/local/include/. This way, you can compile the program with: + +\code g++ my_program.cpp -o my_program \endcode + +When you run the program, it produces the following output: + +\include QuickStart_example.out + + +\section GettingStartedExplanation Explanation of the first program + +The Eigen header files define many types, but for simple applications it may be enough to use only the \c MatrixXd type. This represents a matrix of arbitrary size (hence the \c X in \c MatrixXd), in which every entry is a \c double (hence the \c d in \c MatrixXd). See the \ref QuickRef_Types "quick reference guide" for an overview of the different types you can use to represent a matrix. + +The \c Eigen/Dense header file defines all member functions for the MatrixXd type and related types (see also the \ref QuickRef_Headers "table of header files"). All classes and functions defined in this header file (and other Eigen header files) are in the \c Eigen namespace. + +The first line of the \c main function declares a variable of type \c MatrixXd and specifies that it is a matrix with 2 rows and 2 columns (the entries are not initialized). The statement m(0,0) = 3 sets the entry in the top-left corner to 3. You need to use round parentheses to refer to entries in the matrix. 
As usual in computer science, the index of the first entry is 0, as opposed to the convention in mathematics that the first index is 1. + +The following three statements set the other three entries. The final line outputs the matrix \c m to the standard output stream. + + +\section GettingStartedExample2 Example 2: Matrices and vectors + +Here is another example, which combines matrices with vectors. Concentrate on the left-hand program for now; we will talk about the right-hand program later. + + + + +
Size set at run time:Size set at compile time:
+\include QuickStart_example2_dynamic.cpp + +\include QuickStart_example2_fixed.cpp +
+ +The output is as follows: + +\include QuickStart_example2_dynamic.out + + +\section GettingStartedExplanation2 Explanation of the second example + +The second example starts by declaring a 3-by-3 matrix \c m which is initialized using the \link DenseBase::Random(Index,Index) Random() \endlink method with random values between -1 and 1. The next line applies a linear mapping such that the values are between 10 and 110. The function call \link DenseBase::Constant(Index,Index,const DenseBase::Scalar&) MatrixXd::Constant\endlink(3,3,1.2) returns a 3-by-3 matrix expression having all coefficients equal to 1.2. The rest is standard arithmetic. + +The next line of the \c main function introduces a new type: \c VectorXd. This represents a (column) vector of arbitrary size. Here, the vector \c v is created to contain \c 3 coefficients which are left uninitialized. The one but last line uses the so-called comma-initializer, explained in \ref TutorialAdvancedInitialization, to set all coefficients of the vector \c v to be as follows: + +\f[ +v = +\begin{bmatrix} + 1 \\ + 2 \\ + 3 +\end{bmatrix}. +\f] + +The final line of the program multiplies the matrix \c m with the vector \c v and outputs the result. + +Now look back at the second example program. We presented two versions of it. In the version in the left column, the matrix is of type \c MatrixXd which represents matrices of arbitrary size. The version in the right column is similar, except that the matrix is of type \c Matrix3d, which represents matrices of a fixed size (here 3-by-3). Because the type already encodes the size of the matrix, it is not necessary to specify the size in the constructor; compare MatrixXd m(3,3) with Matrix3d m. Similarly, we have \c VectorXd on the left (arbitrary size) versus \c Vector3d on the right (fixed size). Note that here the coefficients of vector \c v are directly set in the constructor, though the same syntax of the left example could be used too. 
+ +The use of fixed-size matrices and vectors has two advantages. The compiler emits better (faster) code because it knows the size of the matrices and vectors. Specifying the size in the type also allows for more rigorous checking at compile-time. For instance, the compiler will complain if you try to multiply a \c Matrix4d (a 4-by-4 matrix) with a \c Vector3d (a vector of size 3). However, the use of many types increases compilation time and the size of the executable. The size of the matrix may also not be known at compile-time. A rule of thumb is to use fixed-size matrices for size 4-by-4 and smaller. + + +\section GettingStartedConclusion Where to go from here? + +It's worth taking the time to read the \ref TutorialMatrixClass "long tutorial". + +However if you think you don't need it, you can directly use the classes documentation and our \ref QuickRefPage. + +\li \b Next: \ref TutorialMatrixClass + +*/ + +} + diff --git a/include/eigen/doc/SparseQuickReference.dox b/include/eigen/doc/SparseQuickReference.dox new file mode 100644 index 0000000000000000000000000000000000000000..b69fe2279194da9d17af5274a1e1c47ff6184c0e --- /dev/null +++ b/include/eigen/doc/SparseQuickReference.dox @@ -0,0 +1,272 @@ +namespace Eigen { +/** \eigenManualPage SparseQuickRefPage Quick reference guide for sparse matrices +\eigenAutoToc + +
+ +In this page, we give a quick summary of the main operations available for sparse matrices in the class SparseMatrix. First, it is recommended to read the introductory tutorial at \ref TutorialSparse. The important point to have in mind when working on sparse matrices is how they are stored : +i.e either row major or column major. The default is column major. Most arithmetic operations on sparse matrices will assert that they have the same storage order. + +\section SparseMatrixInit Sparse Matrix Initialization + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Category Operations Notes
Constructor +\code + SparseMatrix sm1(1000,1000); + SparseMatrix,RowMajor> sm2; +\endcode + Default is ColMajor
Resize/Reserve + \code + sm1.resize(m,n); // Change sm1 to a m x n matrix. + sm1.reserve(nnz); // Allocate room for nnz nonzero elements. + \endcode + Note that when calling reserve(), it is not required that nnz is the exact number of nonzero elements in the final matrix. However, an exact estimation will avoid multiple reallocations during the insertion phase.
Assignment +\code + SparseMatrix sm1; + // Initialize sm2 with sm1. + SparseMatrix sm2(sm1), sm3; + // Assignment and evaluations modify the storage order. + sm3 = sm1; + \endcode + The copy constructor can be used to convert from a storage order to another
Element-wise Insertion +\code +// Insert a new element; + sm1.insert(i, j) = v_ij; + +// Update the value v_ij + sm1.coeffRef(i,j) = v_ij; + sm1.coeffRef(i,j) += v_ij; + sm1.coeffRef(i,j) -= v_ij; +\endcode + insert() assumes that the element does not already exist; otherwise, use coeffRef()
Batch insertion +\code + std::vector< Eigen::Triplet > tripletList; + tripletList.reserve(estimation_of_entries); + // -- Fill tripletList with nonzero elements... + sm1.setFromTriplets(tripletList.begin(), tripletList.end()); +\endcode +A complete example is available at \link TutorialSparseFilling Triplet Insertion \endlink.
Constant or Random Insertion +\code +sm1.setZero(); +\endcode +Remove all non-zero coefficients
+ + +\section SparseBasicInfos Matrix properties +Beyond the basic functions rows() and cols(), there are some useful functions that are available to easily get some information from the matrix. + + + + +
\code + sm1.rows(); // Number of rows + sm1.cols(); // Number of columns + sm1.nonZeros(); // Number of non zero values + sm1.outerSize(); // Number of columns (resp. rows) for a column major (resp. row major ) + sm1.innerSize(); // Number of rows (resp. columns) for a row major (resp. column major) + sm1.norm(); // Euclidean norm of the matrix + sm1.squaredNorm(); // Squared norm of the matrix + sm1.blueNorm(); + sm1.isVector(); // Check if sm1 is a sparse vector or a sparse matrix + sm1.isCompressed(); // Check if sm1 is in compressed form + ... + \endcode
+ +\section SparseBasicOps Arithmetic operations +It is easy to perform arithmetic operations on sparse matrices provided that the dimensions are adequate and that the matrices have the same storage order. Note that the evaluation can always be done in a matrix with a different storage order. In the following, \b sm denotes a sparse matrix, \b dm a dense matrix and \b dv a dense vector. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Operations Code Notes
add subtract \code + sm3 = sm1 + sm2; + sm3 = sm1 - sm2; + sm2 += sm1; + sm2 -= sm1; \endcode + + sm1 and sm2 should have the same storage order +
+ scalar product\code + sm3 = sm1 * s1; sm3 *= s1; + sm3 = s1 * sm1 + s2 * sm2; sm3 /= s1;\endcode + + Many combinations are possible if the dimensions and the storage order agree. +
%Sparse %Product \code + sm3 = sm1 * sm2; + dm2 = sm1 * dm1; + dv2 = sm1 * dv1; + \endcode +
transposition, adjoint \code + sm2 = sm1.transpose(); + sm2 = sm1.adjoint(); + \endcode + Note that the transposition changes the storage order. There is no support for transposeInPlace(). +
Permutation +\code +perm.indices(); // Reference to the vector of indices +sm1.twistedBy(perm); // Permute rows and columns +sm2 = sm1 * perm; // Permute the columns +sm2 = perm * sm1; // Permute the rows +\endcode + + +
+ Component-wise ops + \code + sm1.cwiseProduct(sm2); + sm1.cwiseQuotient(sm2); + sm1.cwiseMin(sm2); + sm1.cwiseMax(sm2); + sm1.cwiseAbs(); + sm1.cwiseSqrt(); + \endcode + sm1 and sm2 should have the same storage order +
+ +\section sparseotherops Other supported operations + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Code Notes
Sub-matrices
+\code + sm1.block(startRow, startCol, rows, cols); + sm1.block(startRow, startCol); + sm1.topLeftCorner(rows, cols); + sm1.topRightCorner(rows, cols); + sm1.bottomLeftCorner( rows, cols); + sm1.bottomRightCorner( rows, cols); + \endcode + +Contrary to dense matrices, here all these methods are read-only.\n +See \ref TutorialSparse_SubMatrices and below for read-write sub-matrices. +
Range
+\code + sm1.innerVector(outer); // RW + sm1.innerVectors(start, size); // RW + sm1.leftCols(size); // RW + sm2.rightCols(size); // RO because sm2 is row-major + sm1.middleRows(start, numRows); // RO because sm1 is column-major + sm1.middleCols(start, numCols); // RW + sm1.col(j); // RW +\endcode + +An inner vector is either a row (for row-major) or a column (for column-major).\n +As stated earlier, for a read-write sub-matrix (RW), the evaluation can be done in a matrix with different storage order. +
Triangular and selfadjoint views
+\code + sm2 = sm1.triangularView(); + sm2 = sm1.selfadjointView(); +\endcode + Several combinations between triangular views and block views are possible +\code + \endcode
Triangular solve
+\code + dv2 = sm1.triangularView().solve(dv1); + dv2 = sm1.topLeftCorner(size, size) + .triangularView().solve(dv1); +\endcode + For general sparse solve, use any suitable module described at \ref TopicSparseSystems
Low-level API
+\code +sm1.valuePtr(); // Pointer to the values +sm1.innerIndexPtr(); // Pointer to the indices. +sm1.outerIndexPtr(); // Pointer to the beginning of each inner vector +\endcode + +If the matrix is not in compressed form, `makeCompressed()` should be called before.\n +Note that these functions are mostly provided for interoperability purposes with external libraries.\n +A better access to the values of the matrix is done by using the InnerIterator class as described in \link TutorialSparse the Tutorial Sparse \endlink section
Mapping external buffers
+\code +int outerIndexPtr[cols+1]; +int innerIndices[nnz]; +double values[nnz]; +Map > sm1(rows,cols,nnz,outerIndexPtr, // read-write + innerIndices,values); +Map > sm2(...); // read-only +\endcode +As for dense matrices, class Map can be used to see external buffers as an %Eigen's SparseMatrix object.
+*/ +} diff --git a/include/eigen/doc/StlContainers.dox b/include/eigen/doc/StlContainers.dox new file mode 100644 index 0000000000000000000000000000000000000000..0342573d0b88c39f92071a608b2f7a7d14b8b39a --- /dev/null +++ b/include/eigen/doc/StlContainers.dox @@ -0,0 +1,73 @@ +namespace Eigen { + +/** \eigenManualPage TopicStlContainers Using STL Containers with Eigen + +\eigenAutoToc + +\section StlContainers_summary Executive summary + +If you're compiling in \cpp17 mode only with a sufficiently recent compiler (e.g., GCC>=7, clang>=5, MSVC>=19.12), then everything is taken care by the compiler and you can stop reading. + +Otherwise, using STL containers on \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen types", or classes having members of such types, requires the use of an over-aligned allocator. +That is, an allocator capable of allocating buffers with 16, 32, or even 64 bytes alignment. +%Eigen does provide one ready for use: aligned_allocator. + +Prior to \cpp11, if you want to use the `std::vector` container, then you also have to \#include . + +These issues arise only with \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen types" and \ref TopicStructHavingEigenMembers "structures having such Eigen objects as member". +For other %Eigen types, such as Vector3f or MatrixXd, no special care is needed when using STL containers. + +\section allocator Using an aligned allocator + +STL containers take an optional template parameter, the allocator type. When using STL containers on \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen types", you need tell the container to use an allocator that will always allocate memory at 16-byte-aligned (or more) locations. Fortunately, %Eigen does provide such an allocator: Eigen::aligned_allocator. 
+ +For example, instead of +\code +std::map +\endcode +you need to use +\code +std::map, + Eigen::aligned_allocator > > +\endcode +Note that the third parameter `std::less` is just the default value, but we have to include it because we want to specify the fourth parameter, which is the allocator type. + +\section StlContainers_vector The case of std::vector + +This section is for c++98/03 users only. \cpp11 (or above) users can stop reading here. + +So in c++98/03, the situation with `std::vector` is more complicated because of a bug in the standard (explanation below). +To workaround the issue, we had to specialize it for the Eigen::aligned_allocator type. +In practice you \b must use the Eigen::aligned_allocator (not another aligned allocator), \b and \#include . + +Here is an example: +\code +#include +/* ... */ +std::vector > +\endcode + +\b Explanation: The `resize()` method of `std::vector` takes a `value_type` argument (defaulting to `value_type()`). So with `std::vector`, some Eigen::Vector4d objects will be passed by value, which discards any alignment modifiers, so a Eigen::Vector4d can be created at an unaligned location. +In order to avoid that, the only solution we saw was to specialize `std::vector` to make it work on a slight modification of, here, Eigen::Vector4d, that is able to deal properly with this situation. + + +\subsection vector_spec An alternative - specializing std::vector for Eigen types + +As an alternative to the recommended approach described above, you have the option to specialize std::vector for Eigen types requiring alignment. +The advantage is that you won't need to declare std::vector all over with Eigen::aligned_allocator. One drawback on the other hand side is that +the specialization needs to be defined before all code pieces in which e.g. `std::vector` is used. 
Otherwise, without knowing the specialization +the compiler will compile that particular instance with the default `std::allocator` and you program is most likely to crash. + +Here is an example: +\code +#include +/* ... */ +EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION(Matrix2d) +std::vector +\endcode + + + +*/ + +} diff --git a/include/eigen/doc/StorageOrders.dox b/include/eigen/doc/StorageOrders.dox new file mode 100644 index 0000000000000000000000000000000000000000..61645313ee8586bd4ada5b8bffc2eb69ac8d4ce5 --- /dev/null +++ b/include/eigen/doc/StorageOrders.dox @@ -0,0 +1,86 @@ +namespace Eigen { + +/** \eigenManualPage TopicStorageOrders Storage orders + +There are two different storage orders for matrices and two-dimensional arrays: column-major and row-major. +This page explains these storage orders and how to specify which one should be used. + +\eigenAutoToc + + +\section TopicStorageOrdersIntro Column-major and row-major storage + +The entries of a matrix form a two-dimensional grid. However, when the matrix is stored in memory, the entries +have to somehow be laid out linearly. There are two main ways to do this, by row and by column. + +We say that a matrix is stored in \b row-major order if it is stored row by row. The entire first row is +stored first, followed by the entire second row, and so on. Consider for example the matrix + +\f[ +A = \begin{bmatrix} +8 & 2 & 2 & 9 \\ +9 & 1 & 4 & 4 \\ +3 & 5 & 4 & 5 +\end{bmatrix}. +\f] + +If this matrix is stored in row-major order, then the entries are laid out in memory as follows: + +\code 8 2 2 9 9 1 4 4 3 5 4 5 \endcode + +On the other hand, a matrix is stored in \b column-major order if it is stored column by column, starting with +the entire first column, followed by the entire second column, and so on. If the above matrix is stored in +column-major order, it is laid out as follows: + +\code 8 9 3 2 1 5 2 4 4 9 4 5 \endcode + +This example is illustrated by the following Eigen code. 
It uses the PlainObjectBase::data() function, which +returns a pointer to the memory location of the first entry of the matrix. + + + + +
ExampleOutput
+\include TopicStorageOrders_example.cpp + +\verbinclude TopicStorageOrders_example.out +
+ + +\section TopicStorageOrdersInEigen Storage orders in Eigen + +The storage order of a matrix or a two-dimensional array can be set by specifying the \c Options template +parameter for Matrix or Array. As \ref TutorialMatrixClass explains, the %Matrix class template has six +template parameters, of which three are compulsory (\c Scalar, \c RowsAtCompileTime and \c ColsAtCompileTime) +and three are optional (\c Options, \c MaxRowsAtCompileTime and \c MaxColsAtCompileTime). If the \c Options +parameter is set to \c RowMajor, then the matrix or array is stored in row-major order; if it is set to +\c ColMajor, then it is stored in column-major order. This mechanism is used in the above Eigen program to +specify the storage order. + +If the storage order is not specified, then Eigen defaults to storing the entry in column-major. This is also +the case if one of the convenience typedefs (\c Matrix3f, \c ArrayXXd, etc.) is used. + +Matrices and arrays using one storage order can be assigned to matrices and arrays using the other storage +order, as happens in the above program when \c Arowmajor is initialized using \c Acolmajor. Eigen will reorder +the entries automatically. More generally, row-major and column-major matrices can be mixed in an expression +as we want. + + +\section TopicStorageOrdersWhich Which storage order to choose? + +So, which storage order should you use in your program? There is no simple answer to this question; it depends +on your application. Here are some points to keep in mind: + + - Your users may expect you to use a specific storage order. Alternatively, you may use other libraries than + Eigen, and these other libraries may expect a certain storage order. In these cases it may be easiest and + fastest to use this storage order in your whole program. + - Algorithms that traverse a matrix row by row will go faster when the matrix is stored in row-major order + because of better data locality. 
Similarly, column-by-column traversal is faster for column-major + matrices. It may be worthwhile to experiment a bit to find out what is faster for your particular + application. + - The default in Eigen is column-major. Naturally, most of the development and testing of the Eigen library + is thus done with column-major matrices. This means that, even though we aim to support column-major and + row-major storage orders transparently, the Eigen library may well work best with column-major matrices. + +*/ +} diff --git a/include/eigen/doc/TemplateKeyword.dox b/include/eigen/doc/TemplateKeyword.dox new file mode 100644 index 0000000000000000000000000000000000000000..fbf2c708165b76830bc06c0974f60943fdb6ae10 --- /dev/null +++ b/include/eigen/doc/TemplateKeyword.dox @@ -0,0 +1,133 @@ +namespace Eigen { + +/** \page TopicTemplateKeyword The template and typename keywords in C++ + +There are two uses for the \c template and \c typename keywords in C++. One of them is fairly well known +amongst programmers: to define templates. The other use is more obscure: to specify that an expression refers +to a template function or a type. This regularly trips up programmers that use the %Eigen library, often +leading to error messages from the compiler that are difficult to understand, such as "expected expression" or +"no match for operator<". + +\eigenAutoToc + + +\section TopicTemplateKeywordToDefineTemplates Using the template and typename keywords to define templates + +The \c template and \c typename keywords are routinely used to define templates. This is not the topic of this +page as we assume that the reader is aware of this (otherwise consult a C++ book). The following example +should illustrate this use of the \c template keyword. + +\code +template +bool isPositive(T x) +{ + return x > 0; +} +\endcode + +We could just as well have written template <class T>; the keywords \c typename and \c class have the +same meaning in this context. 
+ + +\section TopicTemplateKeywordExample An example showing the second use of the template keyword + +Let us illustrate the second use of the \c template keyword with an example. Suppose we want to write a +function which copies all entries in the upper triangular part of a matrix into another matrix, while keeping +the lower triangular part unchanged. A straightforward implementation would be as follows: + + + + +
Example:Output:
+\include TemplateKeyword_simple.cpp + +\verbinclude TemplateKeyword_simple.out +
+ +That works fine, but it is not very flexible. First, it only works with dynamic-size matrices of +single-precision floats; the function \c copyUpperTriangularPart() does not accept static-size matrices or +matrices with double-precision numbers. Second, if you use an expression such as +mat.topLeftCorner(3,3) as the parameter \c src, then this is copied into a temporary variable of type +MatrixXf; this copy can be avoided. + +As explained in \ref TopicFunctionTakingEigenTypes, both issues can be resolved by making +\c copyUpperTriangularPart() accept any object of type MatrixBase. This leads to the following code: + + + + +
Example:Output:
+\include TemplateKeyword_flexible.cpp + +\verbinclude TemplateKeyword_flexible.out +
+ +The one line in the body of the function \c copyUpperTriangularPart() shows the second, more obscure use of +the \c template keyword in C++. Even though it may look strange, the \c template keywords are necessary +according to the standard. Without it, the compiler may reject the code with an error message like "no match +for operator<". + + +\section TopicTemplateKeywordExplanation Explanation + +The reason that the \c template keyword is necessary in the last example has to do with the rules for how +templates are supposed to be compiled in C++. The compiler has to check the code for correct syntax at the +point where the template is defined, without knowing the actual value of the template arguments (\c Derived1 +and \c Derived2 in the example). That means that the compiler cannot know that dst.triangularView is +a member template and that the following < symbol is part of the delimiter for the template +parameter. Another possibility would be that dst.triangularView is a member variable with the < +symbol referring to the operator<() function. In fact, the compiler should choose the second +possibility, according to the standard. If dst.triangularView is a member template (as in our case), +the programmer should specify this explicitly with the \c template keyword and write dst.template +triangularView. + +The precise rules are rather complicated, but ignoring some subtleties we can summarize them as follows: +- A dependent name is name that depends (directly or indirectly) on a template parameter. In the + example, \c dst is a dependent name because it is of type MatrixBase<Derived1> which depends + on the template parameter \c Derived1. +- If the code contains either one of the constructs xxx.yyy or xxx->yyy and \c xxx is a + dependent name and \c yyy refers to a member template, then the \c template keyword must be used before + \c yyy, leading to xxx.template yyy or xxx->template yyy. 
+- If the code contains the construct xxx::yyy and \c xxx is a dependent name and \c yyy refers to a + member typedef, then the \c typename keyword must be used before the whole construct, leading to + typename xxx::yyy. + +As an example where the \c typename keyword is required, consider the following code in \ref TutorialSparse +for iterating over the non-zero entries of a sparse matrix type: + +\code +SparseMatrixType mat(rows,cols); +for (int k=0; k +void iterateOverSparseMatrix(const SparseMatrix& mat; +{ + for (int k=0; k::InnerIterator it(mat,k); it; ++it) + { + /* ... */ + } +} +\endcode + + +\section TopicTemplateKeywordResources Resources for further reading + +For more information and a fuller explanation of this topic, the reader may consult the following sources: +- The book "C++ Template Metaprogramming" by David Abrahams and Aleksey Gurtovoy contains a very good + explanation in Appendix B ("The typename and template Keywords") which formed the basis for this page. +- http://pages.cs.wisc.edu/~driscoll/typename.html +- http://www.parashift.com/c++-faq-lite/templates.html#faq-35.18 +- http://www.comeaucomputing.com/techtalk/templates/#templateprefix +- http://www.comeaucomputing.com/techtalk/templates/#typename + +*/ +} diff --git a/include/eigen/doc/TopicAliasing.dox b/include/eigen/doc/TopicAliasing.dox new file mode 100644 index 0000000000000000000000000000000000000000..a8f1644284e82af3cdc653548a4e74259bbc81a4 --- /dev/null +++ b/include/eigen/doc/TopicAliasing.dox @@ -0,0 +1,237 @@ +namespace Eigen { + +/** \eigenManualPage TopicAliasing Aliasing + +In %Eigen, aliasing refers to assignment statement in which the same matrix (or array or vector) appears on the +left and on the right of the assignment operators. Statements like mat = 2 * mat; or mat = +mat.transpose(); exhibit aliasing. The aliasing in the first example is harmless, but the aliasing in the +second example leads to unexpected results. 
This page explains what aliasing is, when it is harmful, and what +to do about it. + +\eigenAutoToc + + +\section TopicAliasingExamples Examples + +Here is a simple example exhibiting aliasing: + + + + +
ExampleOutput
+\include TopicAliasing_block.cpp + +\verbinclude TopicAliasing_block.out +
+ +The output is not what one would expect. The problem is the assignment +\code +mat.bottomRightCorner(2,2) = mat.topLeftCorner(2,2); +\endcode +This assignment exhibits aliasing: the coefficient \c mat(1,1) appears both in the block +mat.bottomRightCorner(2,2) on the left-hand side of the assignment and the block +mat.topLeftCorner(2,2) on the right-hand side. After the assignment, the (2,2) entry in the bottom +right corner should have the value of \c mat(1,1) before the assignment, which is 5. However, the output shows +that \c mat(2,2) is actually 1. The problem is that %Eigen uses lazy evaluation (see +\ref TopicEigenExpressionTemplates) for mat.topLeftCorner(2,2). The result is similar to +\code +mat(1,1) = mat(0,0); +mat(1,2) = mat(0,1); +mat(2,1) = mat(1,0); +mat(2,2) = mat(1,1); +\endcode +Thus, \c mat(2,2) is assigned the \e new value of \c mat(1,1) instead of the old value. The next section +explains how to solve this problem by calling \link DenseBase::eval() eval()\endlink. + +Aliasing occurs more naturally when trying to shrink a matrix. For example, the expressions vec = +vec.head(n) and mat = mat.block(i,j,r,c) exhibit aliasing. + +In general, aliasing cannot be detected at compile time: if \c mat in the first example were a bit bigger, +then the blocks would not overlap, and there would be no aliasing problem. However, %Eigen does detect some +instances of aliasing, albeit at run time. The following example exhibiting aliasing was mentioned in \ref +TutorialMatrixArithmetic : + + + + +
ExampleOutput
+\include tut_arithmetic_transpose_aliasing.cpp + +\verbinclude tut_arithmetic_transpose_aliasing.out +
+ +Again, the output shows the aliasing issue. However, by default %Eigen uses a run-time assertion to detect this +and exits with a message like + +\verbatim +void Eigen::DenseBase::checkTransposeAliasing(const OtherDerived&) const +[with OtherDerived = Eigen::Transpose >, Derived = Eigen::Matrix]: +Assertion `(!internal::check_transpose_aliasing_selector::IsTransposed,OtherDerived>::run(internal::extract_data(derived()), other)) +&& "aliasing detected during transposition, use transposeInPlace() or evaluate the rhs into a temporary using .eval()"' failed. +\endverbatim + +The user can turn %Eigen's run-time assertions like the one to detect this aliasing problem off by defining the +EIGEN_NO_DEBUG macro, and the above program was compiled with this macro turned off in order to illustrate the +aliasing problem. See \ref TopicAssertions for more information about %Eigen's run-time assertions. + + +\section TopicAliasingSolution Resolving aliasing issues + +If you understand the cause of the aliasing issue, then it is obvious what must happen to solve it: %Eigen has +to evaluate the right-hand side fully into a temporary matrix/array and then assign it to the left-hand +side. The function \link DenseBase::eval() eval() \endlink does precisely that. + +For example, here is the corrected version of the first example above: + + + + +
ExampleOutput
+\include TopicAliasing_block_correct.cpp + +\verbinclude TopicAliasing_block_correct.out +
+ +Now, \c mat(2,2) equals 5 after the assignment, as it should be. + +The same solution also works for the second example, with the transpose: simply replace the line +a = a.transpose(); with a = a.transpose().eval();. However, in this common case there is a +better solution. %Eigen provides the special-purpose function +\link DenseBase::transposeInPlace() transposeInPlace() \endlink which replaces a matrix by its transpose. +This is shown below: + + + + +
ExampleOutput
+\include tut_arithmetic_transpose_inplace.cpp + +\verbinclude tut_arithmetic_transpose_inplace.out +
+ +If an xxxInPlace() function is available, then it is best to use it, because it indicates more clearly what you +are doing. This may also allow %Eigen to optimize more aggressively. These are some of the xxxInPlace() +functions provided: + + + + + + + + + +
Original functionIn-place function
MatrixBase::adjoint() MatrixBase::adjointInPlace()
DenseBase::reverse() DenseBase::reverseInPlace()
LDLT::solve() LDLT::solveInPlace()
LLT::solve() LLT::solveInPlace()
TriangularView::solve() TriangularView::solveInPlace()
DenseBase::transpose() DenseBase::transposeInPlace()
+ +In the special case where a matrix or vector is shrunk using an expression like vec = vec.head(n), +you can use \link PlainObjectBase::conservativeResize() conservativeResize() \endlink. + + +\section TopicAliasingCwise Aliasing and component-wise operations + +As explained above, it may be dangerous if the same matrix or array occurs on both the left-hand side and the +right-hand side of an assignment operator, and it is then often necessary to evaluate the right-hand side +explicitly. However, applying component-wise operations (such as matrix addition, scalar multiplication and +array multiplication) is safe. + +The following example has only component-wise operations. Thus, there is no need for \link DenseBase::eval() +eval() \endlink even though the same matrix appears on both sides of the assignments. + + + + +
ExampleOutput
+\include TopicAliasing_cwise.cpp + +\verbinclude TopicAliasing_cwise.out +
+
+In general, an assignment is safe if the (i,j) entry of the expression on the right-hand side depends only on
+the (i,j) entry of the matrix or array on the left-hand side and not on any other entries. In that case it is
+not necessary to evaluate the right-hand side explicitly.
+
+
+\section TopicAliasingMatrixMult Aliasing and matrix multiplication
+
+Matrix multiplication is the only operation in %Eigen that assumes aliasing by default, under the
+condition that the destination matrix is not resized.
+Thus, if \c matA is a \b square matrix, then the statement matA = matA * matA; is safe.
+All other operations in %Eigen assume that there are no aliasing problems,
+either because the result is assigned to a different matrix or because it is a component-wise operation.
+
+
+
+
ExampleOutput
+\include TopicAliasing_mult1.cpp + +\verbinclude TopicAliasing_mult1.out +
+ +However, this comes at a price. When executing the expression matA = matA * matA, %Eigen evaluates the +product in a temporary matrix which is assigned to \c matA after the computation. This is fine. But %Eigen does +the same when the product is assigned to a different matrix (e.g., matB = matA * matA). In that case, +it is more efficient to evaluate the product directly into \c matB instead of evaluating it first into a +temporary matrix and copying that matrix to \c matB. + +The user can indicate with the \link MatrixBase::noalias() noalias()\endlink function that there is no +aliasing, as follows: matB.noalias() = matA * matA. This allows %Eigen to evaluate the matrix product +matA * matA directly into \c matB. + + + + +
ExampleOutput
+\include TopicAliasing_mult2.cpp + +\verbinclude TopicAliasing_mult2.out +
+ +Of course, you should not use \c noalias() when there is in fact aliasing taking place. If you do, then you +may get wrong results: + + + + +
ExampleOutput
+\include TopicAliasing_mult3.cpp + +\verbinclude TopicAliasing_mult3.out +
+ +Moreover, starting in Eigen 3.3, aliasing is \b not assumed if the destination matrix is resized and the product is not directly assigned to the destination. +Therefore, the following example is also wrong: + + + + +
ExampleOutput
+\include TopicAliasing_mult4.cpp + +\verbinclude TopicAliasing_mult4.out +
+ +As for any aliasing issue, you can resolve it by explicitly evaluating the expression prior to assignment: + + + +
ExampleOutput
+\include TopicAliasing_mult5.cpp + +\verbinclude TopicAliasing_mult5.out +
+ +\section TopicAliasingSummary Summary + +Aliasing occurs when the same matrix or array coefficients appear both on the left- and the right-hand side of +an assignment operator. + - Aliasing is harmless with coefficient-wise computations; this includes scalar multiplication and matrix or + array addition. + - When you multiply two matrices, %Eigen assumes that aliasing occurs. If you know that there is no aliasing, + then you can use \link MatrixBase::noalias() noalias()\endlink. + - In all other situations, %Eigen assumes that there is no aliasing issue and thus gives the wrong result if + aliasing does in fact occur. To prevent this, you have to use \link DenseBase::eval() eval() \endlink or + one of the xxxInPlace() functions. + +*/ +} diff --git a/include/eigen/doc/TopicCMakeGuide.dox b/include/eigen/doc/TopicCMakeGuide.dox new file mode 100644 index 0000000000000000000000000000000000000000..898886e0005d9e0e77ea79b6a331e7aae4f1472c --- /dev/null +++ b/include/eigen/doc/TopicCMakeGuide.dox @@ -0,0 +1,65 @@ +namespace Eigen { + +/** + +\page TopicCMakeGuide Using %Eigen in CMake Projects + +%Eigen provides native CMake support which allows the library to be easily +used in CMake projects. + +\note %CMake 3.5 (or later) is required to enable this functionality. + +%Eigen exports a CMake target called `Eigen3::Eigen` which can be imported +using the `find_package` CMake command and used by calling +`target_link_libraries` as in the following example: +\code{.cmake} +cmake_minimum_required (VERSION 3.5) +project (myproject) + +find_package (Eigen3 REQUIRED NO_MODULE) + +add_executable (example example.cpp) +target_link_libraries (example Eigen3::Eigen) +\endcode + +The above code snippet must be placed in a file called `CMakeLists.txt` alongside +`example.cpp`. After running +\code{.sh} +$ cmake path-to-example-directory +\endcode +CMake will produce project files that generate an executable called `example`. 
+Here, `path-to-example-directory` is the path to the directory that contains
+both `CMakeLists.txt` and `example.cpp`. Note that if you have multiple
+instances of %Eigen installed, `find_package` will use the first one
+encountered. To request a specific version of %Eigen, use the `<version>`
+option in `find_package`:
+```
+find_package(Eigen3 3.4 REQUIRED NO_MODULE)
+```
+or to support a range of versions:
+```
+find_package(Eigen3 3.3...5 REQUIRED NO_MODULE) # Any version >=3.3.0 but <6.0.0.
+```
+
+Do not forget to set the \c CMAKE_PREFIX_PATH variable if Eigen is not installed in a default location or if you want to pick a specific version. For instance:
+\code{.sh}
+$ cmake path-to-example-directory -DCMAKE_PREFIX_PATH=$HOME/mypackages
+\endcode
+An alternative is to set the \c Eigen3_DIR cmake's variable to the respective path containing the \c Eigen3*.cmake files. For instance:
+\code{.sh}
+$ cmake path-to-example-directory -DEigen3_DIR=$HOME/mypackages/share/eigen3/cmake/
+\endcode
+
+If the `REQUIRED` option is omitted when locating %Eigen using
+`find_package`, one can check whether the package was found as follows:
+\code{.cmake}
+find_package (Eigen3 NO_MODULE)
+
+if (TARGET Eigen3::Eigen)
+  # Use the imported target
+endif (TARGET Eigen3::Eigen)
+\endcode
+
+*/
+
+}
diff --git a/include/eigen/doc/TopicVectorization.dox b/include/eigen/doc/TopicVectorization.dox
new file mode 100644
index 0000000000000000000000000000000000000000..274d0451bbbc2716cf31fcb4a1c02a0f3e009017
--- /dev/null
+++ b/include/eigen/doc/TopicVectorization.dox
@@ -0,0 +1,9 @@
+namespace Eigen {
+
+/** \page TopicVectorization Vectorization
+
+
+TODO: write this dox page!
+ +*/ +} diff --git a/include/eigen/doc/TutorialAdvancedInitialization.dox b/include/eigen/doc/TutorialAdvancedInitialization.dox new file mode 100644 index 0000000000000000000000000000000000000000..50374d0d0b4b3bdb9664f9ce8b0e95771b897ead --- /dev/null +++ b/include/eigen/doc/TutorialAdvancedInitialization.dox @@ -0,0 +1,162 @@ +namespace Eigen { + +/** \eigenManualPage TutorialAdvancedInitialization Advanced initialization + +This page discusses several advanced methods for initializing matrices. It gives more details on the +comma-initializer, which was introduced before. It also explains how to get special matrices such as the +identity matrix and the zero matrix. + +\eigenAutoToc + +\section TutorialAdvancedInitializationCommaInitializer The comma initializer + +Eigen offers a comma initializer syntax which allows the user to easily set all the coefficients of a matrix, +vector or array. Simply list the coefficients, starting at the top-left corner and moving from left to right +and from the top to the bottom. The size of the object needs to be specified beforehand. If you list too few +or too many coefficients, Eigen will complain. + + + + +
Example:Output:
+\include Tutorial_commainit_01.cpp + +\verbinclude Tutorial_commainit_01.out +
+ +Moreover, the elements of the initialization list may themselves be vectors or matrices. A common use is +to join vectors or matrices together. For example, here is how to join two row vectors together. Remember +that you have to set the size before you can use the comma initializer. + + + + +
Example:Output:
+\include Tutorial_AdvancedInitialization_Join.cpp + +\verbinclude Tutorial_AdvancedInitialization_Join.out +
+ +We can use the same technique to initialize matrices with a block structure. + + + + +
Example:Output:
+\include Tutorial_AdvancedInitialization_Block.cpp + +\verbinclude Tutorial_AdvancedInitialization_Block.out +
+ +The comma initializer can also be used to fill block expressions such as m.row(i). Here is a more +complicated way to get the same result as in the first example above: + + + + +
Example:Output:
+\include Tutorial_commainit_01b.cpp + +\verbinclude Tutorial_commainit_01b.out +
+ + +\section TutorialAdvancedInitializationSpecialMatrices Special matrices and arrays + +The Matrix and Array classes have static methods like \link DenseBase::Zero() Zero()\endlink, which can be +used to initialize all coefficients to zero. There are three variants. The first variant takes no arguments +and can only be used for fixed-size objects. If you want to initialize a dynamic-size object to zero, you need +to specify the size. Thus, the second variant requires one argument and can be used for one-dimensional +dynamic-size objects, while the third variant requires two arguments and can be used for two-dimensional +objects. All three variants are illustrated in the following example: + + + + +
Example:Output:
+\include Tutorial_AdvancedInitialization_Zero.cpp + +\verbinclude Tutorial_AdvancedInitialization_Zero.out +
+ +Similarly, the static method \link DenseBase::Constant() Constant\endlink(value) sets all coefficients to \c value. +If the size of the object needs to be specified, the additional arguments go before the \c value +argument, as in MatrixXd::Constant(rows, cols, value). The method \link DenseBase::Random() Random() +\endlink fills the matrix or array with random coefficients. The identity matrix can be obtained by calling +\link MatrixBase::Identity() Identity()\endlink; this method is only available for Matrix, not for Array, +because "identity matrix" is a linear algebra concept. The method +\link DenseBase::LinSpaced LinSpaced\endlink(size, low, high) is only available for vectors and +one-dimensional arrays; it yields a vector of the specified size whose coefficients are equally spaced between +\c low and \c high. The method \c LinSpaced() is illustrated in the following example, which prints a table +with angles in degrees, the corresponding angle in radians, and their sine and cosine. + + + + +
Example:Output:
+\include Tutorial_AdvancedInitialization_LinSpaced.cpp + +\verbinclude Tutorial_AdvancedInitialization_LinSpaced.out +
+ +This example shows that objects like the ones returned by LinSpaced() can be assigned to variables (and +expressions). Eigen defines utility functions like \link DenseBase::setZero() setZero()\endlink, +\link MatrixBase::setIdentity() \endlink and \link DenseBase::setLinSpaced() \endlink to do this +conveniently. The following example contrasts three ways to construct the matrix +\f$ J = \bigl[ \begin{smallmatrix} O & I \\ I & O \end{smallmatrix} \bigr] \f$: using static methods and +assignment, using static methods and the comma-initializer, or using the setXxx() methods. + + + + +
Example:Output:
+\include Tutorial_AdvancedInitialization_ThreeWays.cpp + +\verbinclude Tutorial_AdvancedInitialization_ThreeWays.out +
+ +A summary of all pre-defined matrix, vector and array objects can be found in the \ref QuickRefPage. + + +\section TutorialAdvancedInitializationTemporaryObjects Usage as temporary objects + +As shown above, static methods as Zero() and Constant() can be used to initialize variables at the time of +declaration or at the right-hand side of an assignment operator. You can think of these methods as returning a +matrix or array; in fact, they return so-called \ref TopicEigenExpressionTemplates "expression objects" which +evaluate to a matrix or array when needed, so that this syntax does not incur any overhead. + +These expressions can also be used as a temporary object. The second example in +the \ref GettingStarted guide, which we reproduce here, already illustrates this. + + + + +
Example:Output:
+\include QuickStart_example2_dynamic.cpp + +\verbinclude QuickStart_example2_dynamic.out +
+ +The expression m + MatrixXf::Constant(3,3,1.2) constructs the 3-by-3 matrix expression with all its coefficients +equal to 1.2 plus the corresponding coefficient of \a m. + +The comma-initializer, too, can also be used to construct temporary objects. The following example constructs a random +matrix of size 2-by-3, and then multiplies this matrix on the left with +\f$ \bigl[ \begin{smallmatrix} 0 & 1 \\ 1 & 0 \end{smallmatrix} \bigr] \f$. + + + + +
Example:Output:
+\include Tutorial_AdvancedInitialization_CommaTemporary.cpp + +\verbinclude Tutorial_AdvancedInitialization_CommaTemporary.out +
+ +The \link CommaInitializer::finished() finished() \endlink method is necessary here to get the actual matrix +object once the comma initialization of our temporary submatrix is done. + + +*/ + +} diff --git a/include/eigen/doc/TutorialBlockOperations.dox b/include/eigen/doc/TutorialBlockOperations.dox new file mode 100644 index 0000000000000000000000000000000000000000..df277482c897e23675520e21e0fb07b50ff0cce5 --- /dev/null +++ b/include/eigen/doc/TutorialBlockOperations.dox @@ -0,0 +1,242 @@ +namespace Eigen { + +/** \eigenManualPage TutorialBlockOperations Block operations + +This page explains the essentials of block operations. +A block is a rectangular part of a matrix or array. Blocks expressions can be used both +as rvalues and as lvalues. As usual with Eigen expressions, this abstraction has zero runtime cost +provided that you let your compiler optimize. + +\eigenAutoToc + +\section TutorialBlockOperationsUsing Using block operations + +The most general block operation in Eigen is called \link DenseBase::block() .block() \endlink. +There are two versions, whose syntax is as follows: + + + + + + + + +
\b %Block \b operation +Version constructing a \n dynamic-size block expressionVersion constructing a \n fixed-size block expression
%Block of size (p,q), starting at (i,j)\code +matrix.block(i,j,p,q);\endcode \code +matrix.block(i,j);\endcode
+ +As always in Eigen, indices start at 0. + +Both versions can be used on fixed-size and dynamic-size matrices and arrays. +These two expressions are semantically equivalent. +The only difference is that the fixed-size version will typically give you faster code if the block size is small, +but requires this size to be known at compile time. + +The following program uses the dynamic-size and fixed-size versions to print the values of several blocks inside a +matrix. + + + + +
Example:Output:
+\include Tutorial_BlockOperations_print_block.cpp + +\verbinclude Tutorial_BlockOperations_print_block.out +
+ +In the above example the \link DenseBase::block() .block() \endlink function was employed as a \em rvalue, i.e. +it was only read from. However, blocks can also be used as \em lvalues, meaning that you can assign to a block. + +This is illustrated in the following example. This example also demonstrates blocks in arrays, which works exactly like the above-demonstrated blocks in matrices. + + + + +
Example:Output:
+\include Tutorial_BlockOperations_block_assignment.cpp + +\verbinclude Tutorial_BlockOperations_block_assignment.out +
+ +While the \link DenseBase::block() .block() \endlink method can be used for any block operation, there are +other methods for special cases, providing more specialized API and/or better performance. On the topic of performance, all what +matters is that you give Eigen as much information as possible at compile time. For example, if your block is a single whole column in a matrix, +using the specialized \link DenseBase::col() .col() \endlink function described below lets Eigen know that, which can give it optimization opportunities. + +The rest of this page describes these specialized methods. + +\section TutorialBlockOperationsSyntaxColumnRows Columns and rows + +Individual columns and rows are special cases of blocks. Eigen provides methods to easily address them: +\link DenseBase::col() .col() \endlink and \link DenseBase::row() .row()\endlink. + + + + + + + + + + +
%Block operationMethod
ith row + \link DenseBase::row() * \endlink\code +matrix.row(i);\endcode
jth column + \link DenseBase::col() * \endlink\code +matrix.col(j);\endcode
+ +The argument for \p col() and \p row() is the index of the column or row to be accessed. As always in Eigen, indices start at 0. + + + + +
Example:Output:
+\include Tutorial_BlockOperations_colrow.cpp + +\verbinclude Tutorial_BlockOperations_colrow.out +
+ +That example also demonstrates that block expressions (here columns) can be used in arithmetic like any other expression. + + +\section TutorialBlockOperationsSyntaxCorners Corner-related operations + +Eigen also provides special methods for blocks that are flushed against one of the corners or sides of a +matrix or array. For instance, \link DenseBase::topLeftCorner() .topLeftCorner() \endlink can be used to refer +to a block in the top-left corner of a matrix. + +The different possibilities are summarized in the following table: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
%Block \b operation +Version constructing a \n dynamic-size block expressionVersion constructing a \n fixed-size block expression
Top-left p by q block \link DenseBase::topLeftCorner() * \endlink\code +matrix.topLeftCorner(p,q);\endcode \code +matrix.topLeftCorner();\endcode
Bottom-left p by q block + \link DenseBase::bottomLeftCorner() * \endlink\code +matrix.bottomLeftCorner(p,q);\endcode \code +matrix.bottomLeftCorner();\endcode
Top-right p by q block + \link DenseBase::topRightCorner() * \endlink\code +matrix.topRightCorner(p,q);\endcode \code +matrix.topRightCorner();\endcode
Bottom-right p by q block + \link DenseBase::bottomRightCorner() * \endlink\code +matrix.bottomRightCorner(p,q);\endcode \code +matrix.bottomRightCorner();\endcode
%Block containing the first q rows + \link DenseBase::topRows() * \endlink\code +matrix.topRows(q);\endcode \code +matrix.topRows();\endcode
%Block containing the last q rows + \link DenseBase::bottomRows() * \endlink\code +matrix.bottomRows(q);\endcode \code +matrix.bottomRows();\endcode
%Block containing the first p columns + \link DenseBase::leftCols() * \endlink\code +matrix.leftCols(p);\endcode \code +matrix.leftCols

();\endcode

%Block containing the last q columns + \link DenseBase::rightCols() * \endlink\code +matrix.rightCols(q);\endcode \code +matrix.rightCols();\endcode
%Block containing the q columns starting from i + \link DenseBase::middleCols() * \endlink\code +matrix.middleCols(i,q);\endcode \code +matrix.middleCols(i);\endcode
%Block containing the q rows starting from i + \link DenseBase::middleRows() * \endlink\code +matrix.middleRows(i,q);\endcode \code +matrix.middleRows(i);\endcode
+ +Here is a simple example illustrating the use of the operations presented above: + + + + +
Example:Output:
+\include Tutorial_BlockOperations_corner.cpp + +\verbinclude Tutorial_BlockOperations_corner.out +
+ + +\section TutorialBlockOperationsSyntaxVectors Block operations for vectors + +Eigen also provides a set of block operations designed specifically for the special case of vectors and one-dimensional arrays: + + + + + + + + + + + + + + + + + +
%Block operationVersion constructing a \n dynamic-size block expressionVersion constructing a \n fixed-size block expression
%Block containing the first \p n elements + \link DenseBase::head() * \endlink\code +vector.head(n);\endcode \code +vector.head();\endcode
%Block containing the last \p n elements + \link DenseBase::tail() * \endlink\code +vector.tail(n);\endcode \code +vector.tail();\endcode
%Block containing \p n elements, starting at position \p i + \link DenseBase::segment() * \endlink\code +vector.segment(i,n);\endcode \code +vector.segment(i);\endcode
+ + +An example is presented below: + + + +
Example:Output:
+\include Tutorial_BlockOperations_vector.cpp + +\verbinclude Tutorial_BlockOperations_vector.out +
+ +*/ + +} diff --git a/include/eigen/doc/TutorialGeometry.dox b/include/eigen/doc/TutorialGeometry.dox new file mode 100644 index 0000000000000000000000000000000000000000..30bc25a9756c31a7900dc8ebe2f404ec13447e61 --- /dev/null +++ b/include/eigen/doc/TutorialGeometry.dox @@ -0,0 +1,242 @@ +namespace Eigen { + +/** \eigenManualPage TutorialGeometry Space transformations + +In this page, we will introduce the many possibilities offered by the \ref Geometry_Module "geometry module" to deal with 2D and 3D rotations and projective or affine transformations. + +\eigenAutoToc + +Eigen's Geometry module provides two different kinds of geometric transformations: + - Abstract transformations, such as rotations (represented by \ref AngleAxis "angle and axis" or by a \ref Quaternion "quaternion"), \ref Translation "translations", \ref Scaling() "scalings". These transformations are NOT represented as matrices, but you can nevertheless mix them with matrices and vectors in expressions, and convert them to matrices if you wish. + - Projective or affine transformation matrices: see the Transform class. These are really matrices. + +\note If you are working with OpenGL 4x4 matrices then Affine3f and Affine3d are what you want. Since Eigen defaults to column-major storage, you can directly use the Transform::data() method to pass your transformation matrix to OpenGL. + +You can construct a Transform from an abstract transformation, like this: +\code + Transform t(AngleAxis(angle,axis)); +\endcode +or like this: +\code + Transform t; + t = AngleAxis(angle,axis); +\endcode +But note that unfortunately, because of how C++ works, you can \b not do this: +\code + Transform t = AngleAxis(angle,axis); +\endcode +\b Explanation: In the C++ language, this would require Transform to have a non-explicit conversion constructor from AngleAxis, but we really don't want to allow implicit casting here. + + +\section TutorialGeoElementaryTransformations Transformation types + + + + + + + + + + +
Transformation typeTypical initialization code
+\ref Rotation2D "2D rotation" from an angle\code +Rotation2D rot2(angle_in_radian);\endcode
+3D rotation as an \ref AngleAxis "angle + axis"\code +AngleAxis aa(angle_in_radian, Vector3f(ax,ay,az));\endcode +The axis vector must be normalized.
+3D rotation as a \ref Quaternion "quaternion"\code +Quaternion q; q = AngleAxis(angle_in_radian, axis);\endcode
+N-D Scaling\code +Scaling(sx, sy) +Scaling(sx, sy, sz) +Scaling(s) +Scaling(vecN)\endcode
+N-D Translation\code +Translation(tx, ty) +Translation(tx, ty, tz) +Translation(s) +Translation(vecN)\endcode
+N-D \ref TutorialGeoTransform "Affine transformation"\code +Transform t = concatenation_of_any_transformations; +Transform t = Translation3f(p) * AngleAxisf(a,axis) * Scaling(s);\endcode
+N-D Linear transformations \n +(pure rotations, \n scaling, etc.)\code +Matrix t = concatenation_of_rotations_and_scalings; +Matrix t = Rotation2Df(a) * Scaling(s); +Matrix t = AngleAxisf(a,axis) * Scaling(s);\endcode
+ +Notes on rotations\n To transform more than a single vector the preferred +representations are rotation matrices, while for other usages Quaternion is the +representation of choice as they are compact, fast and stable. Finally Rotation2D and +AngleAxis are mainly convenient types to create other rotation objects. + +Notes on Translation and Scaling\n Like AngleAxis, these classes were +designed to simplify the creation/initialization of linear (Matrix) and affine (Transform) +transformations. Nevertheless, unlike AngleAxis which is inefficient to use, these classes +might still be interesting to write generic and efficient algorithms taking as input any +kind of transformations. + +Any of the above transformation types can be converted to any other types of the same nature, +or to a more generic type. Here are some additional examples: + + +
\code +Rotation2Df r; r = Matrix2f(..); // assumes a pure rotation matrix +AngleAxisf aa; aa = Quaternionf(..); +AngleAxisf aa; aa = Matrix3f(..); // assumes a pure rotation matrix +Matrix2f m; m = Rotation2Df(..); +Matrix3f m; m = Quaternionf(..); Matrix3f m; m = Scaling(..); +Affine3f m; m = AngleAxis3f(..); Affine3f m; m = Scaling(..); +Affine3f m; m = Translation3f(..); Affine3f m; m = Matrix3f(..); +\endcode
+ + +top\section TutorialGeoCommontransformationAPI Common API across transformation types + +To some extent, Eigen's \ref Geometry_Module "geometry module" allows you to write +generic algorithms working on any kind of transformation representations: + + + + + +
+Concatenation of two transformations\code +gen1 * gen2;\endcode
Apply the transformation to a vector\code +vec2 = gen1 * vec1;\endcode
Get the inverse of the transformation\code +gen2 = gen1.inverse();\endcode
Spherical interpolation \n (Rotation2D and Quaternion only)\code +rot3 = rot1.slerp(alpha,rot2);\endcode
+
+
+
+top\section TutorialGeoTransform Affine transformations
+Generic affine transformations are represented by the Transform class which internally
+is a (Dim+1)^2 matrix. In Eigen we have chosen to not distinguish between points and
+vectors such that all points are actually represented by displacement vectors from the
+origin ( \f$ \mathbf{p} \equiv \mathbf{p}-0 \f$ ). With that in mind, real points and
+vectors are distinguished when the transformation is applied.
+
+
+
+
+
+
+
+
+Apply the transformation to a \b point \code +VectorNf p1, p2; +p2 = t * p1;\endcode
+Apply the transformation to a \b vector \code +VectorNf vec1, vec2; +vec2 = t.linear() * vec1;\endcode
+Apply a \em general transformation \n to a \b normal \b vector \n +\code +VectorNf n1, n2; +MatrixNf normalMatrix = t.linear().inverse().transpose(); +n2 = (normalMatrix * n1).normalized();\endcode
(See subject 5.27 of this faq for the explanations)
+Apply a transformation with \em pure \em rotation \n to a \b normal \b vector +(no scaling, no shear)\code +n2 = t.linear() * n1;\endcode
+OpenGL compatibility \b 3D \code +glLoadMatrixf(t.data());\endcode
+OpenGL compatibility \b 2D \code +Affine3f aux(Affine3f::Identity()); +aux.linear().topLeftCorner<2,2>() = t.linear(); +aux.translation().start<2>() = t.translation(); +glLoadMatrixf(aux.data());\endcode
+ +\b Component \b accessors + + + + + + +
+full read-write access to the internal matrix\code +t.matrix() = matN1xN1; // N1 means N+1 +matN1xN1 = t.matrix(); +\endcode
+coefficient accessors\code +t(i,j) = scalar; <=> t.matrix()(i,j) = scalar; +scalar = t(i,j); <=> scalar = t.matrix()(i,j); +\endcode
+translation part\code +t.translation() = vecN; +vecN = t.translation(); +\endcode
+linear part\code +t.linear() = matNxN; +matNxN = t.linear(); +\endcode
+extract the rotation matrix\code +matNxN = t.rotation(); +\endcode
+ + +\b Transformation \b creation \n +While transformation objects can be created and updated concatenating elementary transformations, +the Transform class also features a procedural API: + + + + + + +
procedural APIequivalent natural API
Translation\code +t.translate(Vector_(tx,ty,..)); +t.pretranslate(Vector_(tx,ty,..)); +\endcode\code +t *= Translation_(tx,ty,..); +t = Translation_(tx,ty,..) * t; +\endcode
\b Rotation \n In 2D and for the procedural API, any_rotation can also \n be an angle in radians\code +t.rotate(any_rotation); +t.prerotate(any_rotation); +\endcode\code +t *= any_rotation; +t = any_rotation * t; +\endcode
Scaling\code +t.scale(Vector_(sx,sy,..)); +t.scale(s); +t.prescale(Vector_(sx,sy,..)); +t.prescale(s); +\endcode\code +t *= Scaling(sx,sy,..); +t *= Scaling(s); +t = Scaling(sx,sy,..) * t; +t = Scaling(s) * t; +\endcode
Shear transformation \n ( \b 2D \b only ! )\code +t.shear(sx,sy); +t.preshear(sx,sy); +\endcode
+ +Note that in both APIs, many transformations can be concatenated in a single expression as shown in the two following equivalent examples: + + + +
\code +t.pretranslate(..).rotate(..).translate(..).scale(..); +\endcode
\code +t = Translation_(..) * t * RotationType(..) * Translation_(..) * Scaling(..); +\endcode
+ + + +top\section TutorialGeoEulerAngles Euler angles + + +
+Euler angles might be convenient to create rotation objects. +On the other hand, since there exist 24 different conventions, they are pretty confusing to use. This example shows how +to create a rotation matrix according to the 2-1-2 convention.\code +Matrix3f m; +m = AngleAxisf(angle1, Vector3f::UnitZ()) + * AngleAxisf(angle2, Vector3f::UnitY()) + * AngleAxisf(angle3, Vector3f::UnitZ()); +\endcode
+ +*/ + +} diff --git a/include/eigen/doc/TutorialLinearAlgebra.dox b/include/eigen/doc/TutorialLinearAlgebra.dox new file mode 100644 index 0000000000000000000000000000000000000000..8042fcad333788acc479a97d1b39809d3bcefd0a --- /dev/null +++ b/include/eigen/doc/TutorialLinearAlgebra.dox @@ -0,0 +1,299 @@ +namespace Eigen { + +/** \eigenManualPage TutorialLinearAlgebra Linear algebra and decompositions + +This page explains how to solve linear systems, compute various decompositions such as LU, +QR, %SVD, eigendecompositions... After reading this page, don't miss our +\link TopicLinearAlgebraDecompositions catalogue \endlink of dense matrix decompositions. + +\eigenAutoToc + +\section TutorialLinAlgBasicSolve Basic linear solving + +\b The \b problem: You have a system of equations, that you have written as a single matrix equation + \f[ Ax \: = \: b \f] +Where \a A and \a b are matrices (\a b could be a vector, as a special case). You want to find a solution \a x. + +\b The \b solution: You can choose between various decompositions, depending on the properties of your matrix \a A, +and depending on whether you favor speed or accuracy. However, let's start with an example that works in all cases, +and is a good compromise: + + + + + + +
Example:Output:
\include TutorialLinAlgExSolveColPivHouseholderQR.cpp \verbinclude TutorialLinAlgExSolveColPivHouseholderQR.out
+ +In this example, the colPivHouseholderQr() method returns an object of class ColPivHouseholderQR. Since here the +matrix is of type Matrix3f, this line could have been replaced by: +\code +ColPivHouseholderQR dec(A); +Vector3f x = dec.solve(b); +\endcode + +Here, ColPivHouseholderQR is a QR decomposition with column pivoting. It's a good compromise for this tutorial, as it +works for all matrices while being quite fast. Here is a table of some other decompositions that you can choose from, +depending on your matrix, the problem you are trying to solve, and the trade-off you want to make: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
DecompositionMethodRequirements
on the matrix
Speed
(small-to-medium)
Speed
(large)
Accuracy
PartialPivLUpartialPivLu()Invertible+++++
FullPivLUfullPivLu()None-- -+++
HouseholderQRhouseholderQr()None+++++
ColPivHouseholderQRcolPivHouseholderQr()None+-+++
FullPivHouseholderQRfullPivHouseholderQr()None-- -+++
CompleteOrthogonalDecompositioncompleteOrthogonalDecomposition()None+-+++
LLTllt()Positive definite+++++++
LDLTldlt()Positive or negative
semidefinite
++++++
BDCSVDbdcSvd()None--+++
JacobiSVDjacobiSvd()None-- - -+++
+To get an overview of the true relative speed of the different decompositions, check this \link DenseDecompositionBenchmark benchmark \endlink. + +All of these decompositions offer a solve() method that works as in the above example. + +If you know more about the properties of your matrix, you can use the above table to select the best method. +For example, a good choice for solving linear systems with a non-symmetric matrix of full rank is PartialPivLU. +If you know that your matrix is also symmetric and positive definite, the above table says that +a very good choice is the LLT or LDLT decomposition. Here's an example, also demonstrating that using a general +matrix (not a vector) as right hand side is possible: + + + + + + + +
Example:Output:
\include TutorialLinAlgExSolveLDLT.cpp \verbinclude TutorialLinAlgExSolveLDLT.out
+ +For a \ref TopicLinearAlgebraDecompositions "much more complete table" comparing all decompositions supported by Eigen (notice that Eigen +supports many other decompositions), see our special page on +\ref TopicLinearAlgebraDecompositions "this topic". + + +\section TutorialLinAlgLeastsquares Least squares solving + +The most general and accurate method to solve under- or over-determined linear systems +in the least squares sense, is the SVD decomposition. Eigen provides two implementations. +The recommended one is the BDCSVD class, which scales well for large problems +and automatically falls back to the JacobiSVD class for smaller problems. +For both classes, their solve() method solves the linear system in the least-squares +sense. + +Here is an example: + + + + + + +
Example:Output:
\include TutorialLinAlgSVDSolve.cpp \verbinclude TutorialLinAlgSVDSolve.out
+ +An alternative to the SVD, which is usually faster and about as accurate, is CompleteOrthogonalDecomposition. + +Again, if you know more about the problem, the table above contains methods that are potentially faster. +If your matrix is full rank, HouseholderQR is the method of choice. If your matrix is full rank and well conditioned, +using the Cholesky decomposition (LLT) on the matrix of the normal equations can be faster still. +Our page on \link LeastSquares least squares solving \endlink has more details. + + +\section TutorialLinAlgSolutionExists Checking if a matrix is singular + +Only you know what error margin you want to allow for a solution to be considered valid. +So Eigen lets you do this computation for yourself, if you want to, as in this example: + + + + + + + +
Example:Output:
\include TutorialLinAlgExComputeSolveError.cpp \verbinclude TutorialLinAlgExComputeSolveError.out
+ +\section TutorialLinAlgEigensolving Computing eigenvalues and eigenvectors + +You need an eigendecomposition here, see available such decompositions on \ref TopicLinearAlgebraDecompositions "this page". +Make sure to check if your matrix is self-adjoint, as is often the case in these problems. Here's an example using +SelfAdjointEigenSolver, it could easily be adapted to general matrices using EigenSolver or ComplexEigenSolver. + +The computation of eigenvalues and eigenvectors does not necessarily converge, but such failure to converge is +very rare. The call to info() is to check for this possibility. + + + + + + + +
Example:Output:
\include TutorialLinAlgSelfAdjointEigenSolver.cpp \verbinclude TutorialLinAlgSelfAdjointEigenSolver.out
+ +\section TutorialLinAlgInverse Computing inverse and determinant + +First of all, make sure that you really want this. While inverse and determinant are fundamental mathematical concepts, +in \em numerical linear algebra they are not as useful as in pure mathematics. Inverse computations are often +advantageously replaced by solve() operations, and the determinant is often \em not a good way of checking if a matrix +is invertible. + +However, for \em very \em small matrices, the above may not be true, and inverse and determinant can be very useful. + +While certain decompositions, such as PartialPivLU and FullPivLU, offer inverse() and determinant() methods, you can also +call inverse() and determinant() directly on a matrix. If your matrix is of a very small fixed size (at most 4x4) this +allows Eigen to avoid performing a LU decomposition, and instead use formulas that are more efficient on such small matrices. + +Here is an example: + + + + + + +
Example:Output:
\include TutorialLinAlgInverseDeterminant.cpp \verbinclude TutorialLinAlgInverseDeterminant.out
+ +\section TutorialLinAlgSeparateComputation Separating the computation from the construction + +In the above examples, the decomposition was computed at the same time that the decomposition object was constructed. +There are however situations where you might want to separate these two things, for example if you don't know, +at the time of the construction, the matrix that you will want to decompose; or if you want to reuse an existing +decomposition object. + +What makes this possible is that: +\li all decompositions have a default constructor, +\li all decompositions have a compute(matrix) method that does the computation, and that may be called again + on an already-computed decomposition, reinitializing it. + +For example: + + + + + + + +
Example:Output:
\include TutorialLinAlgComputeTwice.cpp \verbinclude TutorialLinAlgComputeTwice.out
+ +Finally, you can tell the decomposition constructor to preallocate storage for decomposing matrices of a given size, +so that when you subsequently decompose such matrices, no dynamic memory allocation is performed (of course, if you +are using fixed-size matrices, no dynamic memory allocation happens at all). This is done by just +passing the size to the decomposition constructor, as in this example: +\code +HouseholderQR qr(50,50); +MatrixXf A = MatrixXf::Random(50,50); +qr.compute(A); // no dynamic memory allocation +\endcode + +\section TutorialLinAlgRankRevealing Rank-revealing decompositions + +Certain decompositions are rank-revealing, i.e. are able to compute the rank of a matrix. These are typically +also the decompositions that behave best in the face of a non-full-rank matrix (which in the square case means a +singular matrix). On \ref TopicLinearAlgebraDecompositions "this table" you can see for all our decompositions +whether they are rank-revealing or not. + +Rank-revealing decompositions offer at least a rank() method. They can also offer convenience methods such as isInvertible(), +and some are also providing methods to compute the kernel (null-space) and image (column-space) of the matrix, as is the +case with FullPivLU: + + + + + + + +
Example:Output:
\include TutorialLinAlgRankRevealing.cpp \verbinclude TutorialLinAlgRankRevealing.out
+ +Of course, any rank computation depends on the choice of an arbitrary threshold, since practically no +floating-point matrix is \em exactly rank-deficient. Eigen picks a sensible default threshold, which depends +on the decomposition but is typically the diagonal size times machine epsilon. While this is the best default we +could pick, only you know what is the right threshold for your application. You can set this by calling setThreshold() +on your decomposition object before calling rank() or any other method that needs to use such a threshold. +The decomposition itself, i.e. the compute() method, is independent of the threshold. You don't need to recompute the +decomposition after you've changed the threshold. + + + + + + + +
Example:Output:
\include TutorialLinAlgSetThreshold.cpp \verbinclude TutorialLinAlgSetThreshold.out
+ +*/ + +} diff --git a/include/eigen/doc/TutorialMatrixArithmetic.dox b/include/eigen/doc/TutorialMatrixArithmetic.dox new file mode 100644 index 0000000000000000000000000000000000000000..f7589f56721a74d68c596d9b77c40c9e4d9f4922 --- /dev/null +++ b/include/eigen/doc/TutorialMatrixArithmetic.dox @@ -0,0 +1,214 @@ +namespace Eigen { + +/** \eigenManualPage TutorialMatrixArithmetic Matrix and vector arithmetic + +This page aims to provide an overview and some details on how to perform arithmetic +between matrices, vectors and scalars with Eigen. + +\eigenAutoToc + +\section TutorialArithmeticIntroduction Introduction + +Eigen offers matrix/vector arithmetic operations either through overloads of common C++ arithmetic operators such as +, -, *, +or through special methods such as \link MatrixBase::dot() dot()\endlink, \link MatrixBase::cross() cross()\endlink, etc. +For the Matrix class (matrices and vectors), operators are only overloaded to support +linear-algebraic operations. For example, \c matrix1 \c * \c matrix2 means matrix-matrix product, +and \c vector \c + \c scalar is just not allowed. If you want to perform all kinds of array operations, +not linear algebra, see the \ref TutorialArrayClass "next page". + +\section TutorialArithmeticAddSub Addition and subtraction + +The left hand side and right hand side must, of course, have the same numbers of rows and of columns. They must +also have the same \c Scalar type, as Eigen doesn't do automatic type promotion. The operators at hand here are: +\li binary operator + as in \c a+b +\li binary operator - as in \c a-b +\li unary operator - as in \c -a +\li compound operator += as in \c a+=b +\li compound operator -= as in \c a-=b + + + + +
Example:Output:
+\include tut_arithmetic_add_sub.cpp + +\verbinclude tut_arithmetic_add_sub.out +
+ +\section TutorialArithmeticScalarMulDiv Scalar multiplication and division + +Multiplication and division by a scalar is very simple too. The operators at hand here are: +\li binary operator * as in \c matrix*scalar +\li binary operator * as in \c scalar*matrix +\li binary operator / as in \c matrix/scalar +\li compound operator *= as in \c matrix*=scalar +\li compound operator /= as in \c matrix/=scalar + + + + +
Example:Output:
+\include tut_arithmetic_scalar_mul_div.cpp + +\verbinclude tut_arithmetic_scalar_mul_div.out +
+ + +\section TutorialArithmeticMentionXprTemplates A note about expression templates + +This is an advanced topic that we explain on \ref TopicEigenExpressionTemplates "this page", +but it is useful to just mention it now. In Eigen, arithmetic operators such as \c operator+ don't +perform any computation by themselves, they just return an "expression object" describing the computation to be +performed. The actual computation happens later, when the whole expression is evaluated, typically in \c operator=. +While this might sound heavy, any modern optimizing compiler is able to optimize away that abstraction and +the result is perfectly optimized code. For example, when you do: +\code +VectorXf a(50), b(50), c(50), d(50); +... +a = 3*b + 4*c + 5*d; +\endcode +Eigen compiles it to just one for loop, so that the arrays are traversed only once. Simplifying (e.g. ignoring +SIMD optimizations), this loop looks like this: +\code +for(int i = 0; i < 50; ++i) + a[i] = 3*b[i] + 4*c[i] + 5*d[i]; +\endcode +Thus, you should not be afraid of using relatively large arithmetic expressions with Eigen: it only gives Eigen +more opportunities for optimization. + +\section TutorialArithmeticTranspose Transposition and conjugation + +The transpose \f$ a^T \f$, conjugate \f$ \bar{a} \f$, and adjoint (i.e., conjugate transpose) \f$ a^* \f$ of a matrix or vector \f$ a \f$ are obtained by the member functions \link DenseBase::transpose() transpose()\endlink, \link MatrixBase::conjugate() conjugate()\endlink, and \link MatrixBase::adjoint() adjoint()\endlink, respectively. + + + + +
Example:Output:
+\include tut_arithmetic_transpose_conjugate.cpp + +\verbinclude tut_arithmetic_transpose_conjugate.out +
+ +For real matrices, \c conjugate() is a no-operation, and so \c adjoint() is equivalent to \c transpose(). + +As for basic arithmetic operators, \c transpose() and \c adjoint() simply return a proxy object without doing the actual transposition. If you do b = a.transpose(), then the transpose is evaluated at the same time as the result is written into \c b. However, there is a complication here. If you do a = a.transpose(), then Eigen starts writing the result into \c a before the evaluation of the transpose is finished. Therefore, the instruction a = a.transpose() does not replace \c a with its transpose, as one would expect: + + + +
Example:Output:
+\include tut_arithmetic_transpose_aliasing.cpp + +\verbinclude tut_arithmetic_transpose_aliasing.out +
+This is the so-called \ref TopicAliasing "aliasing issue". In "debug mode", i.e., when \ref TopicAssertions "assertions" have not been disabled, such common pitfalls are automatically detected. + +For \em in-place transposition, as for instance in a = a.transpose(), simply use the \link DenseBase::transposeInPlace() transposeInPlace()\endlink function: + + + +
Example:Output:
+\include tut_arithmetic_transpose_inplace.cpp + +\verbinclude tut_arithmetic_transpose_inplace.out +
+There is also the \link MatrixBase::adjointInPlace() adjointInPlace()\endlink function for complex matrices. + +\section TutorialArithmeticMatrixMul Matrix-matrix and matrix-vector multiplication + +Matrix-matrix multiplication is again done with \c operator*. Since vectors are a special +case of matrices, they are implicitly handled there too, so matrix-vector product is really just a special +case of matrix-matrix product, and so is vector-vector outer product. Thus, all these cases are handled by just +two operators: +\li binary operator * as in \c a*b +\li compound operator *= as in \c a*=b (this multiplies on the right: \c a*=b is equivalent to a = a*b) + + + + +
Example:Output:
+\include tut_arithmetic_matrix_mul.cpp + +\verbinclude tut_arithmetic_matrix_mul.out +
+ +Note: if you read the above paragraph on expression templates and are worried that doing \c m=m*m might cause +aliasing issues, be reassured for now: Eigen treats matrix multiplication as a special case and takes care of +introducing a temporary here, so it will compile \c m=m*m as: +\code +tmp = m*m; +m = tmp; +\endcode +If you know your matrix product can be safely evaluated into the destination matrix without aliasing issue, then you can use the \link MatrixBase::noalias() noalias()\endlink function to avoid the temporary, e.g.: +\code +c.noalias() += a * b; +\endcode +For more details on this topic, see the page on \ref TopicAliasing "aliasing". + +\b Note: for BLAS users worried about performance, expressions such as c.noalias() -= 2 * a.adjoint() * b; are fully optimized and trigger a single gemm-like function call. + +\section TutorialArithmeticDotAndCross Dot product and cross product + +For dot product and cross product, you need the \link MatrixBase::dot() dot()\endlink and \link MatrixBase::cross() cross()\endlink methods. Of course, the dot product can also be obtained as a 1x1 matrix as u.adjoint()*v. + + + +
Example:Output:
+\include tut_arithmetic_dot_cross.cpp + +\verbinclude tut_arithmetic_dot_cross.out +
+ +Remember that cross product is only for vectors of size 3. Dot product is for vectors of any sizes. +When using complex numbers, Eigen's dot product is conjugate-linear in the first variable and linear in the +second variable. + +\section TutorialArithmeticRedux Basic arithmetic reduction operations +Eigen also provides some reduction operations to reduce a given matrix or vector to a single value such as the sum (computed by \link DenseBase::sum() sum()\endlink), product (\link DenseBase::prod() prod()\endlink), or the maximum (\link DenseBase::maxCoeff() maxCoeff()\endlink) and minimum (\link DenseBase::minCoeff() minCoeff()\endlink) of all its coefficients. + + + + +
Example:Output:
+\include tut_arithmetic_redux_basic.cpp + +\verbinclude tut_arithmetic_redux_basic.out +
+ +The \em trace of a matrix, as returned by the function \link MatrixBase::trace() trace()\endlink, is the sum of the diagonal coefficients and can also be computed as efficiently using a.diagonal().sum(), as we will see later on. + +There also exist variants of the \c minCoeff and \c maxCoeff functions returning the coordinates of the respective coefficient via the arguments: + + + + +
Example:Output:
+\include tut_arithmetic_redux_minmax.cpp + +\verbinclude tut_arithmetic_redux_minmax.out +
+ + +\section TutorialArithmeticValidity Validity of operations +Eigen checks the validity of the operations that you perform. When possible, +it checks them at compile time, producing compilation errors. These error messages can be long and ugly, +but Eigen writes the important message in UPPERCASE_LETTERS_SO_IT_STANDS_OUT. For example: +\code + Matrix3f m; + Vector4f v; + v = m*v; // Compile-time error: YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES +\endcode + +Of course, in many cases, for example when checking dynamic sizes, the check cannot be performed at compile time. +Eigen then uses runtime assertions. This means that the program will abort with an error message when executing an illegal operation if it is run in "debug mode", and it will probably crash if assertions are turned off. + +\code + MatrixXf m(3,3); + VectorXf v(4); + v = m * v; // Run-time assertion failure here: "invalid matrix product" +\endcode + +For more details on this topic, see \ref TopicAssertions "this page". + +*/ + +} diff --git a/include/eigen/doc/TutorialReductionsVisitorsBroadcasting.dox b/include/eigen/doc/TutorialReductionsVisitorsBroadcasting.dox new file mode 100644 index 0000000000000000000000000000000000000000..f5322b4a6f0f5d92fc432c543790b8e5fe71688b --- /dev/null +++ b/include/eigen/doc/TutorialReductionsVisitorsBroadcasting.dox @@ -0,0 +1,266 @@ +namespace Eigen { + +/** \eigenManualPage TutorialReductionsVisitorsBroadcasting Reductions, visitors and broadcasting + +This page explains Eigen's reductions, visitors and broadcasting and how they are used with +\link MatrixBase matrices \endlink and \link ArrayBase arrays \endlink. + +\eigenAutoToc + +\section TutorialReductionsVisitorsBroadcastingReductions Reductions +In Eigen, a reduction is a function taking a matrix or array, and returning a single +scalar value. One of the most used reductions is \link DenseBase::sum() .sum() \endlink, +returning the sum of all the coefficients inside a given matrix or array. + + + + +
Example:Output:
+\include tut_arithmetic_redux_basic.cpp + +\verbinclude tut_arithmetic_redux_basic.out +
+ +The \em trace of a matrix, as returned by the function \c trace(), is the sum of the diagonal coefficients and can equivalently be computed a.diagonal().sum(). + + +\subsection TutorialReductionsVisitorsBroadcastingReductionsNorm Norm computations + +The (Euclidean a.k.a. \f$\ell^2\f$) squared norm of a vector can be obtained \link MatrixBase::squaredNorm() squaredNorm() \endlink. It is equal to the dot product of the vector by itself, and equivalently to the sum of squared absolute values of its coefficients. + +Eigen also provides the \link MatrixBase::norm() norm() \endlink method, which returns the square root of \link MatrixBase::squaredNorm() squaredNorm() \endlink. + +These operations can also operate on matrices; in that case, a n-by-p matrix is seen as a vector of size (n*p), so for example the \link MatrixBase::norm() norm() \endlink method returns the "Frobenius" or "Hilbert-Schmidt" norm. We refrain from speaking of the \f$\ell^2\f$ norm of a matrix because that can mean different things. + +If you want other coefficient-wise \f$\ell^p\f$ norms, use the \link MatrixBase::lpNorm lpNorm

<p>() \endlink method. The template parameter \a p can take the special value \a Infinity if you want the \f$\ell^\infty\f$ norm, which is the maximum of the absolute values of the coefficients. + +The following example demonstrates these methods. + + + + +
Example:Output:
+\include Tutorial_ReductionsVisitorsBroadcasting_reductions_norm.cpp + +\verbinclude Tutorial_ReductionsVisitorsBroadcasting_reductions_norm.out +
+ +\b Operator \b norm: The 1-norm and \f$\infty\f$-norm matrix operator norms can easily be computed as follows: + + + +
Example:Output:
+\include Tutorial_ReductionsVisitorsBroadcasting_reductions_operatornorm.cpp + +\verbinclude Tutorial_ReductionsVisitorsBroadcasting_reductions_operatornorm.out +
+See below for more explanations on the syntax of these expressions. + +\subsection TutorialReductionsVisitorsBroadcastingReductionsBool Boolean reductions + +The following reductions operate on boolean values: + - \link DenseBase::all() all() \endlink returns \b true if all of the coefficients in a given Matrix or Array evaluate to \b true . + - \link DenseBase::any() any() \endlink returns \b true if at least one of the coefficients in a given Matrix or Array evaluates to \b true . + - \link DenseBase::count() count() \endlink returns the number of coefficients in a given Matrix or Array that evaluate to \b true. + +These are typically used in conjunction with the coefficient-wise comparison and equality operators provided by Array. For instance, array > 0 is an %Array of the same size as \c array , with \b true at those positions where the corresponding coefficient of \c array is positive. Thus, (array > 0).all() tests whether all coefficients of \c array are positive. This can be seen in the following example: + + + + +
Example:Output:
+\include Tutorial_ReductionsVisitorsBroadcasting_reductions_bool.cpp + +\verbinclude Tutorial_ReductionsVisitorsBroadcasting_reductions_bool.out +
+ +\subsection TutorialReductionsVisitorsBroadcastingReductionsUserdefined User defined reductions + +TODO + +In the meantime you can have a look at the DenseBase::redux() function. + +\section TutorialReductionsVisitorsBroadcastingVisitors Visitors +Visitors are useful when one wants to obtain the location of a coefficient inside +a Matrix or Array. The simplest examples are +\link MatrixBase::maxCoeff() maxCoeff(&x,&y) \endlink and +\link MatrixBase::minCoeff() minCoeff(&x,&y)\endlink, which can be used to find +the location of the greatest or smallest coefficient in a Matrix or +Array. + +The arguments passed to a visitor are pointers to the variables where the +row and column position are to be stored. These variables should be of type +\link Eigen::Index Index \endlink, as shown below: + + + + +
Example:Output:
+\include Tutorial_ReductionsVisitorsBroadcasting_visitors.cpp + +\verbinclude Tutorial_ReductionsVisitorsBroadcasting_visitors.out +
+ +Both functions also return the value of the minimum or maximum coefficient. + +\section TutorialReductionsVisitorsBroadcastingPartialReductions Partial reductions +Partial reductions are reductions that can operate column- or row-wise on a Matrix or +Array, applying the reduction operation on each column or row and +returning a column or row vector with the corresponding values. Partial reductions are applied +with \link DenseBase::colwise() colwise() \endlink or \link DenseBase::rowwise() rowwise() \endlink. + +A simple example is obtaining the maximum of the elements +in each column in a given matrix, storing the result in a row vector: + + + + +
Example:Output:
+\include Tutorial_ReductionsVisitorsBroadcasting_colwise.cpp + +\verbinclude Tutorial_ReductionsVisitorsBroadcasting_colwise.out +
+ +The same operation can be performed row-wise: + + + + +
Example:Output:
+\include Tutorial_ReductionsVisitorsBroadcasting_rowwise.cpp + +\verbinclude Tutorial_ReductionsVisitorsBroadcasting_rowwise.out +
+ +Note that column-wise operations return a row vector, while row-wise operations return a column vector. + +\subsection TutorialReductionsVisitorsBroadcastingPartialReductionsCombined Combining partial reductions with other operations +It is also possible to use the result of a partial reduction to do further processing. +Here is another example that finds the column whose sum of elements is the maximum + within a matrix. With column-wise partial reductions this can be coded as: + + + + +
Example:Output:
+\include Tutorial_ReductionsVisitorsBroadcasting_maxnorm.cpp + +\verbinclude Tutorial_ReductionsVisitorsBroadcasting_maxnorm.out +
+ +The previous example applies the \link DenseBase::sum() sum() \endlink reduction on each column +though the \link DenseBase::colwise() colwise() \endlink visitor, obtaining a new matrix whose +size is 1x4. + +Therefore, if +\f[ +\mbox{m} = \begin{bmatrix} 1 & 2 & 6 & 9 \\ + 3 & 1 & 7 & 2 \end{bmatrix} +\f] + +then + +\f[ +\mbox{m.colwise().sum()} = \begin{bmatrix} 4 & 3 & 13 & 11 \end{bmatrix} +\f] + +The \link DenseBase::maxCoeff() maxCoeff() \endlink reduction is finally applied +to obtain the column index where the maximum sum is found, +which is the column index 2 (third column) in this case. + + +\section TutorialReductionsVisitorsBroadcastingBroadcasting Broadcasting +The concept behind broadcasting is similar to partial reductions, with the difference that broadcasting +constructs an expression where a vector (column or row) is interpreted as a matrix by replicating it in +one direction. + +A simple example is to add a certain column vector to each column in a matrix. +This can be accomplished with: + + + + +
Example:Output:
+\include Tutorial_ReductionsVisitorsBroadcasting_broadcast_simple.cpp + +\verbinclude Tutorial_ReductionsVisitorsBroadcasting_broadcast_simple.out +
+ +We can interpret the instruction mat.colwise() += v in two equivalent ways. It adds the vector \c v +to every column of the matrix. Alternatively, it can be interpreted as repeating the vector \c v four times to +form a four-by-two matrix which is then added to \c mat: +\f[ +\begin{bmatrix} 1 & 2 & 6 & 9 \\ 3 & 1 & 7 & 2 \end{bmatrix} ++ \begin{bmatrix} 0 & 0 & 0 & 0 \\ 1 & 1 & 1 & 1 \end{bmatrix} += \begin{bmatrix} 1 & 2 & 6 & 9 \\ 4 & 2 & 8 & 3 \end{bmatrix}. +\f] +The operators -=, + and - can also be used column-wise and row-wise. On arrays, we +can also use the operators *=, /=, * and / to perform coefficient-wise +multiplication and division column-wise or row-wise. These operators are not available on matrices because it +is not clear what they would do. If you want multiply column 0 of a matrix \c mat with \c v(0), column 1 with +\c v(1), and so on, then use mat = mat * v.asDiagonal(). + +It is important to point out that the vector to be added column-wise or row-wise must be of type Vector, +and cannot be a Matrix. If this is not met then you will get compile-time error. This also means that +broadcasting operations can only be applied with an object of type Vector, when operating with Matrix. +The same applies for the Array class, where the equivalent for VectorXf is ArrayXf. As always, you should +not mix arrays and matrices in the same expression. + +To perform the same operation row-wise we can do: + + + + +
Example:Output:
+\include Tutorial_ReductionsVisitorsBroadcasting_broadcast_simple_rowwise.cpp + +\verbinclude Tutorial_ReductionsVisitorsBroadcasting_broadcast_simple_rowwise.out +
+ +\subsection TutorialReductionsVisitorsBroadcastingBroadcastingCombined Combining broadcasting with other operations +Broadcasting can also be combined with other operations, such as Matrix or Array operations, +reductions and partial reductions. + +Now that broadcasting, reductions and partial reductions have been introduced, we can dive into a more advanced example that finds +the nearest neighbour of a vector v within the columns of matrix m. The Euclidean distance will be used in this example, +computing the squared Euclidean distance with the partial reduction named \link MatrixBase::squaredNorm() squaredNorm() \endlink: + + + + +
Example:Output:
+\include Tutorial_ReductionsVisitorsBroadcasting_broadcast_1nn.cpp + +\verbinclude Tutorial_ReductionsVisitorsBroadcasting_broadcast_1nn.out +
+ +The line that does the job is +\code + (m.colwise() - v).colwise().squaredNorm().minCoeff(&index); +\endcode + +We will go step by step to understand what is happening: + + - m.colwise() - v is a broadcasting operation, subtracting v from each column in m. The result of this operation +is a new matrix whose size is the same as matrix m: \f[ + \mbox{m.colwise() - v} = + \begin{bmatrix} + -1 & 21 & 4 & 7 \\ + 0 & 8 & 4 & -1 + \end{bmatrix} +\f] + + - (m.colwise() - v).colwise().squaredNorm() is a partial reduction, computing the squared norm column-wise. The result of +this operation is a row vector where each coefficient is the squared Euclidean distance between each column in m and v: \f[ + \mbox{(m.colwise() - v).colwise().squaredNorm()} = + \begin{bmatrix} + 1 & 505 & 32 & 50 + \end{bmatrix} +\f] + + - Finally, minCoeff(&index) is used to obtain the index of the column in m that is closest to v in terms of Euclidean +distance. + +*/ + +} diff --git a/include/eigen/doc/TutorialReshape.dox b/include/eigen/doc/TutorialReshape.dox new file mode 100644 index 0000000000000000000000000000000000000000..07e5c3c0b569119eecc398527f22b26038ed9fbd --- /dev/null +++ b/include/eigen/doc/TutorialReshape.dox @@ -0,0 +1,82 @@ +namespace Eigen { + +/** \eigenManualPage TutorialReshape Reshape + +Since the version 3.4, %Eigen exposes convenient methods to reshape a matrix to another matrix of different sizes or vector. +All cases are handled via the `DenseBase::reshaped(NRowsType,NColsType)` and `DenseBase::reshaped()` functions. +Those functions do not perform in-place reshaping, but instead return a view on the input expression. + +\eigenAutoToc + +\section TutorialReshapeMat2Mat Reshaped 2D views + +The more general reshaping transformation is handled via: `reshaped(nrows,ncols)`. +Here is an example reshaping a 4x4 matrix to a 2x8 one: + + + + +
Example:Output:
+\include MatrixBase_reshaped_int_int.cpp + +\verbinclude MatrixBase_reshaped_int_int.out +
+ +By default, the input coefficients are always interpreted in column-major order regardless of the storage order of the input expression. +For more control on ordering, compile-time sizes, and automatic size deduction, please see the documentation of `DenseBase::reshaped(NRowsType,NColsType)` that contains all the details with many examples. + + +\section TutorialReshapeMat2Vec 1D linear views + +A very common usage of reshaping is to create a 1D linear view over a given 2D matrix or expression. +In this case, sizes can be deduced and thus omitted as in the following example: + + + + + +
Example:
+\include MatrixBase_reshaped_to_vector.cpp +
Output:
+\verbinclude MatrixBase_reshaped_to_vector.out +
+ +This shortcut always returns a column vector and by default input coefficients are always interpreted in column-major order. +Again, see the documentation of DenseBase::reshaped() for more control on the ordering. + +\section TutorialReshapeInPlace In-place reshaping + +The above examples create reshaped views, but what about reshaping in-place a given matrix? +Of course this task is only conceivable for matrices and arrays having runtime dimensions. +In many cases, this can be accomplished via PlainObjectBase::resize(Index,Index): + + + + + +
Example:
+\include Tutorial_reshaped_vs_resize_1.cpp +
Output:
+\verbinclude Tutorial_reshaped_vs_resize_1.out +
+ +However beware that unlike \c reshaped, the result of \c resize depends on the input storage order. +It thus behaves similarly to `reshaped<AutoOrder>`: + + + + + +
Example:
+\include Tutorial_reshaped_vs_resize_2.cpp +
Output:
+\verbinclude Tutorial_reshaped_vs_resize_2.out +
+ +Finally, assigning a reshaped matrix to itself is currently not supported and will result in undefined behavior because of \link TopicAliasing aliasing \endlink. +The following is forbidden: \code A = A.reshaped(2,8); \endcode +This is OK: \code A = A.reshaped(2,8).eval(); \endcode + +*/ + +} diff --git a/include/eigen/doc/TutorialSTL.dox b/include/eigen/doc/TutorialSTL.dox new file mode 100644 index 0000000000000000000000000000000000000000..9a825bc4885a2d0db802db59cd885a206ce77fb5 --- /dev/null +++ b/include/eigen/doc/TutorialSTL.dox @@ -0,0 +1,66 @@ +namespace Eigen { + +/** \eigenManualPage TutorialSTL STL iterators and algorithms + +Since the version 3.4, %Eigen's dense matrices and arrays provide STL compatible iterators. +As demonstrated below, this makes them naturally compatible with range-for-loops and STL's algorithms. + +\eigenAutoToc + +\section TutorialSTLVectors Iterating over 1D arrays and vectors + +Any dense 1D expression exposes the pair of `begin()/end()` methods to iterate over it. + +This directly enables c++11 range for loops: + + + +
Example:Output:
+\include Tutorial_range_for_loop_1d_cxx11.cpp + +\verbinclude Tutorial_range_for_loop_1d_cxx11.out +
+ +One dimensional expressions can also easily be passed to STL algorithms: + + + +
Example:Output:
+\include Tutorial_std_sort.cpp + +\verbinclude Tutorial_std_sort.out +
+ +Similar to `std::vector`, 1D expressions also expose the pair of `cbegin()/cend()` methods to conveniently get const iterators on non-const objects. + +\section TutorialSTLMatrices Iterating over coefficients of 2D arrays and matrices + +STL iterators are intrinsically designed to iterate over 1D structures. +This is why `begin()/end()` methods are disabled for 2D expressions. +Iterating over all coefficients of a 2D expression is still easily accomplished by creating a 1D linear view through `reshaped()`: + + + +
Example:Output:
+\include Tutorial_range_for_loop_2d_cxx11.cpp + +\verbinclude Tutorial_range_for_loop_2d_cxx11.out +
+ +\section TutorialSTLRowsColumns Iterating over rows or columns of 2D arrays and matrices + +It is also possible to get iterators over rows or columns of 2D expressions. +Those are available through the `rowwise()` and `colwise()` proxies. +Here is an example sorting each row of a matrix: + + + +
Example:Output:
+\include Tutorial_std_sort_rows_cxx11.cpp + +\verbinclude Tutorial_std_sort_rows_cxx11.out +
+ +*/ + +} diff --git a/include/eigen/doc/TutorialSlicingIndexing.dox b/include/eigen/doc/TutorialSlicingIndexing.dox new file mode 100644 index 0000000000000000000000000000000000000000..f0a9e346d126fa6ed2ab58263d4007d2676687e4 --- /dev/null +++ b/include/eigen/doc/TutorialSlicingIndexing.dox @@ -0,0 +1,245 @@ +namespace Eigen { + +/** \eigenManualPage TutorialSlicingIndexing Slicing and Indexing + +This page presents the numerous possibilities offered by `operator()` to index sub-sets of rows and columns. +This API has been introduced in %Eigen 3.4. +It supports all the features proposed by the \link TutorialBlockOperations block API \endlink, and much more. +In particular, it supports \b slicing that consists in taking a set of rows, columns, or elements, uniformly spaced within a matrix or indexed from an array of indices. + +\eigenAutoToc + +\section TutorialSlicingOverview Overview + +All the aforementioned operations are handled through the generic DenseBase::operator()(const RowIndices&, const ColIndices&) method. +Each argument can be: + - An integer indexing a single row or column, including symbolic indices. + - The symbol Eigen::indexing::all representing the whole set of respective rows or columns in increasing order. + - An ArithmeticSequence as constructed by the Eigen::seq, Eigen::seqN, or Eigen::indexing::lastN functions. + - Any 1D vector/array of integers including %Eigen's vector/array, expressions, std::vector, std::array, as well as plain C arrays: `int[N]`. + +More generally, it can accept any object exposing the following two member functions: + \code + operator[](<integral type>) const; + size() const; + \endcode +where `<integral type>` stands for any integer type compatible with Eigen::Index (i.e. `std::ptrdiff_t`). + +\section TutorialSlicingBasic Basic slicing + +Taking a set of rows, columns, or elements, uniformly spaced within a matrix or vector is achieved through the Eigen::seq or Eigen::seqN functions where "seq" stands for arithmetic sequence. 
Their signatures are summarized below: + + + + + + + + + + + + + + + + + + + + + + + + + + + +
functiondescriptionexample
\code seq(firstIdx,lastIdx) \endcoderepresents the sequence of integers ranging from \c firstIdx to \c lastIdx\code seq(2,5) <=> {2,3,4,5} \endcode
\code seq(firstIdx,lastIdx,incr) \endcodesame but using the increment \c incr to advance from one index to the next\code seq(2,8,2) <=> {2,4,6,8} \endcode
\code seqN(firstIdx,size) \endcoderepresents the sequence of \c size integers starting from \c firstIdx\code seqN(2,5) <=> {2,3,4,5,6} \endcode
\code seqN(firstIdx,size,incr) \endcodesame but using the increment \c incr to advance from one index to the next\code seqN(2,3,3) <=> {2,5,8} \endcode
+ +The \c firstIdx and \c lastIdx parameters can also be defined with the help of the Eigen::last symbol representing the index of the last row, column or element of the underlying matrix/vector once the arithmetic sequence is passed to it through operator(). +Here are some examples for a 2D array/matrix \c A and a 1D array/vector \c v. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
IntentCodeBlock-API equivalence
Bottom-left corner starting at row \c i with \c n columns\code A(seq(i,last), seqN(0,n)) \endcode\code A.bottomLeftCorner(A.rows()-i,n) \endcode
%Block starting at \c i,j having \c m rows, and \c n columns\code A(seqN(i,m), seqN(j,n)) \endcode\code A.block(i,j,m,n) \endcode
%Block starting at \c i0,j0 and ending at \c i1,j1\code A(seq(i0,i1), seq(j0,j1)) \endcode\code A.block(i0,j0,i1-i0+1,j1-j0+1) \endcode
Even columns of A\code A(all, seq(0,last,2)) \endcode
First \c n odd rows of A\code A(seqN(1,n,2), all) \endcode
The second-last column\code A(all, last-1) \endcode\code A.col(A.cols()-2) \endcode
The middle row\code A(last/2, all) \endcode\code A.row((A.rows()-1)/2) \endcode
Last elements of v starting at i\code v(seq(i,last)) \endcode\code v.tail(v.size()-i) \endcode
Last \c n elements of v\code v(seq(last+1-n,last)) \endcode\code v.tail(n) \endcode
+ +As seen in the last example, referencing the last n elements (or rows/columns) is a bit cumbersome to write. +This becomes even more tricky and error prone with a non-default increment. +Here comes \link indexing_lastN Eigen::indexing::lastN(size) \endlink, and +\link indexing_lastN_with_incr Eigen::indexing::lastN(size,incr) \endlink: + + + + + + + + + + + + + + + + + + + + + + + + + + + +
IntentCodeBlock-API equivalence
Last \c n elements of v\code v(lastN(n)) \endcode\code v.tail(n) \endcode
Bottom-right corner of A of size \c m times \c n\code A(lastN(m), lastN(n)) \endcode\code A.bottomRightCorner(m,n) \endcode
Bottom-right corner of A of size \c m times \c n\code A(lastN(m), lastN(n)) \endcode\code A.bottomRightCorner(m,n) \endcode
Last \c n columns taking 1 column over 3\code A(all, lastN(n,3)) \endcode
+ +\section TutorialSlicingFixed Compile time size and increment + +In terms of performance, %Eigen and the compiler can take advantage of compile-time size and increment. +To this end, you can enforce compile-time parameters using Eigen::fix. +Such compile-time value can be combined with the Eigen::last symbol: +\code v(seq(last-fix<7>, last-fix<2>)) +\endcode +In this example %Eigen knows at compile-time that the returned expression has 6 elements. +It is equivalent to: +\code v(seqN(last-7, fix<6>)) +\endcode + +We can revisit the even columns of A example as follows: +\code A(all, seq(fix<0>,last,fix<2>)) +\endcode + + +\section TutorialSlicingReverse Reverse order + +Row/column indices can also be enumerated in decreasing order using a negative increment. +For instance, one over two columns of A from the column 20 to 10: +\code A(all, seq(20, 10, fix<-2>)) +\endcode +The last \c n rows starting from the last one: +\code A(seqN(last, n, fix<-1>), all) +\endcode +You can also use the ArithmeticSequence::reverse() method to reverse its order. +The previous example can thus also be written as: +\code A(lastN(n).reverse(), all) +\endcode + + +\section TutorialSlicingArray Array of indices + +The generic `operator()` can also take as input an arbitrary list of row or column indices stored as either an `ArrayXi`, a `std::vector<int>`, `std::array<int,N>`, etc. + + + +
Example:Output:
+\include Slicing_stdvector_cxx11.cpp + +\verbinclude Slicing_stdvector_cxx11.out +
+ +You can also directly pass a static array: + + + +
Example:Output:
+\include Slicing_rawarray_cxx11.cpp + +\verbinclude Slicing_rawarray_cxx11.out +
+ +or expressions: + + + +
Example:Output:
+\include Slicing_arrayexpr.cpp + +\verbinclude Slicing_arrayexpr.out +
+ +When passing an object with a compile-time size such as `Array4i`, `std::array<int,N>`, or a static array, then the returned expression also exhibits compile-time dimensions. + +\section TutorialSlicingCustomArray Custom index list + +More generally, `operator()` can accept as inputs any object \c ind of type \c T compatible with: +\code +Index s = ind.size(); or Index s = size(ind); +Index i; +i = ind[i]; +\endcode + +This means you can easily build your own fancy sequence generator and pass it to `operator()`. +Here is an example enlarging a given matrix while padding the additional first rows and columns through repetition: + + + +
Example:Output:
+\include Slicing_custom_padding_cxx11.cpp + +\verbinclude Slicing_custom_padding_cxx11.out +
+ +
+ +*/ + +/* +TODO add: +so_repeat_inner.cpp +so_repeleme.cpp +*/ +} diff --git a/include/eigen/doc/UnalignedArrayAssert.dox b/include/eigen/doc/UnalignedArrayAssert.dox new file mode 100644 index 0000000000000000000000000000000000000000..ca674a267b642c351f7aebb24e61365736623244 --- /dev/null +++ b/include/eigen/doc/UnalignedArrayAssert.dox @@ -0,0 +1,133 @@ +namespace Eigen { + +/** \eigenManualPage TopicUnalignedArrayAssert Explanation of the assertion on unaligned arrays + +Hello! You are seeing this webpage because your program terminated on an assertion failure like this one: +

+my_program: path/to/eigen/Eigen/src/Core/DenseStorage.h:44:
+Eigen::internal::matrix_array::internal::matrix_array()
+[with T = double, int Size = 2, int MatrixOptions = 2, bool Align = true]:
+Assertion `(reinterpret_cast(array) & (sizemask)) == 0 && "this assertion
+is explained here: http://eigen.tuxfamily.org/dox-devel/group__TopicUnalignedArrayAssert.html
+**** READ THIS WEB PAGE !!! ****"' failed.
+
+ +There are 4 known causes for this issue. +If you can target \cpp17 only with a recent compiler (e.g., GCC>=7, clang>=5, MSVC>=19.12), then you're lucky: enabling c++17 should be enough (if not, please report to us). +Otherwise, please read on to understand those issues and learn how to fix them. + +\eigenAutoToc + +\section where Where in my own code is the cause of the problem? + +First of all, you need to find out where in your own code this assertion was triggered from. At first glance, the error message doesn't look helpful, as it refers to a file inside Eigen! However, since your program crashed, if you can reproduce the crash, you can get a backtrace using any debugger. For example, if you're using GCC, you can use the GDB debugger as follows: +\code +$ gdb ./my_program # Start GDB on your program +> run # Start running your program +... # Now reproduce the crash! +> bt # Obtain the backtrace +\endcode +Now that you know precisely where in your own code the problem is happening, read on to understand what you need to change. + +\section c1 Cause 1: Structures having Eigen objects as members + +If you have code like this, + +\code +class Foo +{ + //... + Eigen::Vector4d v; + //... +}; +//... +Foo *foo = new Foo; +\endcode + +then you need to read this separate page: \ref TopicStructHavingEigenMembers "Structures Having Eigen Members". + +Note that here, Eigen::Vector4d is only used as an example, more generally the issue arises for all \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen types". + +\section c2 Cause 2: STL Containers or manual memory allocation + +If you use STL Containers such as std::vector, std::map, ..., with %Eigen objects, or with classes containing %Eigen objects, like this, + +\code +std::vector my_vector; +struct my_class { ... Eigen::Matrix2d m; ... }; +std::map my_map; +\endcode + +then you need to read this separate page: \ref TopicStlContainers "Using STL Containers with Eigen". 
+ +Note that here, Eigen::Matrix2d is only used as an example, more generally the issue arises for all \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen types" and \ref TopicStructHavingEigenMembers "structures having such Eigen objects as member". + +The same issue will be exhibited by any classes/functions by-passing operator new to allocate memory, that is, by performing custom memory allocation followed by calls to the placement new operator. This is for instance typically the case of \c `std::make_shared` or `std::allocate_shared` for which the solution is to use an \ref aligned_allocator "aligned allocator" as detailed in the \ref TopicStlContainers "solution for STL containers". 
+ + +\section explanation General explanation of this assertion + +\ref TopicFixedSizeVectorizable "Fixed-size vectorizable Eigen objects" must absolutely be created at properly aligned locations, otherwise SIMD instructions addressing them will crash. +For instance, SSE/NEON/MSA/Altivec/VSX targets will require 16-byte-alignment, whereas AVX and AVX512 targets may require up to 32 and 64 byte alignment respectively. + +%Eigen normally takes care of these alignment issues for you, by setting an alignment attribute on them and by overloading their `operator new`. + +However there are a few corner cases where these alignment settings get overridden: they are the possible causes for this assertion. + +\section getrid I don't care about optimal vectorization, how do I get rid of that stuff? + +Three possibilities: +
    +
+<ul>
+  <li>Use the \c DontAlign option to Matrix, Array, Quaternion, etc. objects that gives you trouble. This way %Eigen won't try to over-align them, and thus won't assume any special alignment. On the down side, you will pay the cost of unaligned loads/stores for them, but on modern CPUs, the overhead is either null or marginal. See \link StructHavingEigenMembers_othersolutions here \endlink for an example.</li>
+  <li>Define \link TopicPreprocessorDirectivesPerformance EIGEN_MAX_STATIC_ALIGN_BYTES \endlink to 0. That disables all 16-byte (and above) static alignment code, while keeping 16-byte (or above) heap alignment. This has the effect of vectorizing fixed-size objects (like Matrix4d) through unaligned stores (as controlled by \link TopicPreprocessorDirectivesPerformance EIGEN_UNALIGNED_VECTORIZE \endlink), while keeping unchanged the vectorization of dynamic-size objects (like MatrixXd). On 64 bytes systems, you might also define it to 16 to disable only 32 and 64 bytes of over-alignment. But do note that this breaks ABI compatibility with the default behavior of static alignment.</li>
+  <li>Or define both \link TopicPreprocessorDirectivesPerformance EIGEN_DONT_VECTORIZE \endlink and `EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT`. This keeps the 16-byte (or above) alignment code and thus preserves ABI compatibility, but completely disables vectorization.</li>
+</ul>
+ +If you want to know why defining `EIGEN_DONT_VECTORIZE` does not by itself disable 16-byte (or above) alignment and the assertion, here's the explanation: + +It doesn't disable the assertion, because otherwise code that runs fine without vectorization would suddenly crash when enabling vectorization. +It doesn't disable 16-byte (or above) alignment, because that would mean that vectorized and non-vectorized code are not mutually ABI-compatible. This ABI compatibility is very important, even for people who develop only an in-house application, as for instance one may want to have in the same application a vectorized path and a non-vectorized path. + +\section checkmycode How can I check my code is safe regarding alignment issues? + +Unfortunately, there is no possibility in c++ to detect any of the aforementioned shortcomings at compile time (though static analyzers are becoming more and more powerful and could detect some of them). +Even at runtime, all we can do is to catch invalid unaligned allocation and trigger the explicit assertion mentioned at the beginning of this page. +Therefore, if your program runs fine on a given system with some given compilation flags, then this does not guarantee that your code is safe. For instance, on most 64 bits systems buffers are aligned on a 16-byte boundary and so, if you do not enable AVX instruction set, then your code will run fine. On the other hand, the same code may assert if moving to a more exotic platform, or enabling AVX instructions that require 32-byte alignment by default. + +The situation is not hopeless though. Assuming your code is well covered by unit tests, then you can check its alignment safety by linking it to a custom malloc library returning 8-byte-aligned buffers only. This way all alignment shortcomings should pop up. To this end, you must also compile your program with \link TopicPreprocessorDirectivesPerformance EIGEN_MALLOC_ALREADY_ALIGNED=0 \endlink. 
+ + +*/ + +} diff --git a/include/eigen/doc/UsingBlasLapackBackends.dox b/include/eigen/doc/UsingBlasLapackBackends.dox new file mode 100644 index 0000000000000000000000000000000000000000..caa597122a5bef269ef492e1422a69da1f292184 --- /dev/null +++ b/include/eigen/doc/UsingBlasLapackBackends.dox @@ -0,0 +1,133 @@ +/* + Copyright (c) 2011, Intel Corporation. All rights reserved. + Copyright (C) 2011-2016 Gael Guennebaud + + Redistribution and use in source and binary forms, with or without modification, + are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + ******************************************************************************** + * Content : Documentation on the use of BLAS/LAPACK libraries through Eigen + ******************************************************************************** +*/ + +namespace Eigen { + +/** \page TopicUsingBlasLapack Using BLAS/LAPACK from %Eigen + + +Since %Eigen version 3.3 and later, any F77 compatible BLAS or LAPACK libraries can be used as backends for dense matrix products and dense matrix decompositions. +For instance, one can use Intel® MKL, Apple's Accelerate framework on OSX, OpenBLAS, Netlib LAPACK, etc. + +Do not miss this \link TopicUsingIntelMKL page \endlink for further discussions on the specific use of Intel® MKL (also includes VML, PARDISO, etc.) + +In order to use an external BLAS and/or LAPACK library, you must link your own application to the respective libraries and their dependencies. +For LAPACK, you must also link to the standard Lapacke library, which is used as a convenient thin layer between %Eigen's C++ code and LAPACK F77 interface. Then you must activate their usage by defining one or multiple of the following macros (\b before including any %Eigen's header): + +\note For Mac users, in order to use the lapack version shipped with the Accelerate framework, you also need the lapacke library. +Using MacPorts, this is as easy as: +\code +sudo port install lapack +\endcode +and then use the following link flags: \c -framework \c Accelerate \c /opt/local/lib/lapack/liblapacke.dylib + + + + + 
\c EIGEN_USE_BLAS Enables the use of external BLAS level 2 and 3 routines (compatible with any F77 BLAS interface)
\c EIGEN_USE_LAPACKE Enables the use of external Lapack routines via the Lapacke C interface to Lapack (compatible with any F77 LAPACK interface)
\c EIGEN_USE_LAPACKE_STRICT Same as \c EIGEN_USE_LAPACKE but algorithms of lower numerical robustness are disabled. \n This currently concerns only JacobiSVD which otherwise would be replaced by \c gesvd that is less robust than Jacobi rotations.
+ +When doing so, a number of %Eigen's algorithms are silently substituted with calls to BLAS or LAPACK routines. +These substitutions apply only for \b Dynamic \b or \b large enough objects with one of the following four standard scalar types: \c float, \c double, \c complex, and \c complex. +Operations on other scalar types or mixing reals and complexes will continue to use the built-in algorithms. + +The breadth of %Eigen functionality that can be substituted is listed in the table below. + + + + + + + + + + +
Functional domainCode exampleBLAS/LAPACK routines
Matrix-matrix operations \n \c EIGEN_USE_BLAS \code +m1*m2.transpose(); +m1.selfadjointView()*m2; +m1*m2.triangularView(); +m1.selfadjointView().rankUpdate(m2,1.0); +\endcode\code +?gemm +?symm/?hemm +?trmm +dsyrk/ssyrk +\endcode
Matrix-vector operations \n \c EIGEN_USE_BLAS \code +m1.adjoint()*b; +m1.selfadjointView()*b; +m1.triangularView()*b; +\endcode\code +?gemv +?symv/?hemv +?trmv +\endcode
LU decomposition \n \c EIGEN_USE_LAPACKE \n \c EIGEN_USE_LAPACKE_STRICT \code +v1 = m1.lu().solve(v2); +\endcode\code +?getrf +\endcode
Cholesky decomposition \n \c EIGEN_USE_LAPACKE \n \c EIGEN_USE_LAPACKE_STRICT \code +v1 = m2.selfadjointView().llt().solve(v2); +\endcode\code +?potrf +\endcode
QR decomposition \n \c EIGEN_USE_LAPACKE \n \c EIGEN_USE_LAPACKE_STRICT \code +m1.householderQr(); +m1.colPivHouseholderQr(); +\endcode\code +?geqrf +?geqp3 +\endcode
Singular value decomposition \n \c EIGEN_USE_LAPACKE \code +JacobiSVD svd; +svd.compute(m1, ComputeThinV); +\endcode\code +?gesvd +\endcode
Eigen-value decompositions \n \c EIGEN_USE_LAPACKE \n \c EIGEN_USE_LAPACKE_STRICT \code +EigenSolver es(m1); +ComplexEigenSolver ces(m1); +SelfAdjointEigenSolver saes(m1+m1.transpose()); +GeneralizedSelfAdjointEigenSolver + gsaes(m1+m1.transpose(),m2+m2.transpose()); +\endcode\code +?gees +?gees +?syev/?heev +?syev/?heev, +?potrf +\endcode
Schur decomposition \n \c EIGEN_USE_LAPACKE \n \c EIGEN_USE_LAPACKE_STRICT \code +RealSchur schurR(m1); +ComplexSchur schurC(m1); +\endcode\code +?gees +\endcode
+In the examples, m1 and m2 are dense matrices and v1 and v2 are dense vectors. + +*/ + +} diff --git a/include/eigen/doc/UsingNVCC.dox b/include/eigen/doc/UsingNVCC.dox new file mode 100644 index 0000000000000000000000000000000000000000..36beb2ddda68abdf1cc5980ed843be09f402a3d5 --- /dev/null +++ b/include/eigen/doc/UsingNVCC.dox @@ -0,0 +1,30 @@ + +namespace Eigen { + +/** \page TopicCUDA Using Eigen in CUDA kernels + +Starting from CUDA 5.5 and Eigen 3.3, it is possible to use Eigen's matrices, vectors, and arrays for fixed size within CUDA kernels. This is especially useful when working on numerous but small problems. By default, when Eigen's headers are included within a .cu file compiled by nvcc most Eigen's functions and methods are prefixed by the \c __device__ \c __host__ keywords making them callable from both host and device code. +This support can be disabled by defining \c EIGEN_NO_CUDA before including any Eigen's header. +This might be useful to disable some warnings when a .cu file makes use of Eigen on the host side only. +However, in both cases, host's SIMD vectorization has to be disabled in .cu files. +It is thus \b strongly \b recommended to properly move all costly host computation from your .cu files to regular .cpp files. + +Known issues: + + - \c nvcc with MS Visual Studio does not work (patch welcome) + + - \c nvcc 5.5 with gcc-4.7 (or greater) has issues with the standard \c \<limits\> header file. To workaround this, you can add the following before including any other files: + \code + // workaround issue between gcc >= 4.7 and cuda 5.5 + #if (defined __GNUC__) && (__GNUC__>4 || __GNUC_MINOR__>=7) + #undef _GLIBCXX_ATOMIC_BUILTINS + #undef _GLIBCXX_USE_INT128 + #endif + \endcode + + - On 64bits system Eigen uses \c long \c int as the default type for indexes and sizes. On CUDA device, it would make sense to default to 32 bits \c int. 
+ However, to keep host and CUDA code compatible, this cannot be done automatically by %Eigen, and the user is thus required to define \c EIGEN_DEFAULT_DENSE_INDEX_TYPE to \c int throughout his code (or only for CUDA code if there is no interaction between host and CUDA code through %Eigen's object). + +*/ + +} diff --git a/include/eigen/doc/eigen_navtree_hacks.js b/include/eigen/doc/eigen_navtree_hacks.js new file mode 100644 index 0000000000000000000000000000000000000000..afb97edf5c70b34b42432769f53b1beac98c7e75 --- /dev/null +++ b/include/eigen/doc/eigen_navtree_hacks.js @@ -0,0 +1,247 @@ + +// generate a table of contents in the side-nav based on the h1/h2 tags of the current page. +function generate_autotoc() { + var headers = $("h1, h2"); + if(headers.length > 1) { + var toc = $("#side-nav").append(''); + toc = $("#nav-toc"); + var footer = $("#nav-path"); + var footerHeight = footer.height(); + toc = toc.append('
    '); + toc = toc.find('ul'); + var indices = new Array(); + indices[0] = 0; + indices[1] = 0; + + var h1counts = $("h1").length; + headers.each(function(i) { + var current = $(this); + var levelTag = current[0].tagName.charAt(1); + if(h1counts==0) + levelTag--; + var cur_id = current.attr("id"); + + indices[levelTag-1]+=1; + var prefix = indices[0]; + if (levelTag >1) { + prefix+="."+indices[1]; + } + + // Uncomment to add number prefixes + // current.html(prefix + " " + current.html()); + for(var l = levelTag; l < 2; ++l){ + indices[l] = 0; + } + + if(cur_id == undefined) { + current.attr('id', 'title' + i); + current.addClass('anchor'); + toc.append("
  • " + current.text() + "
  • "); + } else { + toc.append("
  • " + current.text() + "
  • "); + } + }); + resizeHeight(); + } +} + + +var global_navtree_object; + +// Overloaded to remove links to sections/subsections +function getNode(o, po) +{ + po.childrenVisited = true; + var l = po.childrenData.length-1; + for (var i in po.childrenData) { + var nodeData = po.childrenData[i]; + if((!nodeData[1]) || (nodeData[1].indexOf('#')==-1)) // <- we added this line + po.children[i] = newNode(o, po, nodeData[0], nodeData[1], nodeData[2], i==l); + } +} + +// Overloaded to adjust the size of the navtree wrt the toc +function resizeHeight() +{ + var header = $("#top"); + var sidenav = $("#side-nav"); + var content = $("#doc-content"); + var navtree = $("#nav-tree"); + var footer = $("#nav-path"); + var toc = $("#nav-toc"); + + var headerHeight = header.outerHeight(); + var footerHeight = footer.outerHeight(); + var tocHeight = toc.height(); + var windowHeight = $(window).height() - headerHeight - footerHeight; + content.css({height:windowHeight + "px"}); + navtree.css({height:(windowHeight-tocHeight) + "px"}); + sidenav.css({height:windowHeight + "px"}); +} + +// Overloaded to save the root node into global_navtree_object +function initNavTree(toroot,relpath) +{ + var o = new Object(); + global_navtree_object = o; // <- we added this line + o.toroot = toroot; + o.node = new Object(); + o.node.li = document.getElementById("nav-tree-contents"); + o.node.childrenData = NAVTREE; + o.node.children = new Array(); + o.node.childrenUL = document.createElement("ul"); + o.node.getChildrenUL = function() { return o.node.childrenUL; }; + o.node.li.appendChild(o.node.childrenUL); + o.node.depth = 0; + o.node.relpath = relpath; + o.node.expanded = false; + o.node.isLast = true; + o.node.plus_img = document.createElement("img"); + o.node.plus_img.src = relpath+"ftv2pnode.png"; + o.node.plus_img.width = 16; + o.node.plus_img.height = 22; + + if (localStorageSupported()) { + var navSync = $('#nav-sync'); + if (cachedLink()) { + showSyncOff(navSync,relpath); + 
navSync.removeClass('sync'); + } else { + showSyncOn(navSync,relpath); + } + navSync.click(function(){ toggleSyncButton(relpath); }); + } + + navTo(o,toroot,window.location.hash,relpath); + + $(window).bind('hashchange', function(){ + if (window.location.hash && window.location.hash.length>1){ + var a; + if ($(location).attr('hash')){ + var clslink=stripPath($(location).attr('pathname'))+':'+ + $(location).attr('hash').substring(1); + a=$('.item a[class$="'+clslink+'"]'); + } + if (a==null || !$(a).parent().parent().hasClass('selected')){ + $('.item').removeClass('selected'); + $('.item').removeAttr('id'); + } + var link=stripPath2($(location).attr('pathname')); + navTo(o,link,$(location).attr('hash'),relpath); + } else if (!animationInProgress) { + $('#doc-content').scrollTop(0); + $('.item').removeClass('selected'); + $('.item').removeAttr('id'); + navTo(o,toroot,window.location.hash,relpath); + } + }) + + $(window).on("load", showRoot); +} + +// return false if the the node has no children at all, or has only section/subsection children +function checkChildrenData(node) { + if (!(typeof(node.childrenData)==='string')) { + for (var i in node.childrenData) { + var url = node.childrenData[i][1]; + if(url.indexOf("#")==-1) + return true; + } + return false; + } + return (node.childrenData); +} + +// Modified to: +// 1 - remove the root node +// 2 - remove the section/subsection children +function createIndent(o,domNode,node,level) +{ + var level=-2; // <- we replaced level=-1 by level=-2 + var n = node; + while (n.parentNode) { level++; n=n.parentNode; } + if (checkChildrenData(node)) { // <- we modified this line to use checkChildrenData(node) instead of node.childrenData + var imgNode = document.createElement("span"); + imgNode.className = 'arrow'; + imgNode.style.paddingLeft=(16*level).toString()+'px'; + imgNode.innerHTML=arrowRight; + node.plus_img = imgNode; + node.expandToggle = document.createElement("a"); + node.expandToggle.href = "javascript:void(0)"; + 
node.expandToggle.onclick = function() { + if (node.expanded) { + $(node.getChildrenUL()).slideUp("fast"); + node.plus_img.innerHTML=arrowRight; + node.expanded = false; + } else { + expandNode(o, node, false, false); + } + } + node.expandToggle.appendChild(imgNode); + domNode.appendChild(node.expandToggle); + } else { + var span = document.createElement("span"); + span.className = 'arrow'; + span.style.width = 16*(level+1)+'px'; + span.innerHTML = ' '; + domNode.appendChild(span); + } +} + +// Overloaded to automatically expand the selected node +function selectAndHighlight(hash,n) +{ + var a; + if (hash) { + var link=stripPath($(location).attr('pathname'))+':'+hash.substring(1); + a=$('.item a[class$="'+link+'"]'); + } + if (a && a.length) { + a.parent().parent().addClass('selected'); + a.parent().parent().attr('id','selected'); + highlightAnchor(); + } else if (n) { + $(n.itemDiv).addClass('selected'); + $(n.itemDiv).attr('id','selected'); + } + if ($('#nav-tree-contents .item:first').hasClass('selected')) { + $('#nav-sync').css('top','30px'); + } else { + $('#nav-sync').css('top','5px'); + } + expandNode(global_navtree_object, n, true, true); // <- we added this line + showRoot(); +} + + +$(document).ready(function() { + + generate_autotoc(); + + (function (){ // wait until the first "selected" element has been created + try { + + // this line will triger an exception if there is no #selected element, i.e., before the tree structure is complete. + document.getElementById("selected").className = "item selected"; + + // ok, the default tree has been created, we can keep going... 
+ + // expand the "Chapters" node + if(window.location.href.indexOf('unsupported')==-1) + expandNode(global_navtree_object, global_navtree_object.node.children[0].children[2], true, true); + else + expandNode(global_navtree_object, global_navtree_object.node.children[0].children[1], true, true); + + // Hide the root node "Eigen" + $(document.getElementsByClassName('index.html')[0]).parent().parent().css({display:"none"}); + + } catch (err) { + setTimeout(arguments.callee, 10); + } + })(); + + $(window).on("load", resizeHeight); +}); + diff --git a/include/eigen/doc/eigendoxy.css b/include/eigen/doc/eigendoxy.css new file mode 100644 index 0000000000000000000000000000000000000000..c746194e6c627c118991880f2fe423d1b1049b46 --- /dev/null +++ b/include/eigen/doc/eigendoxy.css @@ -0,0 +1,233 @@ + +/******** Eigen specific CSS code ************/ + +/**** Styles removing elements ****/ + +/* remove the "modules|classes" link for module pages (they are already in the TOC) */ +div.summary { + display:none; +} + +/* remove */ +div.contents hr { + display:none; +} + +/**** ****/ + +p, dl.warning, dl.attention, dl.note +{ + max-width:60em; + text-align:justify; +} + +li { + max-width:55em; + text-align:justify; +} + +img { + border: 0; +} + +div.fragment { + display:table; /* this allows the element to be larger than its parent */ + padding: 0pt; +} +pre.fragment { + border: 1px solid #cccccc; + + margin: 2px 0px 2px 0px; + padding: 3px 5px 3px 5px; +} + + + +/* Common style for all Eigen's tables */ + +table.example, table.manual, table.manual-vl, table.manual-hl { + max-width:100%; + border-collapse: collapse; + border-style: solid; + border-width: 1px; + border-color: #cccccc; + font-size: 1em; + + box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15); + -moz-box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15); + -webkit-box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15); +} + +table.example th, table.manual th, table.manual-vl th, table.manual-hl th { + padding: 0.5em 0.5em 0.5em 0.5em; + 
text-align: left; + padding-right: 1em; + color: #555555; + background-color: #F4F4E5; + + background-image: -webkit-gradient(linear,center top,center bottom,from(#FFFFFF), color-stop(0.3,#FFFFFF), color-stop(0.30,#FFFFFF), color-stop(0.98,#F4F4E5), to(#ECECDE)); + background-image: -moz-linear-gradient(center top, #FFFFFF 0%, #FFFFFF 30%, #F4F4E5 98%, #ECECDE); + filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#FFFFFF', endColorstr='#F4F4E5'); +} + +table.example td, table.manual td, table.manual-vl td, table.manual-hl td { + vertical-align:top; + border-width: 1px; + border-color: #cccccc; +} + +/* header of headers */ +table th.meta { + text-align:center; + font-size: 1.2em; + background-color:#FFFFFF; +} + +/* intermediate header */ +table th.inter { + text-align:left; + background-color:#FFFFFF; + background-image:none; + border-style:solid solid solid solid; + border-width: 1px; + border-color: #cccccc; +} + +/** class for example / output tables **/ + +table.example { +} + +table.example th { +} + +table.example td { + padding: 0.5em 0.5em 0.5em 0.5em; + vertical-align:top; +} + +/* standard class for the manual */ + +table.manual, table.manual-vl, table.manual-hl { + padding: 0.2em 0em 0.5em 0em; +} + +table.manual th, table.manual-vl th, table.manual-hl th { + margin: 0em 0em 0.3em 0em; +} + +table.manual td, table.manual-vl td, table.manual-hl td { + padding: 0.3em 0.5em 0.3em 0.5em; + vertical-align:top; + border-width: 1px; +} + +table.manual td.alt, table.manual tr.alt, table.manual-vl td.alt, table.manual-vl tr.alt { + background-color: #F4F4E5; +} + +table.manual-vl th, table.manual-vl td, table.manual-vl td.alt { + border-color: #cccccc; + border-width: 1px; + border-style: none solid none solid; +} + +table.manual-vl th.inter { + border-style: solid solid solid solid; +} + +table.manual-hl td { + border-color: #cccccc; + border-width: 1px; + border-style: solid none solid none; +} + +table td.code { + font-family: monospace; +} + 
+h2 { + margin-top:2em; + border-style: none none solid none; + border-width: 1px; + border-color: #cccccc; +} + +/**** Table of content in the side-nav ****/ + + +div.toc { + margin:0; + padding: 0.3em 0 0 0; + width:100%; + float: none; + border-radius:0px; + border-style: solid none none none; + max-height:50%; + overflow-y: scroll; +} + +div.toc h3 { + margin-left: 0.5em; + margin-bottom: 0.2em; +} + +div.toc ul { + margin: 0.2em 0 0.4em 0.5em; +} + +span.cpp11,span.cpp14,span.cpp17 { + color: #119911; + font-weight: bold; +} + +.newin3x { + color: #a37c1a; + font-weight: bold; +} + +div.warningbox { + max-width:60em; + border-style: solid solid solid solid; + border-color: red; + border-width: 3px; +} + +/**** old Eigen's styles ****/ + + +table.tutorial_code td { + border-color: transparent; /* required for Firefox */ + padding: 3pt 5pt 3pt 5pt; + vertical-align: top; +} + + +/* Whenever doxygen meets a '\n' or a '
<BR/>', it will put
 * the text containing the character into a <p class="starttd"></p>
    . + * This little hack together with table.tutorial_code td.note + * aims at fixing this issue. */ +table.tutorial_code td.note p.starttd { + margin: 0px; + border: none; + padding: 0px; +} + +div.eimainmenu { + text-align: center; +} + +/* center version number on main page */ +h3.version { + text-align: center; +} + + +td.width20em p.endtd { + width: 20em; +} + +/* needed for huge screens */ +.ui-resizable-e { + background-repeat: repeat-y; +} diff --git a/include/eigen/doc/eigendoxy_header.html.in b/include/eigen/doc/eigendoxy_header.html.in new file mode 100644 index 0000000000000000000000000000000000000000..e377b26f870b9c4a48c762b4e9e55e844a83dc39 --- /dev/null +++ b/include/eigen/doc/eigendoxy_header.html.in @@ -0,0 +1,82 @@ + + + + + + + + +$projectname: $title +$title + + + + + + + + + + + + + + +$treeview +$search +$mathjax +$darkmode + + +$extrastylesheet + + + + +

    + + + +
    + + +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    $projectname +  $projectnumber +
    +
    $projectbrief
    +
    +
    $projectbrief
    +
    $searchbox
    $searchbox
    +
    + + diff --git a/include/eigen/doc/eigendoxy_tabs.css b/include/eigen/doc/eigendoxy_tabs.css new file mode 100644 index 0000000000000000000000000000000000000000..21920562a831f80ad5df7d913b1d3d64db994510 --- /dev/null +++ b/include/eigen/doc/eigendoxy_tabs.css @@ -0,0 +1,59 @@ +.tabs, .tabs2, .tabs3 { + background-image: url('tab_b.png'); + width: 100%; + z-index: 101; + font-size: 13px; +} + +.tabs2 { + font-size: 10px; +} +.tabs3 { + font-size: 9px; +} + +.tablist { + margin: 0; + padding: 0; + display: table; +} + +.tablist li { + float: left; + display: table-cell; + background-image: url('tab_b.png'); + line-height: 36px; + list-style: none; +} + +.tablist a { + display: block; + padding: 0 20px; + font-weight: bold; + background-image:url('tab_s.png'); + background-repeat:no-repeat; + background-position:right; + color: #283A5D; + text-shadow: 0px 1px 1px rgba(255, 255, 255, 0.9); + text-decoration: none; + outline: none; +} + +.tabs3 .tablist a { + padding: 0 10px; +} + +.tablist a:hover { + background-image: url('tab_h.png'); + background-repeat:repeat-x; + color: #fff; + text-shadow: 0px 1px 1px rgba(0, 0, 0, 1.0); + text-decoration: none; +} + +.tablist li.current a { + background-image: url('tab_a.png'); + background-repeat:repeat-x; + color: #fff; + text-shadow: 0px 1px 1px rgba(0, 0, 0, 1.0); +} diff --git a/include/eigen/doc/tutorial.cpp b/include/eigen/doc/tutorial.cpp new file mode 100644 index 0000000000000000000000000000000000000000..62be7c2707b746c71c4df7cdaf419cfcfe1d35fe --- /dev/null +++ b/include/eigen/doc/tutorial.cpp @@ -0,0 +1,62 @@ +#include + +int main(int argc, char *argv[]) +{ + std::cout.precision(2); + + // demo static functions + Eigen::Matrix3f m3 = Eigen::Matrix3f::Random(); + Eigen::Matrix4f m4 = Eigen::Matrix4f::Identity(); + + std::cout << "*** Step 1 ***\nm3:\n" << m3 << "\nm4:\n" << m4 << std::endl; + + // demo non-static set... 
functions + m4.setZero(); + m3.diagonal().setOnes(); + + std::cout << "*** Step 2 ***\nm3:\n" << m3 << "\nm4:\n" << m4 << std::endl; + + // demo fixed-size block() expression as lvalue and as rvalue + m4.block<3,3>(0,1) = m3; + m3.row(2) = m4.block<1,3>(2,0); + + std::cout << "*** Step 3 ***\nm3:\n" << m3 << "\nm4:\n" << m4 << std::endl; + + // demo dynamic-size block() + { + int rows = 3, cols = 3; + m4.block(0,1,3,3).setIdentity(); + std::cout << "*** Step 4 ***\nm4:\n" << m4 << std::endl; + } + + // demo vector blocks + m4.diagonal().block(1,2).setOnes(); + std::cout << "*** Step 5 ***\nm4.diagonal():\n" << m4.diagonal() << std::endl; + std::cout << "m4.diagonal().start(3)\n" << m4.diagonal().start(3) << std::endl; + + // demo coeff-wise operations + m4 = m4.cwise()*m4; + m3 = m3.cwise().cos(); + std::cout << "*** Step 6 ***\nm3:\n" << m3 << "\nm4:\n" << m4 << std::endl; + + // sums of coefficients + std::cout << "*** Step 7 ***\n m4.sum(): " << m4.sum() << std::endl; + std::cout << "m4.col(2).sum(): " << m4.col(2).sum() << std::endl; + std::cout << "m4.colwise().sum():\n" << m4.colwise().sum() << std::endl; + std::cout << "m4.rowwise().sum():\n" << m4.rowwise().sum() << std::endl; + + // demo intelligent auto-evaluation + m4 = m4 * m4; // auto-evaluates so no aliasing problem (performance penalty is low) + Eigen::Matrix4f other = (m4 * m4).lazy(); // forces lazy evaluation + m4 = m4 + m4; // here Eigen goes for lazy evaluation, as with most expressions + m4 = -m4 + m4 + 5 * m4; // same here, Eigen chooses lazy evaluation for all that. + m4 = m4 * (m4 + m4); // here Eigen chooses to first evaluate m4 + m4 into a temporary. + // indeed, here it is an optimization to cache this intermediate result. + m3 = m3 * m4.block<3,3>(1,1); // here Eigen chooses NOT to evaluate block() into a temporary + // because accessing coefficients of that block expression is not more costly than accessing + // coefficients of a plain matrix. 
+ m4 = m4 * m4.transpose(); // same here, lazy evaluation of the transpose. + m4 = m4 * m4.transpose().eval(); // forces immediate evaluation of the transpose + + std::cout << "*** Step 8 ***\nm3:\n" << m3 << "\nm4:\n" << m4 << std::endl; +} diff --git a/include/eigen/scripts/CMakeLists.txt b/include/eigen/scripts/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..0d9a631a9dd0d46b726703af0e51d5f49fddf029 --- /dev/null +++ b/include/eigen/scripts/CMakeLists.txt @@ -0,0 +1,6 @@ +get_property(EIGEN_TESTS_LIST GLOBAL PROPERTY EIGEN_TESTS_LIST) +configure_file(buildtests.in ${CMAKE_BINARY_DIR}/buildtests.sh @ONLY) + +configure_file(check.in ${CMAKE_BINARY_DIR}/check.sh COPYONLY) +configure_file(debug.in ${CMAKE_BINARY_DIR}/debug.sh COPYONLY) +configure_file(release.in ${CMAKE_BINARY_DIR}/release.sh COPYONLY) diff --git a/include/eigen/scripts/cdashtesting.cmake.in b/include/eigen/scripts/cdashtesting.cmake.in new file mode 100644 index 0000000000000000000000000000000000000000..0bf0fac2a2e179cd6ce7b4df436ead091cfd0161 --- /dev/null +++ b/include/eigen/scripts/cdashtesting.cmake.in @@ -0,0 +1,49 @@ + +set(CTEST_SOURCE_DIRECTORY "@CMAKE_SOURCE_DIR@") +set(CTEST_BINARY_DIRECTORY "@CMAKE_BINARY_DIR@") +set(CTEST_CMAKE_GENERATOR "@CMAKE_GENERATOR@") +set(CTEST_BUILD_NAME "@BUILDNAME@") +set(CTEST_SITE "@SITE@") + +set(MODEL Experimental) +if(${CTEST_SCRIPT_ARG} MATCHES Nightly) + set(MODEL Nightly) +elseif(${CTEST_SCRIPT_ARG} MATCHES Continuous) + set(MODEL Continuous) +endif() + +find_program(CTEST_GIT_COMMAND NAMES git) +set(CTEST_UPDATE_COMMAND "${CTEST_GIT_COMMAND}") + +ctest_start(${MODEL} ${CTEST_SOURCE_DIRECTORY} ${CTEST_BINARY_DIRECTORY}) + +ctest_update(SOURCE "${CTEST_SOURCE_DIRECTORY}") +ctest_submit(PARTS Update Notes) + +# to get CTEST_PROJECT_SUBPROJECTS definition: +include("${CTEST_SOURCE_DIRECTORY}/CTestConfig.cmake") + +foreach(subproject ${CTEST_PROJECT_SUBPROJECTS}) + message("") + message("Process ${subproject}") + + 
set_property(GLOBAL PROPERTY SubProject ${subproject}) + set_property(GLOBAL PROPERTY Label ${subproject}) + + ctest_configure(BUILD ${CTEST_BINARY_DIRECTORY} SOURCE ${CTEST_SOURCE_DIRECTORY} ) + ctest_submit(PARTS Configure) + + set(CTEST_BUILD_TARGET "Build${subproject}") + message("Build ${CTEST_BUILD_TARGET}") + ctest_build(BUILD "${CTEST_BINARY_DIRECTORY}" APPEND) + # builds target ${CTEST_BUILD_TARGET} + ctest_submit(PARTS Build) + + ctest_test(BUILD "${CTEST_BINARY_DIRECTORY}" INCLUDE_LABEL "${subproject}" ) + # runs only tests that have a LABELS property matching "${subproject}" + + ctest_coverage(BUILD "${CTEST_BINARY_DIRECTORY}" LABELS "${subproject}" ) + + ctest_submit(PARTS Test) + +endforeach() diff --git a/include/eigen/scripts/debug.in b/include/eigen/scripts/debug.in new file mode 100644 index 0000000000000000000000000000000000000000..d339d3d1f9067ad52197ae05062aa36d94371506 --- /dev/null +++ b/include/eigen/scripts/debug.in @@ -0,0 +1,3 @@ +#!/bin/sh + +cmake -DCMAKE_BUILD_TYPE=Debug . diff --git a/include/eigen/scripts/eigen_gen_credits.cpp b/include/eigen/scripts/eigen_gen_credits.cpp new file mode 100644 index 0000000000000000000000000000000000000000..f2e81631dd4b0118fdcb4844cb94e0c3b88538bf --- /dev/null +++ b/include/eigen/scripts/eigen_gen_credits.cpp @@ -0,0 +1,232 @@ +#include +#include +#include +#include +#include +#include +#include + +using namespace std; + +// this function takes a line that may contain a name and/or email address, +// and returns just the name, while fixing the "bad cases". 
+std::string contributor_name(const std::string& line) +{ + string result; + + // let's first take care of the case of isolated email addresses, like + // "user@localhost.localdomain" entries + if(line.find("markb@localhost.localdomain") != string::npos) + { + return "Mark Borgerding"; + } + + if(line.find("kayhman@contact.intra.cea.fr") != string::npos) + { + return "Guillaume Saupin"; + } + + // from there on we assume that we have a entry of the form + // either: + // Bla bli Blurp + // or: + // Bla bli Blurp + + size_t position_of_email_address = line.find_first_of('<'); + if(position_of_email_address != string::npos) + { + // there is an e-mail address in <...>. + + // Hauke once committed as "John Smith", fix that. + if(line.find("hauke.heibel") != string::npos) + result = "Hauke Heibel"; + else + { + // just remove the e-mail address + result = line.substr(0, position_of_email_address); + } + } + else + { + // there is no e-mail address in <...>. + + if(line.find("convert-repo") != string::npos) + result = ""; + else + result = line; + } + + // remove trailing spaces + size_t length = result.length(); + while(length >= 1 && result[length-1] == ' ') result.erase(--length); + + return result; +} + +// parses hg churn output to generate a contributors map. 
+map contributors_map_from_churn_output(const char *filename) +{ + map contributors_map; + + string line; + ifstream churn_out; + churn_out.open(filename, ios::in); + while(!getline(churn_out,line).eof()) + { + // remove the histograms "******" that hg churn may draw at the end of some lines + size_t first_star = line.find_first_of('*'); + if(first_star != string::npos) line.erase(first_star); + + // remove trailing spaces + size_t length = line.length(); + while(length >= 1 && line[length-1] == ' ') line.erase(--length); + + // now the last space indicates where the number starts + size_t last_space = line.find_last_of(' '); + + // get the number (of changesets or of modified lines for each contributor) + int number; + istringstream(line.substr(last_space+1)) >> number; + + // get the name of the contributor + line.erase(last_space); + string name = contributor_name(line); + + map::iterator it = contributors_map.find(name); + // if new contributor, insert + if(it == contributors_map.end()) + contributors_map.insert(pair(name, number)); + // if duplicate, just add the number + else + it->second += number; + } + churn_out.close(); + + return contributors_map; +} + +// find the last name, i.e. the last word. +// for "van den Schbling" types of last names, that's not a problem, that's actually what we want. 
+string lastname(const string& name) +{ + size_t last_space = name.find_last_of(' '); + if(last_space >= name.length()-1) return name; + else return name.substr(last_space+1); +} + +struct contributor +{ + string name; + int changedlines; + int changesets; + string url; + string misc; + + contributor() : changedlines(0), changesets(0) {} + + bool operator < (const contributor& other) + { + return lastname(name).compare(lastname(other.name)) < 0; + } +}; + +void add_online_info_into_contributors_list(list& contributors_list, const char *filename) +{ + string line; + ifstream online_info; + online_info.open(filename, ios::in); + while(!getline(online_info,line).eof()) + { + string hgname, realname, url, misc; + + size_t last_bar = line.find_last_of('|'); + if(last_bar == string::npos) continue; + if(last_bar < line.length()) + misc = line.substr(last_bar+1); + line.erase(last_bar); + + last_bar = line.find_last_of('|'); + if(last_bar == string::npos) continue; + if(last_bar < line.length()) + url = line.substr(last_bar+1); + line.erase(last_bar); + + last_bar = line.find_last_of('|'); + if(last_bar == string::npos) continue; + if(last_bar < line.length()) + realname = line.substr(last_bar+1); + line.erase(last_bar); + + hgname = line; + + // remove the example line + if(hgname.find("MercurialName") != string::npos) continue; + + list::iterator it; + for(it=contributors_list.begin(); it != contributors_list.end() && it->name != hgname; ++it) + {} + + if(it == contributors_list.end()) + { + contributor c; + c.name = realname; + c.url = url; + c.misc = misc; + contributors_list.push_back(c); + } + else + { + it->name = realname; + it->url = url; + it->misc = misc; + } + } +} + +int main() +{ + // parse the hg churn output files + map contributors_map_for_changedlines = contributors_map_from_churn_output("churn-changedlines.out"); + //map contributors_map_for_changesets = contributors_map_from_churn_output("churn-changesets.out"); + + // merge into the contributors list 
+ list contributors_list; + map::iterator it; + for(it=contributors_map_for_changedlines.begin(); it != contributors_map_for_changedlines.end(); ++it) + { + contributor c; + c.name = it->first; + c.changedlines = it->second; + c.changesets = 0; //contributors_map_for_changesets.find(it->first)->second; + contributors_list.push_back(c); + } + + add_online_info_into_contributors_list(contributors_list, "online-info.out"); + + contributors_list.sort(); + + cout << "{| cellpadding=\"5\"\n"; + cout << "!\n"; + cout << "! Lines changed\n"; + cout << "!\n"; + + list::iterator itc; + int i = 0; + for(itc=contributors_list.begin(); itc != contributors_list.end(); ++itc) + { + if(itc->name.length() == 0) continue; + if(i%2) cout << "|-\n"; + else cout << "|- style=\"background:#FFFFD0\"\n"; + if(itc->url.length()) + cout << "| [" << itc->url << " " << itc->name << "]\n"; + else + cout << "| " << itc->name << "\n"; + if(itc->changedlines) + cout << "| " << itc->changedlines << "\n"; + else + cout << "| (no information)\n"; + cout << "| " << itc->misc << "\n"; + i++; + } + cout << "|}" << endl; +} diff --git a/include/eigen/scripts/eigen_gen_docs b/include/eigen/scripts/eigen_gen_docs new file mode 100644 index 0000000000000000000000000000000000000000..787dcb325faf1e052fc80e0da18f4c213871d3bb --- /dev/null +++ b/include/eigen/scripts/eigen_gen_docs @@ -0,0 +1,24 @@ +#!/bin/sh + +# configuration +# You should call this script with USER set as you want, else some default +# will be used +USER=${USER:-'orzel'} +UPLOAD_DIR=dox-devel + +#ulimit -v 1024000 + +# step 1 : build +rm build/doc/html -Rf +mkdir build -p +(cd build && cmake .. 
&& make doc) || { echo "make failed"; exit 1; } + +#step 2 : upload +# (the '/' at the end of path is very important, see rsync documentation) +rsync -az --no-p --delete build/doc/html/ $USER@ssh.tuxfamily.org:eigen/eigen.tuxfamily.org-web/htdocs/$UPLOAD_DIR/ || { echo "upload failed"; exit 1; } + +#step 3 : fix the perm +ssh $USER@ssh.tuxfamily.org "chmod -R g+w /home/eigen/eigen.tuxfamily.org-web/htdocs/$UPLOAD_DIR" || { echo "perm failed"; exit 1; } + +echo "Uploaded successfully" + diff --git a/include/eigen/scripts/eigen_gen_split_test_help.cmake b/include/eigen/scripts/eigen_gen_split_test_help.cmake new file mode 100644 index 0000000000000000000000000000000000000000..e43f5aabec38103cf449219b26f0f0c07d4cb461 --- /dev/null +++ b/include/eigen/scripts/eigen_gen_split_test_help.cmake @@ -0,0 +1,11 @@ +#!cmake -P +file(WRITE split_test_helper.h "") +foreach(i RANGE 1 999) + file(APPEND split_test_helper.h + "#if defined(EIGEN_TEST_PART_${i}) || defined(EIGEN_TEST_PART_ALL)\n" + "#define CALL_SUBTEST_${i}(FUNC) CALL_SUBTEST(FUNC)\n" + "#else\n" + "#define CALL_SUBTEST_${i}(FUNC)\n" + "#endif\n\n" + ) +endforeach() \ No newline at end of file diff --git a/include/eigen/scripts/eigen_monitor_perf.sh b/include/eigen/scripts/eigen_monitor_perf.sh new file mode 100644 index 0000000000000000000000000000000000000000..8f3425dafcf077602d48d16023606df05938e7ca --- /dev/null +++ b/include/eigen/scripts/eigen_monitor_perf.sh @@ -0,0 +1,25 @@ +#!/bin/bash + +# This is a script example to automatically update and upload performance unit tests. +# The following five variables must be adjusted to match your settings. 
+ +USER='ggael' +UPLOAD_DIR=perf_monitoring/ggaelmacbook26 +EIGEN_SOURCE_PATH=$HOME/Eigen/eigen +export PREFIX="haswell-fma" +export CXX_FLAGS="-mfma -w" + +#### + +BENCH_PATH=$EIGEN_SOURCE_PATH/bench/perf_monitoring/$PREFIX +PREVPATH=$(pwd) +cd $EIGEN_SOURCE_PATH/bench/perf_monitoring && ./runall.sh "Haswell 2.6GHz, FMA, Apple's clang" "$@" +cd $PREVPATH || exit 1 + +ALLFILES="$BENCH_PATH/*.png $BENCH_PATH/*.html $BENCH_PATH/index.html $BENCH_PATH/s1.js $BENCH_PATH/s2.js" + +# (the '/' at the end of path is very important, see rsync documentation) +rsync -az --no-p --delete $ALLFILES $USER@ssh.tuxfamily.org:eigen/eigen.tuxfamily.org-web/htdocs/$UPLOAD_DIR/ || { echo "upload failed"; exit 1; } + +# fix the perm +ssh $USER@ssh.tuxfamily.org "chmod -R g+w /home/eigen/eigen.tuxfamily.org-web/htdocs/perf_monitoring" || { echo "perm failed"; exit 1; } diff --git a/include/eigen/scripts/release.in b/include/eigen/scripts/release.in new file mode 100644 index 0000000000000000000000000000000000000000..db2d9d940283f607a103a5ae979f6921d5a8bea1 --- /dev/null +++ b/include/eigen/scripts/release.in @@ -0,0 +1,3 @@ +#!/bin/sh + +cmake -DCMAKE_BUILD_TYPE=Release . diff --git a/include/eigen/scripts/relicense.py b/include/eigen/scripts/relicense.py new file mode 100644 index 0000000000000000000000000000000000000000..8a5265f1f171ab8afc7cc7892a773099f7723d84 --- /dev/null +++ b/include/eigen/scripts/relicense.py @@ -0,0 +1,69 @@ +# This file is part of Eigen, a lightweight C++ template library +# for linear algebra. +# +# Copyright (C) 2012 Keir Mierle +# +# This Source Code Form is subject to the terms of the Mozilla +# Public License v. 2.0. If a copy of the MPL was not distributed +# with this file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Author: mierle@gmail.com (Keir Mierle) +# +# Make the long-awaited conversion to MPL. 
+ +lgpl3_header = ''' +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . +''' + +mpl2_header = """ +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+""" + +import os +import sys + +exclusions = set(['relicense.py']) + +def update(text): + if text.find(lgpl3_header) == -1: + return text, False + return text.replace(lgpl3_header, mpl2_header), True + +rootdir = sys.argv[1] +for root, sub_folders, files in os.walk(rootdir): + for basename in files: + if basename in exclusions: + print 'SKIPPED', filename + continue + filename = os.path.join(root, basename) + fo = file(filename) + text = fo.read() + fo.close() + + text, updated = update(text) + if updated: + fo = file(filename, "w") + fo.write(text) + fo.close() + print 'UPDATED', filename + else: + print ' ', filename diff --git a/include/eigen/test/array_cwise.cpp b/include/eigen/test/array_cwise.cpp new file mode 100644 index 0000000000000000000000000000000000000000..bbb74b1a64eeeaf21ac58765471762eca08dc080 --- /dev/null +++ b/include/eigen/test/array_cwise.cpp @@ -0,0 +1,726 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2009 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#include "main.h" + + +// Test the corner cases of pow(x, y) for real types. +template +void pow_test() { + const Scalar zero = Scalar(0); + const Scalar eps = Eigen::NumTraits::epsilon(); + const Scalar one = Scalar(1); + const Scalar two = Scalar(2); + const Scalar three = Scalar(3); + const Scalar sqrt_half = Scalar(std::sqrt(0.5)); + const Scalar sqrt2 = Scalar(std::sqrt(2)); + const Scalar inf = Eigen::NumTraits::infinity(); + const Scalar nan = Eigen::NumTraits::quiet_NaN(); + const Scalar denorm_min = EIGEN_ARCH_ARM ? 
zero : std::numeric_limits::denorm_min(); + const Scalar min = (std::numeric_limits::min)(); + const Scalar max = (std::numeric_limits::max)(); + const Scalar max_exp = (static_cast(int(Eigen::NumTraits::max_exponent())) * Scalar(EIGEN_LN2)) / eps; + + const static Scalar abs_vals[] = {zero, + denorm_min, + min, + eps, + sqrt_half, + one, + sqrt2, + two, + three, + max_exp, + max, + inf, + nan}; + const int abs_cases = 13; + const int num_cases = 2*abs_cases * 2*abs_cases; + // Repeat the same value to make sure we hit the vectorized path. + const int num_repeats = 32; + Array x(num_repeats, num_cases); + Array y(num_repeats, num_cases); + int count = 0; + for (int i = 0; i < abs_cases; ++i) { + const Scalar abs_x = abs_vals[i]; + for (int sign_x = 0; sign_x < 2; ++sign_x) { + Scalar x_case = sign_x == 0 ? -abs_x : abs_x; + for (int j = 0; j < abs_cases; ++j) { + const Scalar abs_y = abs_vals[j]; + for (int sign_y = 0; sign_y < 2; ++sign_y) { + Scalar y_case = sign_y == 0 ? -abs_y : abs_y; + for (int repeat = 0; repeat < num_repeats; ++repeat) { + x(repeat, count) = x_case; + y(repeat, count) = y_case; + } + ++count; + } + } + } + } + + Array actual = x.pow(y); + const Scalar tol = test_precision(); + bool all_pass = true; + for (int i = 0; i < 1; ++i) { + for (int j = 0; j < num_cases; ++j) { + Scalar e = static_cast(std::pow(x(i,j), y(i,j))); + Scalar a = actual(i, j); +#if EIGEN_ARCH_ARM + // Work around NEON flush-to-zero mode + // if ref returns a subnormal value and Eigen returns 0, then skip the test + if (a == Scalar(0) && + (e > -(std::numeric_limits::min)() && e < (std::numeric_limits::min)() && + e >= -std::numeric_limits::denorm_min() && e <= std::numeric_limits::denorm_min())) { + continue; + } +#endif + bool success = (a == e) || ((numext::isfinite)(e) && internal::isApprox(a, e, tol)) || + ((numext::isnan)(a) && (numext::isnan)(e)); + all_pass &= success; + if (!success) { + std::cout << "pow(" << x(i,j) << "," << y(i,j) << ") = " << a << " != " << e 
<< std::endl; + } + } + } + VERIFY(all_pass); +} + +template void array(const ArrayType& m) +{ + typedef typename ArrayType::Scalar Scalar; + typedef typename ArrayType::RealScalar RealScalar; + typedef Array ColVectorType; + typedef Array RowVectorType; + + Index rows = m.rows(); + Index cols = m.cols(); + + ArrayType m1 = ArrayType::Random(rows, cols), + m2 = ArrayType::Random(rows, cols), + m3(rows, cols); + ArrayType m4 = m1; // copy constructor + VERIFY_IS_APPROX(m1, m4); + + ColVectorType cv1 = ColVectorType::Random(rows); + RowVectorType rv1 = RowVectorType::Random(cols); + + Scalar s1 = internal::random(), + s2 = internal::random(); + + // scalar addition + VERIFY_IS_APPROX(m1 + s1, s1 + m1); + VERIFY_IS_APPROX(m1 + s1, ArrayType::Constant(rows,cols,s1) + m1); + VERIFY_IS_APPROX(s1 - m1, (-m1)+s1 ); + VERIFY_IS_APPROX(m1 - s1, m1 - ArrayType::Constant(rows,cols,s1)); + VERIFY_IS_APPROX(s1 - m1, ArrayType::Constant(rows,cols,s1) - m1); + VERIFY_IS_APPROX((m1*Scalar(2)) - s2, (m1+m1) - ArrayType::Constant(rows,cols,s2) ); + m3 = m1; + m3 += s2; + VERIFY_IS_APPROX(m3, m1 + s2); + m3 = m1; + m3 -= s1; + VERIFY_IS_APPROX(m3, m1 - s1); + + // scalar operators via Maps + m3 = m1; + ArrayType::Map(m1.data(), m1.rows(), m1.cols()) -= ArrayType::Map(m2.data(), m2.rows(), m2.cols()); + VERIFY_IS_APPROX(m1, m3 - m2); + + m3 = m1; + ArrayType::Map(m1.data(), m1.rows(), m1.cols()) += ArrayType::Map(m2.data(), m2.rows(), m2.cols()); + VERIFY_IS_APPROX(m1, m3 + m2); + + m3 = m1; + ArrayType::Map(m1.data(), m1.rows(), m1.cols()) *= ArrayType::Map(m2.data(), m2.rows(), m2.cols()); + VERIFY_IS_APPROX(m1, m3 * m2); + + m3 = m1; + m2 = ArrayType::Random(rows,cols); + m2 = (m2==0).select(1,m2); + ArrayType::Map(m1.data(), m1.rows(), m1.cols()) /= ArrayType::Map(m2.data(), m2.rows(), m2.cols()); + VERIFY_IS_APPROX(m1, m3 / m2); + + // reductions + VERIFY_IS_APPROX(m1.abs().colwise().sum().sum(), m1.abs().sum()); + VERIFY_IS_APPROX(m1.abs().rowwise().sum().sum(), m1.abs().sum()); 
+ using std::abs; + VERIFY_IS_MUCH_SMALLER_THAN(abs(m1.colwise().sum().sum() - m1.sum()), m1.abs().sum()); + VERIFY_IS_MUCH_SMALLER_THAN(abs(m1.rowwise().sum().sum() - m1.sum()), m1.abs().sum()); + if (!internal::isMuchSmallerThan(abs(m1.sum() - (m1+m2).sum()), m1.abs().sum(), test_precision())) + VERIFY_IS_NOT_APPROX(((m1+m2).rowwise().sum()).sum(), m1.sum()); + VERIFY_IS_APPROX(m1.colwise().sum(), m1.colwise().redux(internal::scalar_sum_op())); + + // vector-wise ops + m3 = m1; + VERIFY_IS_APPROX(m3.colwise() += cv1, m1.colwise() + cv1); + m3 = m1; + VERIFY_IS_APPROX(m3.colwise() -= cv1, m1.colwise() - cv1); + m3 = m1; + VERIFY_IS_APPROX(m3.rowwise() += rv1, m1.rowwise() + rv1); + m3 = m1; + VERIFY_IS_APPROX(m3.rowwise() -= rv1, m1.rowwise() - rv1); + + // Conversion from scalar + VERIFY_IS_APPROX((m3 = s1), ArrayType::Constant(rows,cols,s1)); + VERIFY_IS_APPROX((m3 = 1), ArrayType::Constant(rows,cols,1)); + VERIFY_IS_APPROX((m3.topLeftCorner(rows,cols) = 1), ArrayType::Constant(rows,cols,1)); + typedef Array FixedArrayType; + { + FixedArrayType f1(s1); + VERIFY_IS_APPROX(f1, FixedArrayType::Constant(s1)); + FixedArrayType f2(numext::real(s1)); + VERIFY_IS_APPROX(f2, FixedArrayType::Constant(numext::real(s1))); + FixedArrayType f3((int)100*numext::real(s1)); + VERIFY_IS_APPROX(f3, FixedArrayType::Constant((int)100*numext::real(s1))); + f1.setRandom(); + FixedArrayType f4(f1.data()); + VERIFY_IS_APPROX(f4, f1); + } + #if EIGEN_HAS_CXX11 + { + FixedArrayType f1{s1}; + VERIFY_IS_APPROX(f1, FixedArrayType::Constant(s1)); + FixedArrayType f2{numext::real(s1)}; + VERIFY_IS_APPROX(f2, FixedArrayType::Constant(numext::real(s1))); + FixedArrayType f3{(int)100*numext::real(s1)}; + VERIFY_IS_APPROX(f3, FixedArrayType::Constant((int)100*numext::real(s1))); + f1.setRandom(); + FixedArrayType f4{f1.data()}; + VERIFY_IS_APPROX(f4, f1); + } + #endif + + // pow + VERIFY_IS_APPROX(m1.pow(2), m1.square()); + VERIFY_IS_APPROX(pow(m1,2), m1.square()); + VERIFY_IS_APPROX(m1.pow(3), 
m1.cube()); + VERIFY_IS_APPROX(pow(m1,3), m1.cube()); + VERIFY_IS_APPROX((-m1).pow(3), -m1.cube()); + VERIFY_IS_APPROX(pow(2*m1,3), 8*m1.cube()); + ArrayType exponents = ArrayType::Constant(rows, cols, RealScalar(2)); + VERIFY_IS_APPROX(Eigen::pow(m1,exponents), m1.square()); + VERIFY_IS_APPROX(m1.pow(exponents), m1.square()); + VERIFY_IS_APPROX(Eigen::pow(2*m1,exponents), 4*m1.square()); + VERIFY_IS_APPROX((2*m1).pow(exponents), 4*m1.square()); + VERIFY_IS_APPROX(Eigen::pow(m1,2*exponents), m1.square().square()); + VERIFY_IS_APPROX(m1.pow(2*exponents), m1.square().square()); + VERIFY_IS_APPROX(Eigen::pow(m1(0,0), exponents), ArrayType::Constant(rows,cols,m1(0,0)*m1(0,0))); + + // Check possible conflicts with 1D ctor + typedef Array OneDArrayType; + { + OneDArrayType o1(rows); + VERIFY(o1.size()==rows); + OneDArrayType o2(static_cast(rows)); + VERIFY(o2.size()==rows); + } + #if EIGEN_HAS_CXX11 + { + OneDArrayType o1{rows}; + VERIFY(o1.size()==rows); + OneDArrayType o4{int(rows)}; + VERIFY(o4.size()==rows); + } + #endif + // Check possible conflicts with 2D ctor + typedef Array TwoDArrayType; + typedef Array ArrayType2; + { + TwoDArrayType o1(rows,cols); + VERIFY(o1.rows()==rows); + VERIFY(o1.cols()==cols); + TwoDArrayType o2(static_cast(rows),static_cast(cols)); + VERIFY(o2.rows()==rows); + VERIFY(o2.cols()==cols); + + ArrayType2 o3(rows,cols); + VERIFY(o3(0)==Scalar(rows) && o3(1)==Scalar(cols)); + ArrayType2 o4(static_cast(rows),static_cast(cols)); + VERIFY(o4(0)==Scalar(rows) && o4(1)==Scalar(cols)); + } + #if EIGEN_HAS_CXX11 + { + TwoDArrayType o1{rows,cols}; + VERIFY(o1.rows()==rows); + VERIFY(o1.cols()==cols); + TwoDArrayType o2{int(rows),int(cols)}; + VERIFY(o2.rows()==rows); + VERIFY(o2.cols()==cols); + + ArrayType2 o3{rows,cols}; + VERIFY(o3(0)==Scalar(rows) && o3(1)==Scalar(cols)); + ArrayType2 o4{int(rows),int(cols)}; + VERIFY(o4(0)==Scalar(rows) && o4(1)==Scalar(cols)); + } + #endif +} + +template void comparisons(const ArrayType& m) +{ + using 
std::abs; + typedef typename ArrayType::Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + + Index rows = m.rows(); + Index cols = m.cols(); + + Index r = internal::random(0, rows-1), + c = internal::random(0, cols-1); + + ArrayType m1 = ArrayType::Random(rows, cols), + m2 = ArrayType::Random(rows, cols), + m3(rows, cols), + m4 = m1; + + m4 = (m4.abs()==Scalar(0)).select(1,m4); + + VERIFY(((m1 + Scalar(1)) > m1).all()); + VERIFY(((m1 - Scalar(1)) < m1).all()); + if (rows*cols>1) + { + m3 = m1; + m3(r,c) += 1; + VERIFY(! (m1 < m3).all() ); + VERIFY(! (m1 > m3).all() ); + } + VERIFY(!(m1 > m2 && m1 < m2).any()); + VERIFY((m1 <= m2 || m1 >= m2).all()); + + // comparisons array to scalar + VERIFY( (m1 != (m1(r,c)+1) ).any() ); + VERIFY( (m1 > (m1(r,c)-1) ).any() ); + VERIFY( (m1 < (m1(r,c)+1) ).any() ); + VERIFY( (m1 == m1(r,c) ).any() ); + + // comparisons scalar to array + VERIFY( ( (m1(r,c)+1) != m1).any() ); + VERIFY( ( (m1(r,c)-1) < m1).any() ); + VERIFY( ( (m1(r,c)+1) > m1).any() ); + VERIFY( ( m1(r,c) == m1).any() ); + + // test Select + VERIFY_IS_APPROX( (m1m2).select(m1,m2), m1.cwiseMax(m2) ); + Scalar mid = (m1.cwiseAbs().minCoeff() + m1.cwiseAbs().maxCoeff())/Scalar(2); + for (int j=0; j=ArrayType::Constant(rows,cols,mid)) + .select(m1,0), m3); + // even shorter version: + VERIFY_IS_APPROX( (m1.abs()RealScalar(0.1)).count() == rows*cols); + + // and/or + VERIFY( (m1RealScalar(0)).count() == 0); + VERIFY( (m1=RealScalar(0)).count() == rows*cols); + RealScalar a = m1.abs().mean(); + VERIFY( (m1<-a || m1>a).count() == (m1.abs()>a).count()); + + typedef Array ArrayOfIndices; + + // TODO allows colwise/rowwise for array + VERIFY_IS_APPROX(((m1.abs()+1)>RealScalar(0.1)).colwise().count(), ArrayOfIndices::Constant(cols,rows).transpose()); + VERIFY_IS_APPROX(((m1.abs()+1)>RealScalar(0.1)).rowwise().count(), ArrayOfIndices::Constant(rows, cols)); +} + +template void array_real(const ArrayType& m) +{ + using std::abs; + using std::sqrt; + typedef typename 
ArrayType::Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + + Index rows = m.rows(); + Index cols = m.cols(); + + ArrayType m1 = ArrayType::Random(rows, cols), + m2 = ArrayType::Random(rows, cols), + m3(rows, cols), + m4 = m1; + + // avoid denormalized values so verification doesn't fail on platforms that don't support them + // denormalized behavior is tested elsewhere (unary_op_test, binary_ops_test) + const Scalar min = (std::numeric_limits::min)(); + m1 = (m1.abs()(); + + // these tests are mostly to check possible compilation issues with free-functions. + VERIFY_IS_APPROX(m1.sin(), sin(m1)); + VERIFY_IS_APPROX(m1.cos(), cos(m1)); + VERIFY_IS_APPROX(m1.tan(), tan(m1)); + VERIFY_IS_APPROX(m1.asin(), asin(m1)); + VERIFY_IS_APPROX(m1.acos(), acos(m1)); + VERIFY_IS_APPROX(m1.atan(), atan(m1)); + VERIFY_IS_APPROX(m1.sinh(), sinh(m1)); + VERIFY_IS_APPROX(m1.cosh(), cosh(m1)); + VERIFY_IS_APPROX(m1.tanh(), tanh(m1)); +#if EIGEN_HAS_CXX11_MATH + VERIFY_IS_APPROX(m1.tanh().atanh(), atanh(tanh(m1))); + VERIFY_IS_APPROX(m1.sinh().asinh(), asinh(sinh(m1))); + VERIFY_IS_APPROX(m1.cosh().acosh(), acosh(cosh(m1))); +#endif + VERIFY_IS_APPROX(m1.logistic(), logistic(m1)); + + VERIFY_IS_APPROX(m1.arg(), arg(m1)); + VERIFY_IS_APPROX(m1.round(), round(m1)); + VERIFY_IS_APPROX(m1.rint(), rint(m1)); + VERIFY_IS_APPROX(m1.floor(), floor(m1)); + VERIFY_IS_APPROX(m1.ceil(), ceil(m1)); + VERIFY((m1.isNaN() == (Eigen::isnan)(m1)).all()); + VERIFY((m1.isInf() == (Eigen::isinf)(m1)).all()); + VERIFY((m1.isFinite() == (Eigen::isfinite)(m1)).all()); + VERIFY_IS_APPROX(m4.inverse(), inverse(m4)); + VERIFY_IS_APPROX(m1.abs(), abs(m1)); + VERIFY_IS_APPROX(m1.abs2(), abs2(m1)); + VERIFY_IS_APPROX(m1.square(), square(m1)); + VERIFY_IS_APPROX(m1.cube(), cube(m1)); + VERIFY_IS_APPROX(cos(m1+RealScalar(3)*m2), cos((m1+RealScalar(3)*m2).eval())); + VERIFY_IS_APPROX(m1.sign(), sign(m1)); + VERIFY((m1.sqrt().sign().isNaN() == (Eigen::isnan)(sign(sqrt(m1)))).all()); + + // avoid inf and 
NaNs so verification doesn't fail + m3 = m4.abs(); + + VERIFY_IS_APPROX(m3.sqrt(), sqrt(abs(m3))); + VERIFY_IS_APPROX(m3.rsqrt(), Scalar(1)/sqrt(abs(m3))); + VERIFY_IS_APPROX(rsqrt(m3), Scalar(1)/sqrt(abs(m3))); + VERIFY_IS_APPROX(m3.log(), log(m3)); + VERIFY_IS_APPROX(m3.log1p(), log1p(m3)); + VERIFY_IS_APPROX(m3.log10(), log10(m3)); + VERIFY_IS_APPROX(m3.log2(), log2(m3)); + + + VERIFY((!(m1>m2) == (m1<=m2)).all()); + + VERIFY_IS_APPROX(sin(m1.asin()), m1); + VERIFY_IS_APPROX(cos(m1.acos()), m1); + VERIFY_IS_APPROX(tan(m1.atan()), m1); + VERIFY_IS_APPROX(sinh(m1), Scalar(0.5)*(exp(m1)-exp(-m1))); + VERIFY_IS_APPROX(cosh(m1), Scalar(0.5)*(exp(m1)+exp(-m1))); + VERIFY_IS_APPROX(tanh(m1), (Scalar(0.5)*(exp(m1)-exp(-m1)))/(Scalar(0.5)*(exp(m1)+exp(-m1)))); + VERIFY_IS_APPROX(logistic(m1), (Scalar(1)/(Scalar(1)+exp(-m1)))); + VERIFY_IS_APPROX(arg(m1), ((m1())*Scalar(std::acos(Scalar(-1)))); + VERIFY((round(m1) <= ceil(m1) && round(m1) >= floor(m1)).all()); + VERIFY((rint(m1) <= ceil(m1) && rint(m1) >= floor(m1)).all()); + VERIFY(((ceil(m1) - round(m1)) <= Scalar(0.5) || (round(m1) - floor(m1)) <= Scalar(0.5)).all()); + VERIFY(((ceil(m1) - round(m1)) <= Scalar(1.0) && (round(m1) - floor(m1)) <= Scalar(1.0)).all()); + VERIFY(((ceil(m1) - rint(m1)) <= Scalar(0.5) || (rint(m1) - floor(m1)) <= Scalar(0.5)).all()); + VERIFY(((ceil(m1) - rint(m1)) <= Scalar(1.0) && (rint(m1) - floor(m1)) <= Scalar(1.0)).all()); + VERIFY((Eigen::isnan)((m1*Scalar(0))/Scalar(0)).all()); + VERIFY((Eigen::isinf)(m4/Scalar(0)).all()); + VERIFY(((Eigen::isfinite)(m1) && (!(Eigen::isfinite)(m1*Scalar(0)/Scalar(0))) && (!(Eigen::isfinite)(m4/Scalar(0)))).all()); + VERIFY_IS_APPROX(inverse(inverse(m4)),m4); + VERIFY((abs(m1) == m1 || abs(m1) == -m1).all()); + VERIFY_IS_APPROX(m3, sqrt(abs2(m3))); + VERIFY_IS_APPROX(m1.absolute_difference(m2), (m1 > m2).select(m1 - m2, m2 - m1)); + VERIFY_IS_APPROX( m1.sign(), -(-m1).sign() ); + VERIFY_IS_APPROX( m1*m1.sign(),m1.abs()); + VERIFY_IS_APPROX(m1.sign() * 
m1.abs(), m1); + + VERIFY_IS_APPROX(numext::abs2(numext::real(m1)) + numext::abs2(numext::imag(m1)), numext::abs2(m1)); + VERIFY_IS_APPROX(numext::abs2(Eigen::real(m1)) + numext::abs2(Eigen::imag(m1)), numext::abs2(m1)); + if(!NumTraits::IsComplex) + VERIFY_IS_APPROX(numext::real(m1), m1); + + // shift argument of logarithm so that it is not zero + Scalar smallNumber = NumTraits::dummy_precision(); + VERIFY_IS_APPROX((m3 + smallNumber).log() , log(abs(m3) + smallNumber)); + VERIFY_IS_APPROX((m3 + smallNumber + Scalar(1)).log() , log1p(abs(m3) + smallNumber)); + + VERIFY_IS_APPROX(m1.exp() * m2.exp(), exp(m1+m2)); + VERIFY_IS_APPROX(m1.exp(), exp(m1)); + VERIFY_IS_APPROX(m1.exp() / m2.exp(),(m1-m2).exp()); + + VERIFY_IS_APPROX(m1.expm1(), expm1(m1)); + VERIFY_IS_APPROX((m3 + smallNumber).exp() - Scalar(1), expm1(abs(m3) + smallNumber)); + + VERIFY_IS_APPROX(m3.pow(RealScalar(0.5)), m3.sqrt()); + VERIFY_IS_APPROX(pow(m3,RealScalar(0.5)), m3.sqrt()); + + VERIFY_IS_APPROX(m3.pow(RealScalar(-0.5)), m3.rsqrt()); + VERIFY_IS_APPROX(pow(m3,RealScalar(-0.5)), m3.rsqrt()); + + // Avoid inf and NaN. 
+ m3 = (m1.square()::epsilon()).select(Scalar(1),m3); + VERIFY_IS_APPROX(m3.pow(RealScalar(-2)), m3.square().inverse()); + pow_test(); + + VERIFY_IS_APPROX(log10(m3), log(m3)/numext::log(Scalar(10))); + VERIFY_IS_APPROX(log2(m3), log(m3)/numext::log(Scalar(2))); + + // scalar by array division + const RealScalar tiny = sqrt(std::numeric_limits::epsilon()); + s1 += Scalar(tiny); + m1 += ArrayType::Constant(rows,cols,Scalar(tiny)); + VERIFY_IS_APPROX(s1/m1, s1 * m1.inverse()); + + // check inplace transpose + m3 = m1; + m3.transposeInPlace(); + VERIFY_IS_APPROX(m3, m1.transpose()); + m3.transposeInPlace(); + VERIFY_IS_APPROX(m3, m1); +} + +template void array_complex(const ArrayType& m) +{ + typedef typename ArrayType::Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + + Index rows = m.rows(); + Index cols = m.cols(); + + ArrayType m1 = ArrayType::Random(rows, cols), + m2(rows, cols), + m4 = m1; + + m4.real() = (m4.real().abs()==RealScalar(0)).select(RealScalar(1),m4.real()); + m4.imag() = (m4.imag().abs()==RealScalar(0)).select(RealScalar(1),m4.imag()); + + Array m3(rows, cols); + + for (Index i = 0; i < m.rows(); ++i) + for (Index j = 0; j < m.cols(); ++j) + m2(i,j) = sqrt(m1(i,j)); + + // these tests are mostly to check possible compilation issues with free-functions. 
+ VERIFY_IS_APPROX(m1.sin(), sin(m1)); + VERIFY_IS_APPROX(m1.cos(), cos(m1)); + VERIFY_IS_APPROX(m1.tan(), tan(m1)); + VERIFY_IS_APPROX(m1.sinh(), sinh(m1)); + VERIFY_IS_APPROX(m1.cosh(), cosh(m1)); + VERIFY_IS_APPROX(m1.tanh(), tanh(m1)); + VERIFY_IS_APPROX(m1.logistic(), logistic(m1)); + VERIFY_IS_APPROX(m1.arg(), arg(m1)); + VERIFY((m1.isNaN() == (Eigen::isnan)(m1)).all()); + VERIFY((m1.isInf() == (Eigen::isinf)(m1)).all()); + VERIFY((m1.isFinite() == (Eigen::isfinite)(m1)).all()); + VERIFY_IS_APPROX(m4.inverse(), inverse(m4)); + VERIFY_IS_APPROX(m1.log(), log(m1)); + VERIFY_IS_APPROX(m1.log10(), log10(m1)); + VERIFY_IS_APPROX(m1.log2(), log2(m1)); + VERIFY_IS_APPROX(m1.abs(), abs(m1)); + VERIFY_IS_APPROX(m1.abs2(), abs2(m1)); + VERIFY_IS_APPROX(m1.sqrt(), sqrt(m1)); + VERIFY_IS_APPROX(m1.square(), square(m1)); + VERIFY_IS_APPROX(m1.cube(), cube(m1)); + VERIFY_IS_APPROX(cos(m1+RealScalar(3)*m2), cos((m1+RealScalar(3)*m2).eval())); + VERIFY_IS_APPROX(m1.sign(), sign(m1)); + + + VERIFY_IS_APPROX(m1.exp() * m2.exp(), exp(m1+m2)); + VERIFY_IS_APPROX(m1.exp(), exp(m1)); + VERIFY_IS_APPROX(m1.exp() / m2.exp(),(m1-m2).exp()); + + VERIFY_IS_APPROX(m1.expm1(), expm1(m1)); + VERIFY_IS_APPROX(expm1(m1), exp(m1) - 1.); + // Check for larger magnitude complex numbers that expm1 matches exp - 1. + VERIFY_IS_APPROX(expm1(10. * m1), exp(10. 
* m1) - 1.); + + VERIFY_IS_APPROX(sinh(m1), 0.5*(exp(m1)-exp(-m1))); + VERIFY_IS_APPROX(cosh(m1), 0.5*(exp(m1)+exp(-m1))); + VERIFY_IS_APPROX(tanh(m1), (0.5*(exp(m1)-exp(-m1)))/(0.5*(exp(m1)+exp(-m1)))); + VERIFY_IS_APPROX(logistic(m1), (1.0/(1.0 + exp(-m1)))); + + for (Index i = 0; i < m.rows(); ++i) + for (Index j = 0; j < m.cols(); ++j) + m3(i,j) = std::atan2(m1(i,j).imag(), m1(i,j).real()); + VERIFY_IS_APPROX(arg(m1), m3); + + std::complex zero(0.0,0.0); + VERIFY((Eigen::isnan)(m1*zero/zero).all()); +#if EIGEN_COMP_MSVC + // msvc complex division is not robust + VERIFY((Eigen::isinf)(m4/RealScalar(0)).all()); +#else +#if EIGEN_COMP_CLANG + // clang's complex division is notoriously broken too + if((numext::isinf)(m4(0,0)/RealScalar(0))) { +#endif + VERIFY((Eigen::isinf)(m4/zero).all()); +#if EIGEN_COMP_CLANG + } + else + { + VERIFY((Eigen::isinf)(m4.real()/zero.real()).all()); + } +#endif +#endif // MSVC + + VERIFY(((Eigen::isfinite)(m1) && (!(Eigen::isfinite)(m1*zero/zero)) && (!(Eigen::isfinite)(m1/zero))).all()); + + VERIFY_IS_APPROX(inverse(inverse(m4)),m4); + VERIFY_IS_APPROX(conj(m1.conjugate()), m1); + VERIFY_IS_APPROX(abs(m1), sqrt(square(m1.real())+square(m1.imag()))); + VERIFY_IS_APPROX(abs(m1), sqrt(abs2(m1))); + VERIFY_IS_APPROX(log10(m1), log(m1)/log(10)); + VERIFY_IS_APPROX(log2(m1), log(m1)/log(2)); + + VERIFY_IS_APPROX( m1.sign(), -(-m1).sign() ); + VERIFY_IS_APPROX( m1.sign() * m1.abs(), m1); + + // scalar by array division + Scalar s1 = internal::random(); + const RealScalar tiny = std::sqrt(std::numeric_limits::epsilon()); + s1 += Scalar(tiny); + m1 += ArrayType::Constant(rows,cols,Scalar(tiny)); + VERIFY_IS_APPROX(s1/m1, s1 * m1.inverse()); + + // check inplace transpose + m2 = m1; + m2.transposeInPlace(); + VERIFY_IS_APPROX(m2, m1.transpose()); + m2.transposeInPlace(); + VERIFY_IS_APPROX(m2, m1); + // Check vectorized inplace transpose. 
+ ArrayType m5 = ArrayType::Random(131, 131); + ArrayType m6 = m5; + m6.transposeInPlace(); + VERIFY_IS_APPROX(m6, m5.transpose()); +} + +template void min_max(const ArrayType& m) +{ + typedef typename ArrayType::Scalar Scalar; + + Index rows = m.rows(); + Index cols = m.cols(); + + ArrayType m1 = ArrayType::Random(rows, cols); + + // min/max with array + Scalar maxM1 = m1.maxCoeff(); + Scalar minM1 = m1.minCoeff(); + + VERIFY_IS_APPROX(ArrayType::Constant(rows,cols, minM1), (m1.min)(ArrayType::Constant(rows,cols, minM1))); + VERIFY_IS_APPROX(m1, (m1.min)(ArrayType::Constant(rows,cols, maxM1))); + + VERIFY_IS_APPROX(ArrayType::Constant(rows,cols, maxM1), (m1.max)(ArrayType::Constant(rows,cols, maxM1))); + VERIFY_IS_APPROX(m1, (m1.max)(ArrayType::Constant(rows,cols, minM1))); + + // min/max with scalar input + VERIFY_IS_APPROX(ArrayType::Constant(rows,cols, minM1), (m1.min)( minM1)); + VERIFY_IS_APPROX(m1, (m1.min)( maxM1)); + + VERIFY_IS_APPROX(ArrayType::Constant(rows,cols, maxM1), (m1.max)( maxM1)); + VERIFY_IS_APPROX(m1, (m1.max)( minM1)); + + + // min/max with various NaN propagation options. 
+ if (m1.size() > 1 && !NumTraits::IsInteger) { + m1(0,0) = NumTraits::quiet_NaN(); + maxM1 = m1.template maxCoeff(); + minM1 = m1.template minCoeff(); + VERIFY((numext::isnan)(maxM1)); + VERIFY((numext::isnan)(minM1)); + + maxM1 = m1.template maxCoeff(); + minM1 = m1.template minCoeff(); + VERIFY(!(numext::isnan)(maxM1)); + VERIFY(!(numext::isnan)(minM1)); + } +} + +template +struct shift_left { + template + Scalar operator()(const Scalar& v) const { + return v << N; + } +}; + +template +struct arithmetic_shift_right { + template + Scalar operator()(const Scalar& v) const { + return v >> N; + } +}; + +template void array_integer(const ArrayType& m) +{ + Index rows = m.rows(); + Index cols = m.cols(); + + ArrayType m1 = ArrayType::Random(rows, cols), + m2(rows, cols); + + m2 = m1.template shiftLeft<2>(); + VERIFY( (m2 == m1.unaryExpr(shift_left<2>())).all() ); + m2 = m1.template shiftLeft<9>(); + VERIFY( (m2 == m1.unaryExpr(shift_left<9>())).all() ); + + m2 = m1.template shiftRight<2>(); + VERIFY( (m2 == m1.unaryExpr(arithmetic_shift_right<2>())).all() ); + m2 = m1.template shiftRight<9>(); + VERIFY( (m2 == m1.unaryExpr(arithmetic_shift_right<9>())).all() ); +} + +EIGEN_DECLARE_TEST(array_cwise) +{ + for(int i = 0; i < g_repeat; i++) { + CALL_SUBTEST_1( array(Array()) ); + CALL_SUBTEST_2( array(Array22f()) ); + CALL_SUBTEST_3( array(Array44d()) ); + CALL_SUBTEST_4( array(ArrayXXcf(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); + CALL_SUBTEST_5( array(ArrayXXf(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); + CALL_SUBTEST_6( array(ArrayXXi(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); + CALL_SUBTEST_6( array(Array(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); + CALL_SUBTEST_6( array_integer(ArrayXXi(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); + CALL_SUBTEST_6( 
array_integer(Array(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); + } + for(int i = 0; i < g_repeat; i++) { + CALL_SUBTEST_1( comparisons(Array()) ); + CALL_SUBTEST_2( comparisons(Array22f()) ); + CALL_SUBTEST_3( comparisons(Array44d()) ); + CALL_SUBTEST_5( comparisons(ArrayXXf(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); + CALL_SUBTEST_6( comparisons(ArrayXXi(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); + } + for(int i = 0; i < g_repeat; i++) { + CALL_SUBTEST_1( min_max(Array()) ); + CALL_SUBTEST_2( min_max(Array22f()) ); + CALL_SUBTEST_3( min_max(Array44d()) ); + CALL_SUBTEST_5( min_max(ArrayXXf(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); + CALL_SUBTEST_6( min_max(ArrayXXi(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); + } + for(int i = 0; i < g_repeat; i++) { + CALL_SUBTEST_1( array_real(Array()) ); + CALL_SUBTEST_2( array_real(Array22f()) ); + CALL_SUBTEST_3( array_real(Array44d()) ); + CALL_SUBTEST_5( array_real(ArrayXXf(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); + CALL_SUBTEST_7( array_real(Array()) ); + CALL_SUBTEST_8( array_real(Array()) ); + } + for(int i = 0; i < g_repeat; i++) { + CALL_SUBTEST_4( array_complex(ArrayXXcf(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); + } + + VERIFY((internal::is_same< internal::global_math_functions_filtering_base::type, int >::value)); + VERIFY((internal::is_same< internal::global_math_functions_filtering_base::type, float >::value)); + VERIFY((internal::is_same< internal::global_math_functions_filtering_base::type, ArrayBase >::value)); + typedef CwiseUnaryOp, ArrayXd > Xpr; + VERIFY((internal::is_same< internal::global_math_functions_filtering_base::type, + ArrayBase + >::value)); +} diff --git a/include/eigen/test/array_reverse.cpp 
b/include/eigen/test/array_reverse.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c77528a5bba280a30af5fc5c0feff638d1b4762d --- /dev/null +++ b/include/eigen/test/array_reverse.cpp @@ -0,0 +1,204 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2006-2008 Benoit Jacob +// Copyright (C) 2009 Ricard Marxer +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#include "main.h" +#include + +using namespace std; + +template void reverse(const MatrixType& m) +{ + typedef typename MatrixType::Scalar Scalar; + typedef Matrix VectorType; + + Index rows = m.rows(); + Index cols = m.cols(); + + // this test relies a lot on Random.h, and there's not much more that we can do + // to test it, hence I consider that we will have tested Random.h + MatrixType m1 = MatrixType::Random(rows, cols), m2; + VectorType v1 = VectorType::Random(rows); + + MatrixType m1_r = m1.reverse(); + // Verify that MatrixBase::reverse() works + for ( int i = 0; i < rows; i++ ) { + for ( int j = 0; j < cols; j++ ) { + VERIFY_IS_APPROX(m1_r(i, j), m1(rows - 1 - i, cols - 1 - j)); + } + } + + Reverse m1_rd(m1); + // Verify that a Reverse default (in both directions) of an expression works + for ( int i = 0; i < rows; i++ ) { + for ( int j = 0; j < cols; j++ ) { + VERIFY_IS_APPROX(m1_rd(i, j), m1(rows - 1 - i, cols - 1 - j)); + } + } + + Reverse m1_rb(m1); + // Verify that a Reverse in both directions of an expression works + for ( int i = 0; i < rows; i++ ) { + for ( int j = 0; j < cols; j++ ) { + VERIFY_IS_APPROX(m1_rb(i, j), m1(rows - 1 - i, cols - 1 - j)); + } + } + + Reverse m1_rv(m1); + // Verify that a Reverse in the vertical directions of an expression works + for ( int i = 0; i < rows; i++ ) { + for ( int j = 0; j < cols; j++ ) { + 
VERIFY_IS_APPROX(m1_rv(i, j), m1(rows - 1 - i, j)); + } + } + + Reverse m1_rh(m1); + // Verify that a Reverse in the horizontal directions of an expression works + for ( int i = 0; i < rows; i++ ) { + for ( int j = 0; j < cols; j++ ) { + VERIFY_IS_APPROX(m1_rh(i, j), m1(i, cols - 1 - j)); + } + } + + VectorType v1_r = v1.reverse(); + // Verify that a VectorType::reverse() of an expression works + for ( int i = 0; i < rows; i++ ) { + VERIFY_IS_APPROX(v1_r(i), v1(rows - 1 - i)); + } + + MatrixType m1_cr = m1.colwise().reverse(); + // Verify that PartialRedux::reverse() works (for colwise()) + for ( int i = 0; i < rows; i++ ) { + for ( int j = 0; j < cols; j++ ) { + VERIFY_IS_APPROX(m1_cr(i, j), m1(rows - 1 - i, j)); + } + } + + MatrixType m1_rr = m1.rowwise().reverse(); + // Verify that PartialRedux::reverse() works (for rowwise()) + for ( int i = 0; i < rows; i++ ) { + for ( int j = 0; j < cols; j++ ) { + VERIFY_IS_APPROX(m1_rr(i, j), m1(i, cols - 1 - j)); + } + } + + Scalar x = internal::random(); + + Index r = internal::random(0, rows-1), + c = internal::random(0, cols-1); + + m1.reverse()(r, c) = x; + VERIFY_IS_APPROX(x, m1(rows - 1 - r, cols - 1 - c)); + + m2 = m1; + m2.reverseInPlace(); + VERIFY_IS_APPROX(m2,m1.reverse().eval()); + + m2 = m1; + m2.col(0).reverseInPlace(); + VERIFY_IS_APPROX(m2.col(0),m1.col(0).reverse().eval()); + + m2 = m1; + m2.row(0).reverseInPlace(); + VERIFY_IS_APPROX(m2.row(0),m1.row(0).reverse().eval()); + + m2 = m1; + m2.rowwise().reverseInPlace(); + VERIFY_IS_APPROX(m2,m1.rowwise().reverse().eval()); + + m2 = m1; + m2.colwise().reverseInPlace(); + VERIFY_IS_APPROX(m2,m1.colwise().reverse().eval()); + + m1.colwise().reverse()(r, c) = x; + VERIFY_IS_APPROX(x, m1(rows - 1 - r, c)); + + m1.rowwise().reverse()(r, c) = x; + VERIFY_IS_APPROX(x, m1(r, cols - 1 - c)); +} + +template +void array_reverse_extra() +{ + Vector4f x; x << 1, 2, 3, 4; + Vector4f y; y << 4, 3, 2, 1; + VERIFY(x.reverse()[1] == 3); + VERIFY(x.reverse() == y); +} + +// 
Simpler version of reverseInPlace leveraging a bug +// in clang 6/7 with -O2 and AVX or AVX512 enabled. +// This simpler version ensure that the clang bug is not simply hidden +// through mis-inlining of reverseInPlace or other minor changes. +template +EIGEN_DONT_INLINE +void bug1684_job1(MatrixType& m1, MatrixType& m2) +{ + m2 = m1; + m2.col(0).swap(m2.col(3)); + m2.col(1).swap(m2.col(2)); +} + +template +EIGEN_DONT_INLINE +void bug1684_job2(MatrixType& m1, MatrixType& m2) +{ + m2 = m1; // load m1/m2 in AVX registers + m1.col(0) = m2.col(3); // perform 128 bits moves + m1.col(1) = m2.col(2); + m1.col(2) = m2.col(1); + m1.col(3) = m2.col(0); +} + +template +EIGEN_DONT_INLINE +void bug1684_job3(MatrixType& m1, MatrixType& m2) +{ + m2 = m1; + Vector4f tmp; + tmp = m2.col(0); + m2.col(0) = m2.col(3); + m2.col(3) = tmp; + tmp = m2.col(1); + m2.col(1) = m2.col(2); + m2.col(2) = tmp; + +} + +template +void bug1684() +{ + Matrix4f m1 = Matrix4f::Random(); + Matrix4f m2 = Matrix4f::Random(); + bug1684_job1(m1,m2); + VERIFY_IS_APPROX(m2, m1.rowwise().reverse().eval()); + bug1684_job2(m1,m2); + VERIFY_IS_APPROX(m2, m1.rowwise().reverse().eval()); + // This one still fail after our swap's workaround, + // but I expect users not to implement their own swap. 
+ // bug1684_job3(m1,m2); + // VERIFY_IS_APPROX(m2, m1.rowwise().reverse().eval()); +} + +EIGEN_DECLARE_TEST(array_reverse) +{ + for(int i = 0; i < g_repeat; i++) { + CALL_SUBTEST_1( reverse(Matrix()) ); + CALL_SUBTEST_2( reverse(Matrix2f()) ); + CALL_SUBTEST_3( reverse(Matrix4f()) ); + CALL_SUBTEST_4( reverse(Matrix4d()) ); + CALL_SUBTEST_5( reverse(MatrixXcf(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); + CALL_SUBTEST_6( reverse(MatrixXi(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); + CALL_SUBTEST_7( reverse(MatrixXcd(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); + CALL_SUBTEST_8( reverse(Matrix()) ); + CALL_SUBTEST_9( reverse(Matrix(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); + CALL_SUBTEST_3( bug1684<0>() ); + } + CALL_SUBTEST_3( array_reverse_extra<0>() ); +} diff --git a/include/eigen/test/bdcsvd.cpp b/include/eigen/test/bdcsvd.cpp new file mode 100644 index 0000000000000000000000000000000000000000..3ac83f52f589531876445d8ddf4fc5117dfa4c7b --- /dev/null +++ b/include/eigen/test/bdcsvd.cpp @@ -0,0 +1,163 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2013 Gauthier Brun +// Copyright (C) 2013 Nicolas Carre +// Copyright (C) 2013 Jean Ceccato +// Copyright (C) 2013 Pierre Zoppitelli +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/ + +// We explicitly disable deprecated declarations for this set of tests +// because we purposely verify assertions for the deprecated SVD runtime +// option behavior. 
+#if defined(__GNUC__) +#pragma GCC diagnostic ignored "-Wdeprecated-declarations" +#elif defined(_MSC_VER) +#pragma warning( disable : 4996 ) +#endif + +// discard stack allocation as that too bypasses malloc +#define EIGEN_STACK_ALLOCATION_LIMIT 0 +#define EIGEN_RUNTIME_NO_MALLOC + +#include "main.h" +#include +#include +#include + + +#define SVD_DEFAULT(M) BDCSVD +#define SVD_FOR_MIN_NORM(M) BDCSVD +#include "svd_common.h" + +// Check all variants of JacobiSVD +template +void bdcsvd(const MatrixType& a = MatrixType(), bool pickrandom = true) +{ + MatrixType m; + if(pickrandom) { + m.resizeLike(a); + svd_fill_random(m); + } + else + m = a; + + CALL_SUBTEST(( svd_test_all_computation_options >(m, false) )); +} + +template +void bdcsvd_method() +{ + enum { Size = MatrixType::RowsAtCompileTime }; + typedef typename MatrixType::RealScalar RealScalar; + typedef Matrix RealVecType; + MatrixType m = MatrixType::Identity(); + VERIFY_IS_APPROX(m.bdcSvd().singularValues(), RealVecType::Ones()); + VERIFY_RAISES_ASSERT(m.bdcSvd().matrixU()); + VERIFY_RAISES_ASSERT(m.bdcSvd().matrixV()); + + // Deprecated behavior. + VERIFY_IS_APPROX(m.bdcSvd(ComputeFullU|ComputeFullV).solve(m), m); + VERIFY_IS_APPROX(m.bdcSvd(ComputeFullU|ComputeFullV).transpose().solve(m), m); + VERIFY_IS_APPROX(m.bdcSvd(ComputeFullU|ComputeFullV).adjoint().solve(m), m); +} + +// Compare the Singular values returned with Jacobi and Bdc. +template +void compare_bdc_jacobi(const MatrixType& a = MatrixType(), unsigned int computationOptions = 0, int algoswap = 16, bool random = true) +{ + MatrixType m = random ? 
MatrixType::Random(a.rows(), a.cols()) : a; + + BDCSVD bdc_svd(m.rows(), m.cols(), computationOptions); + bdc_svd.setSwitchSize(algoswap); + bdc_svd.compute(m); + + JacobiSVD jacobi_svd(m); + VERIFY_IS_APPROX(bdc_svd.singularValues(), jacobi_svd.singularValues()); + + if(computationOptions & ComputeFullU) VERIFY_IS_APPROX(bdc_svd.matrixU(), jacobi_svd.matrixU()); + if(computationOptions & ComputeThinU) VERIFY_IS_APPROX(bdc_svd.matrixU(), jacobi_svd.matrixU()); + if(computationOptions & ComputeFullV) VERIFY_IS_APPROX(bdc_svd.matrixV(), jacobi_svd.matrixV()); + if(computationOptions & ComputeThinV) VERIFY_IS_APPROX(bdc_svd.matrixV(), jacobi_svd.matrixV()); +} + +// Verifies total deflation is **not** triggered. +void compare_bdc_jacobi_instance(bool structure_as_m, int algoswap = 16) +{ + MatrixXd m(4, 3); + if (structure_as_m) { + // The first 3 rows are the reduced form of Matrix 1 as shown below, and it + // has nonzero elements in the first column and diagonals only. + m << 1.056293, 0, 0, + -0.336468, 0.907359, 0, + -1.566245, 0, 0.149150, + -0.1, 0, 0; + } else { + // Matrix 1. 
+ m << 0.882336, 18.3914, -26.7921, + -5.58135, 17.1931, -24.0892, + -20.794, 8.68496, -4.83103, + -8.4981, -10.5451, 23.9072; + } + compare_bdc_jacobi(m, 0, algoswap, false); +} + +EIGEN_DECLARE_TEST(bdcsvd) +{ + CALL_SUBTEST_1(( svd_verify_assert >(Matrix3f()) )); + CALL_SUBTEST_2(( svd_verify_assert >(Matrix4d()) )); + CALL_SUBTEST_3(( svd_verify_assert >(MatrixXf(10,12)) )); + CALL_SUBTEST_4(( svd_verify_assert >(MatrixXcd(7,5)) )); + + CALL_SUBTEST_5(( svd_all_trivial_2x2(bdcsvd) )); + CALL_SUBTEST_6(( svd_all_trivial_2x2(bdcsvd) )); + + for(int i = 0; i < g_repeat; i++) { + CALL_SUBTEST_1(( bdcsvd() )); + CALL_SUBTEST_2(( bdcsvd() )); + CALL_SUBTEST_7(( bdcsvd >() )); + + int r = internal::random(1, EIGEN_TEST_MAX_SIZE/2), + c = internal::random(1, EIGEN_TEST_MAX_SIZE/2); + + TEST_SET_BUT_UNUSED_VARIABLE(r) + TEST_SET_BUT_UNUSED_VARIABLE(c) + + CALL_SUBTEST_8(( bdcsvd(Matrix(r,2)) )); + CALL_SUBTEST_9(( bdcsvd(MatrixXf(r,c)) )); + CALL_SUBTEST_10(( compare_bdc_jacobi(MatrixXf(r,c)) )); + CALL_SUBTEST_11(( bdcsvd(MatrixXd(r,c)) )); + CALL_SUBTEST_12(( compare_bdc_jacobi(MatrixXd(r,c)) )); + CALL_SUBTEST_13(( bdcsvd(MatrixXcd(r,c)) )); + CALL_SUBTEST_14(( compare_bdc_jacobi(MatrixXcd(r,c)) )); + + // Test on inf/nan matrix + CALL_SUBTEST_15( (svd_inf_nan, MatrixXf>()) ); + CALL_SUBTEST_16( (svd_inf_nan, MatrixXd>()) ); + } + + // test matrixbase method + CALL_SUBTEST_17(( bdcsvd_method() )); + CALL_SUBTEST_18(( bdcsvd_method() )); + + // Test problem size constructors + CALL_SUBTEST_19( BDCSVD(10,10) ); + + // Check that preallocation avoids subsequent mallocs + // Disabled because not supported by BDCSVD + // CALL_SUBTEST_9( svd_preallocate() ); + + CALL_SUBTEST_20( svd_underoverflow() ); + + // Without total deflation issues. + CALL_SUBTEST_21(( compare_bdc_jacobi_instance(true) )); + CALL_SUBTEST_22(( compare_bdc_jacobi_instance(false) )); + + // With total deflation issues before, when it shouldn't be triggered. 
+ CALL_SUBTEST_23(( compare_bdc_jacobi_instance(true, 3) )); + CALL_SUBTEST_24(( compare_bdc_jacobi_instance(false, 3) )); +} + diff --git a/include/eigen/test/bfloat16_float.cpp b/include/eigen/test/bfloat16_float.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c3de0b19a0ce6aaa8b65140dd2cebd88e33a1de1 --- /dev/null +++ b/include/eigen/test/bfloat16_float.cpp @@ -0,0 +1,378 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#include +#include +#include + +#include "main.h" + +#include + +#define VERIFY_BFLOAT16_BITS_EQUAL(h, bits) \ + VERIFY_IS_EQUAL((numext::bit_cast(h)), (static_cast(bits))) + +// Make sure it's possible to forward declare Eigen::bfloat16 +namespace Eigen { +struct bfloat16; +} + +using Eigen::bfloat16; + +float BinaryToFloat(uint32_t sign, uint32_t exponent, uint32_t high_mantissa, + uint32_t low_mantissa) { + float dest; + uint32_t src = (sign << 31) + (exponent << 23) + (high_mantissa << 16) + low_mantissa; + memcpy(static_cast(&dest), + static_cast(&src), sizeof(dest)); + return dest; +} + +template + void test_roundtrip() { + // Representable T round trip via bfloat16 + VERIFY_IS_EQUAL((internal::cast(internal::cast(-std::numeric_limits::infinity()))), -std::numeric_limits::infinity()); + VERIFY_IS_EQUAL((internal::cast(internal::cast(std::numeric_limits::infinity()))), std::numeric_limits::infinity()); + VERIFY_IS_EQUAL((internal::cast(internal::cast(T(-1.0)))), T(-1.0)); + VERIFY_IS_EQUAL((internal::cast(internal::cast(T(-0.5)))), T(-0.5)); + VERIFY_IS_EQUAL((internal::cast(internal::cast(T(-0.0)))), T(-0.0)); + VERIFY_IS_EQUAL((internal::cast(internal::cast(T(1.0)))), T(1.0)); + VERIFY_IS_EQUAL((internal::cast(internal::cast(T(0.5)))), T(0.5)); + 
VERIFY_IS_EQUAL((internal::cast(internal::cast(T(0.0)))), T(0.0)); +} + +void test_conversion() +{ + using Eigen::bfloat16_impl::__bfloat16_raw; + + // Round-trip casts + VERIFY_IS_EQUAL( + numext::bit_cast(numext::bit_cast(bfloat16(1.0f))), + bfloat16(1.0f)); + VERIFY_IS_EQUAL( + numext::bit_cast(numext::bit_cast(bfloat16(0.5f))), + bfloat16(0.5f)); + VERIFY_IS_EQUAL( + numext::bit_cast(numext::bit_cast(bfloat16(-0.33333f))), + bfloat16(-0.33333f)); + VERIFY_IS_EQUAL( + numext::bit_cast(numext::bit_cast(bfloat16(0.0f))), + bfloat16(0.0f)); + + // Conversion from float. + VERIFY_BFLOAT16_BITS_EQUAL(bfloat16(1.0f), 0x3f80); + VERIFY_BFLOAT16_BITS_EQUAL(bfloat16(0.5f), 0x3f00); + VERIFY_BFLOAT16_BITS_EQUAL(bfloat16(0.33333f), 0x3eab); + VERIFY_BFLOAT16_BITS_EQUAL(bfloat16(3.38e38f), 0x7f7e); + VERIFY_BFLOAT16_BITS_EQUAL(bfloat16(3.40e38f), 0x7f80); // Becomes infinity. + + // Verify round-to-nearest-even behavior. + float val1 = static_cast(bfloat16(__bfloat16_raw(0x3c00))); + float val2 = static_cast(bfloat16(__bfloat16_raw(0x3c01))); + float val3 = static_cast(bfloat16(__bfloat16_raw(0x3c02))); + VERIFY_BFLOAT16_BITS_EQUAL(bfloat16(0.5f * (val1 + val2)), 0x3c00); + VERIFY_BFLOAT16_BITS_EQUAL(bfloat16(0.5f * (val2 + val3)), 0x3c02); + + // Conversion from int. + VERIFY_BFLOAT16_BITS_EQUAL(bfloat16(-1), 0xbf80); + VERIFY_BFLOAT16_BITS_EQUAL(bfloat16(0), 0x0000); + VERIFY_BFLOAT16_BITS_EQUAL(bfloat16(1), 0x3f80); + VERIFY_BFLOAT16_BITS_EQUAL(bfloat16(2), 0x4000); + VERIFY_BFLOAT16_BITS_EQUAL(bfloat16(3), 0x4040); + VERIFY_BFLOAT16_BITS_EQUAL(bfloat16(12), 0x4140); + + // Conversion from bool. + VERIFY_BFLOAT16_BITS_EQUAL(bfloat16(false), 0x0000); + VERIFY_BFLOAT16_BITS_EQUAL(bfloat16(true), 0x3f80); + + // Conversion to bool + VERIFY_IS_EQUAL(static_cast(bfloat16(3)), true); + VERIFY_IS_EQUAL(static_cast(bfloat16(0.33333f)), true); + VERIFY_IS_EQUAL(bfloat16(-0.0), false); + VERIFY_IS_EQUAL(static_cast(bfloat16(0.0)), false); + + // Explicit conversion to float. 
+ VERIFY_IS_EQUAL(static_cast(bfloat16(__bfloat16_raw(0x0000))), 0.0f); + VERIFY_IS_EQUAL(static_cast(bfloat16(__bfloat16_raw(0x3f80))), 1.0f); + + // Implicit conversion to float + VERIFY_IS_EQUAL(bfloat16(__bfloat16_raw(0x0000)), 0.0f); + VERIFY_IS_EQUAL(bfloat16(__bfloat16_raw(0x3f80)), 1.0f); + + // Zero representations + VERIFY_IS_EQUAL(bfloat16(0.0f), bfloat16(0.0f)); + VERIFY_IS_EQUAL(bfloat16(-0.0f), bfloat16(0.0f)); + VERIFY_IS_EQUAL(bfloat16(-0.0f), bfloat16(-0.0f)); + VERIFY_BFLOAT16_BITS_EQUAL(bfloat16(0.0f), 0x0000); + VERIFY_BFLOAT16_BITS_EQUAL(bfloat16(-0.0f), 0x8000); + + // Default is zero + VERIFY_IS_EQUAL(static_cast(bfloat16()), 0.0f); + + // Representable floats round trip via bfloat16 + test_roundtrip(); + test_roundtrip(); + test_roundtrip >(); + test_roundtrip >(); + + // Conversion + Array a; + for (int i = 0; i < 100; i++) a(i) = i + 1.25; + Array b = a.cast(); + Array c = b.cast(); + for (int i = 0; i < 100; ++i) { + VERIFY_LE(numext::abs(c(i) - a(i)), a(i) / 128); + } + + // Epsilon + VERIFY_LE(1.0f, static_cast((std::numeric_limits::epsilon)() + bfloat16(1.0f))); + VERIFY_IS_EQUAL(1.0f, static_cast((std::numeric_limits::epsilon)() / bfloat16(2.0f) + bfloat16(1.0f))); + + // Negate + VERIFY_IS_EQUAL(static_cast(-bfloat16(3.0f)), -3.0f); + VERIFY_IS_EQUAL(static_cast(-bfloat16(-4.5f)), 4.5f); + + +#if !EIGEN_COMP_MSVC + // Visual Studio errors out on divisions by 0 + VERIFY((numext::isnan)(static_cast(bfloat16(0.0 / 0.0)))); + VERIFY((numext::isinf)(static_cast(bfloat16(1.0 / 0.0)))); + VERIFY((numext::isinf)(static_cast(bfloat16(-1.0 / 0.0)))); + + // Visual Studio errors out on divisions by 0 + VERIFY((numext::isnan)(bfloat16(0.0 / 0.0))); + VERIFY((numext::isinf)(bfloat16(1.0 / 0.0))); + VERIFY((numext::isinf)(bfloat16(-1.0 / 0.0))); +#endif + + // NaNs and infinities. + VERIFY(!(numext::isinf)(static_cast(bfloat16(3.38e38f)))); // Largest finite number. 
+ VERIFY(!(numext::isnan)(static_cast(bfloat16(0.0f)))); + VERIFY((numext::isinf)(static_cast(bfloat16(__bfloat16_raw(0xff80))))); + VERIFY((numext::isnan)(static_cast(bfloat16(__bfloat16_raw(0xffc0))))); + VERIFY((numext::isinf)(static_cast(bfloat16(__bfloat16_raw(0x7f80))))); + VERIFY((numext::isnan)(static_cast(bfloat16(__bfloat16_raw(0x7fc0))))); + + // Exactly same checks as above, just directly on the bfloat16 representation. + VERIFY(!(numext::isinf)(bfloat16(__bfloat16_raw(0x7bff)))); + VERIFY(!(numext::isnan)(bfloat16(__bfloat16_raw(0x0000)))); + VERIFY((numext::isinf)(bfloat16(__bfloat16_raw(0xff80)))); + VERIFY((numext::isnan)(bfloat16(__bfloat16_raw(0xffc0)))); + VERIFY((numext::isinf)(bfloat16(__bfloat16_raw(0x7f80)))); + VERIFY((numext::isnan)(bfloat16(__bfloat16_raw(0x7fc0)))); + + VERIFY_BFLOAT16_BITS_EQUAL(bfloat16(BinaryToFloat(0x0, 0xff, 0x40, 0x0)), 0x7fc0); + VERIFY_BFLOAT16_BITS_EQUAL(bfloat16(BinaryToFloat(0x1, 0xff, 0x40, 0x0)), 0xffc0); +} + +void test_numtraits() +{ + std::cout << "epsilon = " << NumTraits::epsilon() << " (0x" << std::hex << numext::bit_cast(NumTraits::epsilon()) << ")" << std::endl; + std::cout << "highest = " << NumTraits::highest() << " (0x" << std::hex << numext::bit_cast(NumTraits::highest()) << ")" << std::endl; + std::cout << "lowest = " << NumTraits::lowest() << " (0x" << std::hex << numext::bit_cast(NumTraits::lowest()) << ")" << std::endl; + std::cout << "min = " << (std::numeric_limits::min)() << " (0x" << std::hex << numext::bit_cast((std::numeric_limits::min)()) << ")" << std::endl; + std::cout << "denorm min = " << (std::numeric_limits::denorm_min)() << " (0x" << std::hex << numext::bit_cast((std::numeric_limits::denorm_min)()) << ")" << std::endl; + std::cout << "infinity = " << NumTraits::infinity() << " (0x" << std::hex << numext::bit_cast(NumTraits::infinity()) << ")" << std::endl; + std::cout << "quiet nan = " << NumTraits::quiet_NaN() << " (0x" << std::hex << numext::bit_cast(NumTraits::quiet_NaN()) << 
")" << std::endl; + std::cout << "signaling nan = " << std::numeric_limits::signaling_NaN() << " (0x" << std::hex << numext::bit_cast(std::numeric_limits::signaling_NaN()) << ")" << std::endl; + + VERIFY(NumTraits::IsSigned); + + VERIFY_IS_EQUAL( + numext::bit_cast(std::numeric_limits::infinity()), + numext::bit_cast(bfloat16(std::numeric_limits::infinity())) ); + // There is no guarantee that casting a 32-bit NaN to bfloat16 has a precise + // bit pattern. We test that it is in fact a NaN, then test the signaling + // bit (msb of significand is 1 for quiet, 0 for signaling). + const numext::uint16_t BFLOAT16_QUIET_BIT = 0x0040; + VERIFY( + (numext::isnan)(std::numeric_limits::quiet_NaN()) + && (numext::isnan)(bfloat16(std::numeric_limits::quiet_NaN())) + && ((numext::bit_cast(std::numeric_limits::quiet_NaN()) & BFLOAT16_QUIET_BIT) > 0) + && ((numext::bit_cast(bfloat16(std::numeric_limits::quiet_NaN())) & BFLOAT16_QUIET_BIT) > 0) ); + // After a cast to bfloat16, a signaling NaN may become non-signaling. Thus, + // we check that both are NaN, and that only the `numeric_limits` version is + // signaling. 
+ VERIFY( + (numext::isnan)(std::numeric_limits::signaling_NaN()) + && (numext::isnan)(bfloat16(std::numeric_limits::signaling_NaN())) + && ((numext::bit_cast(std::numeric_limits::signaling_NaN()) & BFLOAT16_QUIET_BIT) == 0) ); + + VERIFY( (std::numeric_limits::min)() > bfloat16(0.f) ); + VERIFY( (std::numeric_limits::denorm_min)() > bfloat16(0.f) ); + VERIFY_IS_EQUAL( (std::numeric_limits::denorm_min)()/bfloat16(2), bfloat16(0.f) ); +} + +void test_arithmetic() +{ + VERIFY_IS_EQUAL(static_cast(bfloat16(2) + bfloat16(2)), 4); + VERIFY_IS_EQUAL(static_cast(bfloat16(2) + bfloat16(-2)), 0); + VERIFY_IS_APPROX(static_cast(bfloat16(0.33333f) + bfloat16(0.66667f)), 1.0f); + VERIFY_IS_EQUAL(static_cast(bfloat16(2.0f) * bfloat16(-5.5f)), -11.0f); + VERIFY_IS_APPROX(static_cast(bfloat16(1.0f) / bfloat16(3.0f)), 0.3339f); + VERIFY_IS_EQUAL(static_cast(-bfloat16(4096.0f)), -4096.0f); + VERIFY_IS_EQUAL(static_cast(-bfloat16(-4096.0f)), 4096.0f); +} + +void test_comparison() +{ + VERIFY(bfloat16(1.0f) > bfloat16(0.5f)); + VERIFY(bfloat16(0.5f) < bfloat16(1.0f)); + VERIFY(!(bfloat16(1.0f) < bfloat16(0.5f))); + VERIFY(!(bfloat16(0.5f) > bfloat16(1.0f))); + + VERIFY(!(bfloat16(4.0f) > bfloat16(4.0f))); + VERIFY(!(bfloat16(4.0f) < bfloat16(4.0f))); + + VERIFY(!(bfloat16(0.0f) < bfloat16(-0.0f))); + VERIFY(!(bfloat16(-0.0f) < bfloat16(0.0f))); + VERIFY(!(bfloat16(0.0f) > bfloat16(-0.0f))); + VERIFY(!(bfloat16(-0.0f) > bfloat16(0.0f))); + + VERIFY(bfloat16(0.2f) > bfloat16(-1.0f)); + VERIFY(bfloat16(-1.0f) < bfloat16(0.2f)); + VERIFY(bfloat16(-16.0f) < bfloat16(-15.0f)); + + VERIFY(bfloat16(1.0f) == bfloat16(1.0f)); + VERIFY(bfloat16(1.0f) != bfloat16(2.0f)); + + // Comparisons with NaNs and infinities. 
+#if !EIGEN_COMP_MSVC + // Visual Studio errors out on divisions by 0 + VERIFY(!(bfloat16(0.0 / 0.0) == bfloat16(0.0 / 0.0))); + VERIFY(bfloat16(0.0 / 0.0) != bfloat16(0.0 / 0.0)); + + VERIFY(!(bfloat16(1.0) == bfloat16(0.0 / 0.0))); + VERIFY(!(bfloat16(1.0) < bfloat16(0.0 / 0.0))); + VERIFY(!(bfloat16(1.0) > bfloat16(0.0 / 0.0))); + VERIFY(bfloat16(1.0) != bfloat16(0.0 / 0.0)); + + VERIFY(bfloat16(1.0) < bfloat16(1.0 / 0.0)); + VERIFY(bfloat16(1.0) > bfloat16(-1.0 / 0.0)); +#endif +} + +void test_basic_functions() +{ + VERIFY_IS_EQUAL(static_cast(numext::abs(bfloat16(3.5f))), 3.5f); + VERIFY_IS_EQUAL(static_cast(abs(bfloat16(3.5f))), 3.5f); + VERIFY_IS_EQUAL(static_cast(numext::abs(bfloat16(-3.5f))), 3.5f); + VERIFY_IS_EQUAL(static_cast(abs(bfloat16(-3.5f))), 3.5f); + + VERIFY_IS_EQUAL(static_cast(numext::floor(bfloat16(3.5f))), 3.0f); + VERIFY_IS_EQUAL(static_cast(floor(bfloat16(3.5f))), 3.0f); + VERIFY_IS_EQUAL(static_cast(numext::floor(bfloat16(-3.5f))), -4.0f); + VERIFY_IS_EQUAL(static_cast(floor(bfloat16(-3.5f))), -4.0f); + + VERIFY_IS_EQUAL(static_cast(numext::ceil(bfloat16(3.5f))), 4.0f); + VERIFY_IS_EQUAL(static_cast(ceil(bfloat16(3.5f))), 4.0f); + VERIFY_IS_EQUAL(static_cast(numext::ceil(bfloat16(-3.5f))), -3.0f); + VERIFY_IS_EQUAL(static_cast(ceil(bfloat16(-3.5f))), -3.0f); + + VERIFY_IS_APPROX(static_cast(numext::sqrt(bfloat16(0.0f))), 0.0f); + VERIFY_IS_APPROX(static_cast(sqrt(bfloat16(0.0f))), 0.0f); + VERIFY_IS_APPROX(static_cast(numext::sqrt(bfloat16(4.0f))), 2.0f); + VERIFY_IS_APPROX(static_cast(sqrt(bfloat16(4.0f))), 2.0f); + + VERIFY_IS_APPROX(static_cast(numext::pow(bfloat16(0.0f), bfloat16(1.0f))), 0.0f); + VERIFY_IS_APPROX(static_cast(pow(bfloat16(0.0f), bfloat16(1.0f))), 0.0f); + VERIFY_IS_APPROX(static_cast(numext::pow(bfloat16(2.0f), bfloat16(2.0f))), 4.0f); + VERIFY_IS_APPROX(static_cast(pow(bfloat16(2.0f), bfloat16(2.0f))), 4.0f); + + VERIFY_IS_EQUAL(static_cast(numext::exp(bfloat16(0.0f))), 1.0f); + 
VERIFY_IS_EQUAL(static_cast(exp(bfloat16(0.0f))), 1.0f); + VERIFY_IS_APPROX(static_cast(numext::exp(bfloat16(EIGEN_PI))), 20.f + static_cast(EIGEN_PI)); + VERIFY_IS_APPROX(static_cast(exp(bfloat16(EIGEN_PI))), 20.f + static_cast(EIGEN_PI)); + + VERIFY_IS_EQUAL(static_cast(numext::expm1(bfloat16(0.0f))), 0.0f); + VERIFY_IS_EQUAL(static_cast(expm1(bfloat16(0.0f))), 0.0f); + VERIFY_IS_APPROX(static_cast(numext::expm1(bfloat16(2.0f))), 6.375f); + VERIFY_IS_APPROX(static_cast(expm1(bfloat16(2.0f))), 6.375f); + + VERIFY_IS_EQUAL(static_cast(numext::log(bfloat16(1.0f))), 0.0f); + VERIFY_IS_EQUAL(static_cast(log(bfloat16(1.0f))), 0.0f); + VERIFY_IS_APPROX(static_cast(numext::log(bfloat16(10.0f))), 2.296875f); + VERIFY_IS_APPROX(static_cast(log(bfloat16(10.0f))), 2.296875f); + + VERIFY_IS_EQUAL(static_cast(numext::log1p(bfloat16(0.0f))), 0.0f); + VERIFY_IS_EQUAL(static_cast(log1p(bfloat16(0.0f))), 0.0f); + VERIFY_IS_APPROX(static_cast(numext::log1p(bfloat16(10.0f))), 2.390625f); + VERIFY_IS_APPROX(static_cast(log1p(bfloat16(10.0f))), 2.390625f); +} + +void test_trigonometric_functions() +{ + VERIFY_IS_APPROX(numext::cos(bfloat16(0.0f)), bfloat16(cosf(0.0f))); + VERIFY_IS_APPROX(cos(bfloat16(0.0f)), bfloat16(cosf(0.0f))); + VERIFY_IS_APPROX(numext::cos(bfloat16(EIGEN_PI)), bfloat16(cosf(EIGEN_PI))); + // VERIFY_IS_APPROX(numext::cos(bfloat16(EIGEN_PI/2)), bfloat16(cosf(EIGEN_PI/2))); + // VERIFY_IS_APPROX(numext::cos(bfloat16(3*EIGEN_PI/2)), bfloat16(cosf(3*EIGEN_PI/2))); + VERIFY_IS_APPROX(numext::cos(bfloat16(3.5f)), bfloat16(cosf(3.5f))); + + VERIFY_IS_APPROX(numext::sin(bfloat16(0.0f)), bfloat16(sinf(0.0f))); + VERIFY_IS_APPROX(sin(bfloat16(0.0f)), bfloat16(sinf(0.0f))); + // VERIFY_IS_APPROX(numext::sin(bfloat16(EIGEN_PI)), bfloat16(sinf(EIGEN_PI))); + VERIFY_IS_APPROX(numext::sin(bfloat16(EIGEN_PI/2)), bfloat16(sinf(EIGEN_PI/2))); + VERIFY_IS_APPROX(numext::sin(bfloat16(3*EIGEN_PI/2)), bfloat16(sinf(3*EIGEN_PI/2))); + VERIFY_IS_APPROX(numext::sin(bfloat16(3.5f)), 
bfloat16(sinf(3.5f))); + + VERIFY_IS_APPROX(numext::tan(bfloat16(0.0f)), bfloat16(tanf(0.0f))); + VERIFY_IS_APPROX(tan(bfloat16(0.0f)), bfloat16(tanf(0.0f))); + // VERIFY_IS_APPROX(numext::tan(bfloat16(EIGEN_PI)), bfloat16(tanf(EIGEN_PI))); + // VERIFY_IS_APPROX(numext::tan(bfloat16(EIGEN_PI/2)), bfloat16(tanf(EIGEN_PI/2))); + // VERIFY_IS_APPROX(numext::tan(bfloat16(3*EIGEN_PI/2)), bfloat16(tanf(3*EIGEN_PI/2))); + VERIFY_IS_APPROX(numext::tan(bfloat16(3.5f)), bfloat16(tanf(3.5f))); +} + +void test_array() +{ + typedef Array ArrayXh; + Index size = internal::random(1,10); + Index i = internal::random(0,size-1); + ArrayXh a1 = ArrayXh::Random(size), a2 = ArrayXh::Random(size); + VERIFY_IS_APPROX( a1+a1, bfloat16(2)*a1 ); + VERIFY( (a1.abs() >= bfloat16(0)).all() ); + VERIFY_IS_APPROX( (a1*a1).sqrt(), a1.abs() ); + + VERIFY( ((a1.min)(a2) <= (a1.max)(a2)).all() ); + a1(i) = bfloat16(-10.); + VERIFY_IS_EQUAL( a1.minCoeff(), bfloat16(-10.) ); + a1(i) = bfloat16(10.); + VERIFY_IS_EQUAL( a1.maxCoeff(), bfloat16(10.) 
); + + std::stringstream ss; + ss << a1; +} + +void test_product() +{ + typedef Matrix MatrixXh; + Index rows = internal::random(1,EIGEN_TEST_MAX_SIZE); + Index cols = internal::random(1,EIGEN_TEST_MAX_SIZE); + Index depth = internal::random(1,EIGEN_TEST_MAX_SIZE); + MatrixXh Ah = MatrixXh::Random(rows,depth); + MatrixXh Bh = MatrixXh::Random(depth,cols); + MatrixXh Ch = MatrixXh::Random(rows,cols); + MatrixXf Af = Ah.cast(); + MatrixXf Bf = Bh.cast(); + MatrixXf Cf = Ch.cast(); + VERIFY_IS_APPROX(Ch.noalias()+=Ah*Bh, (Cf.noalias()+=Af*Bf).cast()); +} + +EIGEN_DECLARE_TEST(bfloat16_float) +{ + CALL_SUBTEST(test_numtraits()); + for(int i = 0; i < g_repeat; i++) { + CALL_SUBTEST(test_conversion()); + CALL_SUBTEST(test_arithmetic()); + CALL_SUBTEST(test_comparison()); + CALL_SUBTEST(test_basic_functions()); + CALL_SUBTEST(test_trigonometric_functions()); + CALL_SUBTEST(test_array()); + CALL_SUBTEST(test_product()); + } +} diff --git a/include/eigen/test/boostmultiprec.cpp b/include/eigen/test/boostmultiprec.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e83e9704479c9a354677f60f0547d464e9b4ea4c --- /dev/null +++ b/include/eigen/test/boostmultiprec.cpp @@ -0,0 +1,207 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#include + +#ifdef EIGEN_TEST_MAX_SIZE +#undef EIGEN_TEST_MAX_SIZE +#endif + +#define EIGEN_TEST_MAX_SIZE 50 + +#ifdef EIGEN_TEST_PART_1 +#include "cholesky.cpp" +#endif + +#ifdef EIGEN_TEST_PART_2 +#include "lu.cpp" +#endif + +#ifdef EIGEN_TEST_PART_3 +#include "qr.cpp" +#endif + +#ifdef EIGEN_TEST_PART_4 +#include "qr_colpivoting.cpp" +#endif + +#ifdef EIGEN_TEST_PART_5 +#include "qr_fullpivoting.cpp" +#endif + +#ifdef EIGEN_TEST_PART_6 +#include "eigensolver_selfadjoint.cpp" +#endif + +#ifdef EIGEN_TEST_PART_7 +#include "eigensolver_generic.cpp" +#endif + +#ifdef EIGEN_TEST_PART_8 +#include "eigensolver_generalized_real.cpp" +#endif + +#ifdef EIGEN_TEST_PART_9 +#include "jacobisvd.cpp" +#endif + +#ifdef EIGEN_TEST_PART_10 +#include "bdcsvd.cpp" +#endif + +#ifdef EIGEN_TEST_PART_11 +#include "simplicial_cholesky.cpp" +#endif + +#include + +#undef min +#undef max +#undef isnan +#undef isinf +#undef isfinite +#undef I + +#include +#include +#include +#include +#include + +typedef boost::multiprecision::number, boost::multiprecision::et_on> Real; + +namespace Eigen { + template<> struct NumTraits : GenericNumTraits { + static inline Real dummy_precision() { return 1e-50; } + }; + + template + struct NumTraits > : NumTraits {}; + + template<> + Real test_precision() { return 1e-50; } + + // needed in C++93 mode where number does not support explicit cast. 
+ namespace internal { + template + struct cast_impl { + static inline NewType run(const Real& x) { + return x.template convert_to(); + } + }; + + template<> + struct cast_impl > { + static inline std::complex run(const Real& x) { + return std::complex(x); + } + }; + } +} + +namespace boost { +namespace multiprecision { + // to make ADL works as expected: + using boost::math::isfinite; + using boost::math::isnan; + using boost::math::isinf; + using boost::math::copysign; + using boost::math::hypot; + + // The following is needed for std::complex: + Real fabs(const Real& a) { return abs EIGEN_NOT_A_MACRO (a); } + Real fmax(const Real& a, const Real& b) { using std::max; return max(a,b); } + + // some specialization for the unit tests: + inline bool test_isMuchSmallerThan(const Real& a, const Real& b) { + return internal::isMuchSmallerThan(a, b, test_precision()); + } + + inline bool test_isApprox(const Real& a, const Real& b) { + return internal::isApprox(a, b, test_precision()); + } + + inline bool test_isApproxOrLessThan(const Real& a, const Real& b) { + return internal::isApproxOrLessThan(a, b, test_precision()); + } + + Real get_test_precision(const Real&) { + return test_precision(); + } + + Real test_relative_error(const Real &a, const Real &b) { + using Eigen::numext::abs2; + return sqrt(abs2(a-b)/Eigen::numext::mini(abs2(a),abs2(b))); + } +} +} + +namespace Eigen { + +} + +EIGEN_DECLARE_TEST(boostmultiprec) +{ + typedef Matrix Mat; + typedef Matrix,Dynamic,Dynamic> MatC; + + std::cout << "NumTraits::epsilon() = " << NumTraits::epsilon() << std::endl; + std::cout << "NumTraits::dummy_precision() = " << NumTraits::dummy_precision() << std::endl; + std::cout << "NumTraits::lowest() = " << NumTraits::lowest() << std::endl; + std::cout << "NumTraits::highest() = " << NumTraits::highest() << std::endl; + std::cout << "NumTraits::digits10() = " << NumTraits::digits10() << std::endl; + + // check stream output + { + Mat A(10,10); + A.setRandom(); + std::stringstream 
ss; + ss << A; + } + { + MatC A(10,10); + A.setRandom(); + std::stringstream ss; + ss << A; + } + + for(int i = 0; i < g_repeat; i++) { + int s = internal::random(1,EIGEN_TEST_MAX_SIZE); + + CALL_SUBTEST_1( cholesky(Mat(s,s)) ); + + CALL_SUBTEST_2( lu_non_invertible() ); + CALL_SUBTEST_2( lu_invertible() ); + CALL_SUBTEST_2( lu_non_invertible() ); + CALL_SUBTEST_2( lu_invertible() ); + + CALL_SUBTEST_3( qr(Mat(internal::random(1,EIGEN_TEST_MAX_SIZE),internal::random(1,EIGEN_TEST_MAX_SIZE))) ); + CALL_SUBTEST_3( qr_invertible() ); + + CALL_SUBTEST_4( qr() ); + CALL_SUBTEST_4( cod() ); + CALL_SUBTEST_4( qr_invertible() ); + + CALL_SUBTEST_5( qr() ); + CALL_SUBTEST_5( qr_invertible() ); + + CALL_SUBTEST_6( selfadjointeigensolver(Mat(s,s)) ); + + CALL_SUBTEST_7( eigensolver(Mat(s,s)) ); + + CALL_SUBTEST_8( generalized_eigensolver_real(Mat(s,s)) ); + + TEST_SET_BUT_UNUSED_VARIABLE(s) + } + + CALL_SUBTEST_9(( jacobisvd(Mat(internal::random(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE), internal::random(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/2))) )); + CALL_SUBTEST_10(( bdcsvd(Mat(internal::random(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE), internal::random(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/2))) )); + + CALL_SUBTEST_11(( test_simplicial_cholesky_T() )); +} diff --git a/include/eigen/test/cholesky.cpp b/include/eigen/test/cholesky.cpp new file mode 100644 index 0000000000000000000000000000000000000000..0b1a7b45b2c2b8d93ad03030fa195e3ac97fb6b8 --- /dev/null +++ b/include/eigen/test/cholesky.cpp @@ -0,0 +1,532 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#define TEST_ENABLE_TEMPORARY_TRACKING + +#include "main.h" +#include +#include +#include "solverbase.h" + +template +typename MatrixType::RealScalar matrix_l1_norm(const MatrixType& m) { + if(m.cols()==0) return typename MatrixType::RealScalar(0); + MatrixType symm = m.template selfadjointView(); + return symm.cwiseAbs().colwise().sum().maxCoeff(); +} + +template class CholType> void test_chol_update(const MatrixType& symm) +{ + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::RealScalar RealScalar; + typedef Matrix VectorType; + + MatrixType symmLo = symm.template triangularView(); + MatrixType symmUp = symm.template triangularView(); + MatrixType symmCpy = symm; + + CholType chollo(symmLo); + CholType cholup(symmUp); + + for (int k=0; k<10; ++k) + { + VectorType vec = VectorType::Random(symm.rows()); + RealScalar sigma = internal::random(); + symmCpy += sigma * vec * vec.adjoint(); + + // we are doing some downdates, so it might be the case that the matrix is not SPD anymore + CholType chol(symmCpy); + if(chol.info()!=Success) + break; + + chollo.rankUpdate(vec, sigma); + VERIFY_IS_APPROX(symmCpy, chollo.reconstructedMatrix()); + + cholup.rankUpdate(vec, sigma); + VERIFY_IS_APPROX(symmCpy, cholup.reconstructedMatrix()); + } +} + +template void cholesky(const MatrixType& m) +{ + /* this test covers the following files: + LLT.h LDLT.h + */ + Index rows = m.rows(); + Index cols = m.cols(); + + typedef typename MatrixType::Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + typedef Matrix SquareMatrixType; + typedef Matrix VectorType; + + MatrixType a0 = MatrixType::Random(rows,cols); + VectorType vecB = VectorType::Random(rows), vecX(rows); + MatrixType matB = MatrixType::Random(rows,cols), matX(rows,cols); + SquareMatrixType symm = a0 * a0.adjoint(); + // let's make sure the matrix is not singular or near singular + for (int k=0; k<3; ++k) + { + MatrixType a1 = MatrixType::Random(rows,cols); + symm += a1 * a1.adjoint(); + } 
+ + { + STATIC_CHECK(( internal::is_same::StorageIndex,int>::value )); + STATIC_CHECK(( internal::is_same::StorageIndex,int>::value )); + + SquareMatrixType symmUp = symm.template triangularView(); + SquareMatrixType symmLo = symm.template triangularView(); + + LLT chollo(symmLo); + VERIFY_IS_APPROX(symm, chollo.reconstructedMatrix()); + + check_solverbase(symm, chollo, rows, rows, 1); + check_solverbase(symm, chollo, rows, cols, rows); + + const MatrixType symmLo_inverse = chollo.solve(MatrixType::Identity(rows,cols)); + RealScalar rcond = (RealScalar(1) / matrix_l1_norm(symmLo)) / + matrix_l1_norm(symmLo_inverse); + RealScalar rcond_est = chollo.rcond(); + // Verify that the estimated condition number is within a factor of 10 of the + // truth. + VERIFY(rcond_est >= rcond / 10 && rcond_est <= rcond * 10); + + // test the upper mode + LLT cholup(symmUp); + VERIFY_IS_APPROX(symm, cholup.reconstructedMatrix()); + vecX = cholup.solve(vecB); + VERIFY_IS_APPROX(symm * vecX, vecB); + matX = cholup.solve(matB); + VERIFY_IS_APPROX(symm * matX, matB); + + // Verify that the estimated condition number is within a factor of 10 of the + // truth. 
+ const MatrixType symmUp_inverse = cholup.solve(MatrixType::Identity(rows,cols)); + rcond = (RealScalar(1) / matrix_l1_norm(symmUp)) / + matrix_l1_norm(symmUp_inverse); + rcond_est = cholup.rcond(); + VERIFY(rcond_est >= rcond / 10 && rcond_est <= rcond * 10); + + + MatrixType neg = -symmLo; + chollo.compute(neg); + VERIFY(neg.size()==0 || chollo.info()==NumericalIssue); + + VERIFY_IS_APPROX(MatrixType(chollo.matrixL().transpose().conjugate()), MatrixType(chollo.matrixU())); + VERIFY_IS_APPROX(MatrixType(chollo.matrixU().transpose().conjugate()), MatrixType(chollo.matrixL())); + VERIFY_IS_APPROX(MatrixType(cholup.matrixL().transpose().conjugate()), MatrixType(cholup.matrixU())); + VERIFY_IS_APPROX(MatrixType(cholup.matrixU().transpose().conjugate()), MatrixType(cholup.matrixL())); + + // test some special use cases of SelfCwiseBinaryOp: + MatrixType m1 = MatrixType::Random(rows,cols), m2(rows,cols); + m2 = m1; + m2 += symmLo.template selfadjointView().llt().solve(matB); + VERIFY_IS_APPROX(m2, m1 + symmLo.template selfadjointView().llt().solve(matB)); + m2 = m1; + m2 -= symmLo.template selfadjointView().llt().solve(matB); + VERIFY_IS_APPROX(m2, m1 - symmLo.template selfadjointView().llt().solve(matB)); + m2 = m1; + m2.noalias() += symmLo.template selfadjointView().llt().solve(matB); + VERIFY_IS_APPROX(m2, m1 + symmLo.template selfadjointView().llt().solve(matB)); + m2 = m1; + m2.noalias() -= symmLo.template selfadjointView().llt().solve(matB); + VERIFY_IS_APPROX(m2, m1 - symmLo.template selfadjointView().llt().solve(matB)); + } + + // LDLT + { + STATIC_CHECK(( internal::is_same::StorageIndex,int>::value )); + STATIC_CHECK(( internal::is_same::StorageIndex,int>::value )); + + int sign = internal::random()%2 ? 
1 : -1; + + if(sign == -1) + { + symm = -symm; // test a negative matrix + } + + SquareMatrixType symmUp = symm.template triangularView(); + SquareMatrixType symmLo = symm.template triangularView(); + + LDLT ldltlo(symmLo); + VERIFY(ldltlo.info()==Success); + VERIFY_IS_APPROX(symm, ldltlo.reconstructedMatrix()); + + check_solverbase(symm, ldltlo, rows, rows, 1); + check_solverbase(symm, ldltlo, rows, cols, rows); + + const MatrixType symmLo_inverse = ldltlo.solve(MatrixType::Identity(rows,cols)); + RealScalar rcond = (RealScalar(1) / matrix_l1_norm(symmLo)) / + matrix_l1_norm(symmLo_inverse); + RealScalar rcond_est = ldltlo.rcond(); + // Verify that the estimated condition number is within a factor of 10 of the + // truth. + VERIFY(rcond_est >= rcond / 10 && rcond_est <= rcond * 10); + + + LDLT ldltup(symmUp); + VERIFY(ldltup.info()==Success); + VERIFY_IS_APPROX(symm, ldltup.reconstructedMatrix()); + vecX = ldltup.solve(vecB); + VERIFY_IS_APPROX(symm * vecX, vecB); + matX = ldltup.solve(matB); + VERIFY_IS_APPROX(symm * matX, matB); + + // Verify that the estimated condition number is within a factor of 10 of the + // truth. 
+ const MatrixType symmUp_inverse = ldltup.solve(MatrixType::Identity(rows,cols)); + rcond = (RealScalar(1) / matrix_l1_norm(symmUp)) / + matrix_l1_norm(symmUp_inverse); + rcond_est = ldltup.rcond(); + VERIFY(rcond_est >= rcond / 10 && rcond_est <= rcond * 10); + + VERIFY_IS_APPROX(MatrixType(ldltlo.matrixL().transpose().conjugate()), MatrixType(ldltlo.matrixU())); + VERIFY_IS_APPROX(MatrixType(ldltlo.matrixU().transpose().conjugate()), MatrixType(ldltlo.matrixL())); + VERIFY_IS_APPROX(MatrixType(ldltup.matrixL().transpose().conjugate()), MatrixType(ldltup.matrixU())); + VERIFY_IS_APPROX(MatrixType(ldltup.matrixU().transpose().conjugate()), MatrixType(ldltup.matrixL())); + + if(MatrixType::RowsAtCompileTime==Dynamic) + { + // note : each inplace permutation requires a small temporary vector (mask) + + // check inplace solve + matX = matB; + VERIFY_EVALUATION_COUNT(matX = ldltlo.solve(matX), 0); + VERIFY_IS_APPROX(matX, ldltlo.solve(matB).eval()); + + + matX = matB; + VERIFY_EVALUATION_COUNT(matX = ldltup.solve(matX), 0); + VERIFY_IS_APPROX(matX, ldltup.solve(matB).eval()); + } + + // restore + if(sign == -1) + symm = -symm; + + // check matrices coming from linear constraints with Lagrange multipliers + if(rows>=3) + { + SquareMatrixType A = symm; + Index c = internal::random(0,rows-2); + A.bottomRightCorner(c,c).setZero(); + // Make sure a solution exists: + vecX.setRandom(); + vecB = A * vecX; + vecX.setZero(); + ldltlo.compute(A); + VERIFY_IS_APPROX(A, ldltlo.reconstructedMatrix()); + vecX = ldltlo.solve(vecB); + VERIFY_IS_APPROX(A * vecX, vecB); + } + + // check non-full rank matrices + if(rows>=3) + { + Index r = internal::random(1,rows-1); + Matrix a = Matrix::Random(rows,r); + SquareMatrixType A = a * a.adjoint(); + // Make sure a solution exists: + vecX.setRandom(); + vecB = A * vecX; + vecX.setZero(); + ldltlo.compute(A); + VERIFY_IS_APPROX(A, ldltlo.reconstructedMatrix()); + vecX = ldltlo.solve(vecB); + VERIFY_IS_APPROX(A * vecX, vecB); + } + + // check 
matrices with a wide spectrum + if(rows>=3) + { + using std::pow; + using std::sqrt; + RealScalar s = (std::min)(16,std::numeric_limits::max_exponent10/8); + Matrix a = Matrix::Random(rows,rows); + Matrix d = Matrix::Random(rows); + for(Index k=0; k(-s,s)); + SquareMatrixType A = a * d.asDiagonal() * a.adjoint(); + // Make sure a solution exists: + vecX.setRandom(); + vecB = A * vecX; + vecX.setZero(); + ldltlo.compute(A); + VERIFY_IS_APPROX(A, ldltlo.reconstructedMatrix()); + vecX = ldltlo.solve(vecB); + + if(ldltlo.vectorD().real().cwiseAbs().minCoeff()>RealScalar(0)) + { + VERIFY_IS_APPROX(A * vecX,vecB); + } + else + { + RealScalar large_tol = sqrt(test_precision()); + VERIFY((A * vecX).isApprox(vecB, large_tol)); + + ++g_test_level; + VERIFY_IS_APPROX(A * vecX,vecB); + --g_test_level; + } + } + } + + // update/downdate + CALL_SUBTEST(( test_chol_update(symm) )); + CALL_SUBTEST(( test_chol_update(symm) )); +} + +template void cholesky_cplx(const MatrixType& m) +{ + // classic test + cholesky(m); + + // test mixing real/scalar types + + Index rows = m.rows(); + Index cols = m.cols(); + + typedef typename MatrixType::Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + typedef Matrix RealMatrixType; + typedef Matrix VectorType; + + RealMatrixType a0 = RealMatrixType::Random(rows,cols); + VectorType vecB = VectorType::Random(rows), vecX(rows); + MatrixType matB = MatrixType::Random(rows,cols), matX(rows,cols); + RealMatrixType symm = a0 * a0.adjoint(); + // let's make sure the matrix is not singular or near singular + for (int k=0; k<3; ++k) + { + RealMatrixType a1 = RealMatrixType::Random(rows,cols); + symm += a1 * a1.adjoint(); + } + + { + RealMatrixType symmLo = symm.template triangularView(); + + LLT chollo(symmLo); + VERIFY_IS_APPROX(symm, chollo.reconstructedMatrix()); + + check_solverbase(symm, chollo, rows, rows, 1); + //check_solverbase(symm, chollo, rows, cols, rows); + } + + // LDLT + { + int sign = internal::random()%2 ? 
1 : -1; + + if(sign == -1) + { + symm = -symm; // test a negative matrix + } + + RealMatrixType symmLo = symm.template triangularView(); + + LDLT ldltlo(symmLo); + VERIFY(ldltlo.info()==Success); + VERIFY_IS_APPROX(symm, ldltlo.reconstructedMatrix()); + + check_solverbase(symm, ldltlo, rows, rows, 1); + //check_solverbase(symm, ldltlo, rows, cols, rows); + } +} + +// regression test for bug 241 +template void cholesky_bug241(const MatrixType& m) +{ + eigen_assert(m.rows() == 2 && m.cols() == 2); + + typedef typename MatrixType::Scalar Scalar; + typedef Matrix VectorType; + + MatrixType matA; + matA << 1, 1, 1, 1; + VectorType vecB; + vecB << 1, 1; + VectorType vecX = matA.ldlt().solve(vecB); + VERIFY_IS_APPROX(matA * vecX, vecB); +} + +// LDLT is not guaranteed to work for indefinite matrices, but happens to work fine if matrix is diagonal. +// This test checks that LDLT reports correctly that matrix is indefinite. +// See http://forum.kde.org/viewtopic.php?f=74&t=106942 and bug 736 +template void cholesky_definiteness(const MatrixType& m) +{ + eigen_assert(m.rows() == 2 && m.cols() == 2); + MatrixType mat; + LDLT ldlt(2); + + { + mat << 1, 0, 0, -1; + ldlt.compute(mat); + VERIFY(ldlt.info()==Success); + VERIFY(!ldlt.isNegative()); + VERIFY(!ldlt.isPositive()); + VERIFY_IS_APPROX(mat,ldlt.reconstructedMatrix()); + } + { + mat << 1, 2, 2, 1; + ldlt.compute(mat); + VERIFY(ldlt.info()==Success); + VERIFY(!ldlt.isNegative()); + VERIFY(!ldlt.isPositive()); + VERIFY_IS_APPROX(mat,ldlt.reconstructedMatrix()); + } + { + mat << 0, 0, 0, 0; + ldlt.compute(mat); + VERIFY(ldlt.info()==Success); + VERIFY(ldlt.isNegative()); + VERIFY(ldlt.isPositive()); + VERIFY_IS_APPROX(mat,ldlt.reconstructedMatrix()); + } + { + mat << 0, 0, 0, 1; + ldlt.compute(mat); + VERIFY(ldlt.info()==Success); + VERIFY(!ldlt.isNegative()); + VERIFY(ldlt.isPositive()); + VERIFY_IS_APPROX(mat,ldlt.reconstructedMatrix()); + } + { + mat << -1, 0, 0, 0; + ldlt.compute(mat); + VERIFY(ldlt.info()==Success); + 
VERIFY(ldlt.isNegative()); + VERIFY(!ldlt.isPositive()); + VERIFY_IS_APPROX(mat,ldlt.reconstructedMatrix()); + } +} + +template +void cholesky_faillure_cases() +{ + MatrixXd mat; + LDLT ldlt; + + { + mat.resize(2,2); + mat << 0, 1, 1, 0; + ldlt.compute(mat); + VERIFY_IS_NOT_APPROX(mat,ldlt.reconstructedMatrix()); + VERIFY(ldlt.info()==NumericalIssue); + } +#if (!EIGEN_ARCH_i386) || defined(EIGEN_VECTORIZE_SSE2) + { + mat.resize(3,3); + mat << -1, -3, 3, + -3, -8.9999999999999999999, 1, + 3, 1, 0; + ldlt.compute(mat); + VERIFY(ldlt.info()==NumericalIssue); + VERIFY_IS_NOT_APPROX(mat,ldlt.reconstructedMatrix()); + } +#endif + { + mat.resize(3,3); + mat << 1, 2, 3, + 2, 4, 1, + 3, 1, 0; + ldlt.compute(mat); + VERIFY(ldlt.info()==NumericalIssue); + VERIFY_IS_NOT_APPROX(mat,ldlt.reconstructedMatrix()); + } + + { + mat.resize(8,8); + mat << 0.1, 0, -0.1, 0, 0, 0, 1, 0, + 0, 4.24667, 0, 2.00333, 0, 0, 0, 0, + -0.1, 0, 0.2, 0, -0.1, 0, 0, 0, + 0, 2.00333, 0, 8.49333, 0, 2.00333, 0, 0, + 0, 0, -0.1, 0, 0.1, 0, 0, 1, + 0, 0, 0, 2.00333, 0, 4.24667, 0, 0, + 1, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 1, 0, 0, 0; + ldlt.compute(mat); + VERIFY(ldlt.info()==NumericalIssue); + VERIFY_IS_NOT_APPROX(mat,ldlt.reconstructedMatrix()); + } + + // bug 1479 + { + mat.resize(4,4); + mat << 1, 2, 0, 1, + 2, 4, 0, 2, + 0, 0, 0, 1, + 1, 2, 1, 1; + ldlt.compute(mat); + VERIFY(ldlt.info()==NumericalIssue); + VERIFY_IS_NOT_APPROX(mat,ldlt.reconstructedMatrix()); + } +} + +template void cholesky_verify_assert() +{ + MatrixType tmp; + + LLT llt; + VERIFY_RAISES_ASSERT(llt.matrixL()) + VERIFY_RAISES_ASSERT(llt.matrixU()) + VERIFY_RAISES_ASSERT(llt.solve(tmp)) + VERIFY_RAISES_ASSERT(llt.transpose().solve(tmp)) + VERIFY_RAISES_ASSERT(llt.adjoint().solve(tmp)) + VERIFY_RAISES_ASSERT(llt.solveInPlace(tmp)) + + LDLT ldlt; + VERIFY_RAISES_ASSERT(ldlt.matrixL()) + VERIFY_RAISES_ASSERT(ldlt.transpositionsP()) + VERIFY_RAISES_ASSERT(ldlt.vectorD()) + VERIFY_RAISES_ASSERT(ldlt.isPositive()) + 
VERIFY_RAISES_ASSERT(ldlt.isNegative()) + VERIFY_RAISES_ASSERT(ldlt.solve(tmp)) + VERIFY_RAISES_ASSERT(ldlt.transpose().solve(tmp)) + VERIFY_RAISES_ASSERT(ldlt.adjoint().solve(tmp)) + VERIFY_RAISES_ASSERT(ldlt.solveInPlace(tmp)) +} + +EIGEN_DECLARE_TEST(cholesky) +{ + int s = 0; + for(int i = 0; i < g_repeat; i++) { + CALL_SUBTEST_1( cholesky(Matrix()) ); + CALL_SUBTEST_3( cholesky(Matrix2d()) ); + CALL_SUBTEST_3( cholesky_bug241(Matrix2d()) ); + CALL_SUBTEST_3( cholesky_definiteness(Matrix2d()) ); + CALL_SUBTEST_4( cholesky(Matrix3f()) ); + CALL_SUBTEST_5( cholesky(Matrix4d()) ); + + s = internal::random(1,EIGEN_TEST_MAX_SIZE); + CALL_SUBTEST_2( cholesky(MatrixXd(s,s)) ); + TEST_SET_BUT_UNUSED_VARIABLE(s) + + s = internal::random(1,EIGEN_TEST_MAX_SIZE/2); + CALL_SUBTEST_6( cholesky_cplx(MatrixXcd(s,s)) ); + TEST_SET_BUT_UNUSED_VARIABLE(s) + } + // empty matrix, regression test for Bug 785: + CALL_SUBTEST_2( cholesky(MatrixXd(0,0)) ); + + // This does not work yet: + // CALL_SUBTEST_2( cholesky(Matrix()) ); + + CALL_SUBTEST_4( cholesky_verify_assert() ); + CALL_SUBTEST_7( cholesky_verify_assert() ); + CALL_SUBTEST_8( cholesky_verify_assert() ); + CALL_SUBTEST_2( cholesky_verify_assert() ); + + // Test problem size constructors + CALL_SUBTEST_9( LLT(10) ); + CALL_SUBTEST_9( LDLT(10) ); + + CALL_SUBTEST_2( cholesky_faillure_cases() ); + + TEST_SET_BUT_UNUSED_VARIABLE(nb_temporaries) +} diff --git a/include/eigen/test/clz.cpp b/include/eigen/test/clz.cpp new file mode 100644 index 0000000000000000000000000000000000000000..1d08b47155c8771731154e4687fdf40435bc7fff --- /dev/null +++ b/include/eigen/test/clz.cpp @@ -0,0 +1,74 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2023 The Eigen Authors +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#include "main.h" + +template +int ref_clz(T val) { + static const int kNumBits = sizeof(T) * CHAR_BIT; + T kMsbMask = T(1) << (kNumBits - 1); + int z = 0; + for (; z < kNumBits && ((val & kMsbMask) == 0); ++z) { + val <<= 1; + } + return z; +} + +template +int ref_ctz(T val) { + static const int kNumBits = sizeof(T) * CHAR_BIT; + T kLsbMask = T(1); + int z = 0; + for (; z < kNumBits && ((val & kLsbMask) == 0); ++z) { + val >>= 1; + } + return z; +} + +template +void test_clz_ctz() { + T step = sizeof(T) <= 2 ? 1 : (Eigen::NumTraits::highest() / (T(1) << 16)); + T iters = Eigen::NumTraits::highest() / step; + for (T i = 0; i < iters; ++i) { + T val = i * step; + int expected_clz = ref_clz(val); + int actual_clz = Eigen::internal::clz(val); + VERIFY(expected_clz == actual_clz); + + int expected_ctz = ref_ctz(val); + int actual_ctz = Eigen::internal::ctz(val); + VERIFY(expected_ctz == actual_ctz); + } +} + +template +void test_clz_ctz_random() { + for (int i = 0; i < 1024 * 1024; ++i) { + T val = Eigen::internal::random(); + int expected_clz = ref_clz(val); + int actual_clz = Eigen::internal::clz(val); + VERIFY(expected_clz == actual_clz); + + int expected_ctz = ref_ctz(val); + int actual_ctz = Eigen::internal::ctz(val); + VERIFY(expected_ctz == actual_ctz); + } +} + +EIGEN_DECLARE_TEST(clz) { + CALL_SUBTEST_1(test_clz_ctz()); + CALL_SUBTEST_2(test_clz_ctz()); + CALL_SUBTEST_3(test_clz_ctz()); + CALL_SUBTEST_4(test_clz_ctz()); + + for (int i = 0; i < g_repeat; i++) { + test_clz_ctz_random(); + test_clz_ctz_random(); + } +} diff --git a/include/eigen/test/commainitializer.cpp b/include/eigen/test/commainitializer.cpp new file mode 100644 index 0000000000000000000000000000000000000000..eb275be9cf4527e8fac96cb1f25b2c929695d800 --- /dev/null +++ b/include/eigen/test/commainitializer.cpp @@ -0,0 +1,118 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. 
+// +// Copyright (C) 2008 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#include "main.h" + + +template +void test_blocks() +{ + Matrix m_fixed; + MatrixXi m_dynamic(M1+M2, N1+N2); + + Matrix mat11; mat11.setRandom(); + Matrix mat12; mat12.setRandom(); + Matrix mat21; mat21.setRandom(); + Matrix mat22; mat22.setRandom(); + + MatrixXi matx11 = mat11, matx12 = mat12, matx21 = mat21, matx22 = mat22; + + { + VERIFY_IS_EQUAL((m_fixed << mat11, mat12, mat21, matx22).finished(), (m_dynamic << mat11, matx12, mat21, matx22).finished()); + VERIFY_IS_EQUAL((m_fixed.template topLeftCorner()), mat11); + VERIFY_IS_EQUAL((m_fixed.template topRightCorner()), mat12); + VERIFY_IS_EQUAL((m_fixed.template bottomLeftCorner()), mat21); + VERIFY_IS_EQUAL((m_fixed.template bottomRightCorner()), mat22); + VERIFY_IS_EQUAL((m_fixed << mat12, mat11, matx21, mat22).finished(), (m_dynamic << mat12, matx11, matx21, mat22).finished()); + } + + if(N1 > 0) + { + if(M1 > 0) + { + VERIFY_RAISES_ASSERT((m_fixed << mat11, mat12, mat11, mat21, mat22)); + } + if(M2 > 0) + { + VERIFY_RAISES_ASSERT((m_fixed << mat11, mat12, mat21, mat21, mat22)); + } + } + else + { + // allow insertion of zero-column blocks: + VERIFY_IS_EQUAL((m_fixed << mat11, mat12, mat11, mat11, mat21, mat21, mat22).finished(), (m_dynamic << mat12, mat22).finished()); + } + if(M1 != M2) + { + VERIFY_RAISES_ASSERT((m_fixed << mat11, mat21, mat12, mat22)); + } +} + + +template +struct test_block_recursion +{ + static void run() + { + test_block_recursion::run(); + test_block_recursion::run(); + } +}; + +template +struct test_block_recursion<0,N> +{ + static void run() { + test_blocks<(N>>6)&3, (N>>4)&3, (N>>2)&3, N & 3>(); + } +}; + +void test_basics() { + Matrix3d m3; + Matrix4d m4; + + VERIFY_RAISES_ASSERT( (m3 << 1, 2, 3, 4, 5, 6, 7, 8) ); + + #ifndef 
_MSC_VER + VERIFY_RAISES_ASSERT( (m3 << 1, 2, 3, 4, 5, 6, 7, 8, 9, 10) ); + #endif + + double data[] = {1, 2, 3, 4, 5, 6, 7, 8, 9}; + Matrix3d ref = Map >(data); + + m3 = Matrix3d::Random(); + m3 << 1, 2, 3, 4, 5, 6, 7, 8, 9; + VERIFY_IS_APPROX(m3, ref ); + + Vector3d vec[3]; + vec[0] << 1, 4, 7; + vec[1] << 2, 5, 8; + vec[2] << 3, 6, 9; + m3 = Matrix3d::Random(); + m3 << vec[0], vec[1], vec[2]; + VERIFY_IS_APPROX(m3, ref); + + vec[0] << 1, 2, 3; + vec[1] << 4, 5, 6; + vec[2] << 7, 8, 9; + m3 = Matrix3d::Random(); + m3 << vec[0].transpose(), + 4, 5, 6, + vec[2].transpose(); + VERIFY_IS_APPROX(m3, ref); +} + +EIGEN_DECLARE_TEST(commainitializer) +{ + + CALL_SUBTEST_1(test_basics()); + + // recursively test all block-sizes from 0 to 3: + CALL_SUBTEST_2(test_block_recursion<8>::run()); +} diff --git a/include/eigen/test/denseLM.cpp b/include/eigen/test/denseLM.cpp new file mode 100644 index 0000000000000000000000000000000000000000..afb8004b1e35fe909d580133c1736ec4bf19179b --- /dev/null +++ b/include/eigen/test/denseLM.cpp @@ -0,0 +1,190 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2012 Desire Nuentsa +// Copyright (C) 2012 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#include +#include +#include + +#include "main.h" +#include +using namespace std; +using namespace Eigen; + +template +struct DenseLM : DenseFunctor +{ + typedef DenseFunctor Base; + typedef typename Base::JacobianType JacobianType; + typedef Matrix VectorType; + + DenseLM(int n, int m) : DenseFunctor(n,m) + { } + + VectorType model(const VectorType& uv, VectorType& x) + { + VectorType y; // Should change to use expression template + int m = Base::values(); + int n = Base::inputs(); + eigen_assert(uv.size()%2 == 0); + eigen_assert(uv.size() == n); + eigen_assert(x.size() == m); + y.setZero(m); + int half = n/2; + VectorBlock u(uv, 0, half); + VectorBlock v(uv, half, half); + for (int j = 0; j < m; j++) + { + for (int i = 0; i < half; i++) + y(j) += u(i)*std::exp(-(x(j)-i)*(x(j)-i)/(v(i)*v(i))); + } + return y; + + } + void initPoints(VectorType& uv_ref, VectorType& x) + { + m_x = x; + m_y = this->model(uv_ref, x); + } + + int operator()(const VectorType& uv, VectorType& fvec) + { + + int m = Base::values(); + int n = Base::inputs(); + eigen_assert(uv.size()%2 == 0); + eigen_assert(uv.size() == n); + eigen_assert(fvec.size() == m); + int half = n/2; + VectorBlock u(uv, 0, half); + VectorBlock v(uv, half, half); + for (int j = 0; j < m; j++) + { + fvec(j) = m_y(j); + for (int i = 0; i < half; i++) + { + fvec(j) -= u(i) *std::exp(-(m_x(j)-i)*(m_x(j)-i)/(v(i)*v(i))); + } + } + + return 0; + } + int df(const VectorType& uv, JacobianType& fjac) + { + int m = Base::values(); + int n = Base::inputs(); + eigen_assert(n == uv.size()); + eigen_assert(fjac.rows() == m); + eigen_assert(fjac.cols() == n); + int half = n/2; + VectorBlock u(uv, 0, half); + VectorBlock v(uv, half, half); + for (int j = 0; j < m; j++) + { + for (int i = 0; i < half; i++) + { + fjac.coeffRef(j,i) = -std::exp(-(m_x(j)-i)*(m_x(j)-i)/(v(i)*v(i))); + fjac.coeffRef(j,i+half) = -2.*u(i)*(m_x(j)-i)*(m_x(j)-i)/(std::pow(v(i),3)) * std::exp(-(m_x(j)-i)*(m_x(j)-i)/(v(i)*v(i))); + } + } + return 0; + } + 
VectorType m_x, m_y; //Data Points +}; + +template +int test_minimizeLM(FunctorType& functor, VectorType& uv) +{ + LevenbergMarquardt lm(functor); + LevenbergMarquardtSpace::Status info; + + info = lm.minimize(uv); + + VERIFY_IS_EQUAL(info, 1); + //FIXME Check other parameters + return info; +} + +template +int test_lmder(FunctorType& functor, VectorType& uv) +{ + typedef typename VectorType::Scalar Scalar; + LevenbergMarquardtSpace::Status info; + LevenbergMarquardt lm(functor); + info = lm.lmder1(uv); + + VERIFY_IS_EQUAL(info, 1); + //FIXME Check other parameters + return info; +} + +template +int test_minimizeSteps(FunctorType& functor, VectorType& uv) +{ + LevenbergMarquardtSpace::Status info; + LevenbergMarquardt lm(functor); + info = lm.minimizeInit(uv); + if (info==LevenbergMarquardtSpace::ImproperInputParameters) + return info; + do + { + info = lm.minimizeOneStep(uv); + } while (info==LevenbergMarquardtSpace::Running); + + VERIFY_IS_EQUAL(info, 1); + //FIXME Check other parameters + return info; +} + +template +void test_denseLM_T() +{ + typedef Matrix VectorType; + + int inputs = 10; + int values = 1000; + DenseLM dense_gaussian(inputs, values); + VectorType uv(inputs),uv_ref(inputs); + VectorType x(values); + + // Generate the reference solution + uv_ref << -2, 1, 4 ,8, 6, 1.8, 1.2, 1.1, 1.9 , 3; + + //Generate the reference data points + x.setRandom(); + x = 10*x; + x.array() += 10; + dense_gaussian.initPoints(uv_ref, x); + + // Generate the initial parameters + VectorBlock u(uv, 0, inputs/2); + VectorBlock v(uv, inputs/2, inputs/2); + + // Solve the optimization problem + + //Solve in one go + u.setOnes(); v.setOnes(); + test_minimizeLM(dense_gaussian, uv); + + //Solve until the machine precision + u.setOnes(); v.setOnes(); + test_lmder(dense_gaussian, uv); + + // Solve step by step + v.setOnes(); u.setOnes(); + test_minimizeSteps(dense_gaussian, uv); + +} + +EIGEN_DECLARE_TEST(denseLM) +{ + CALL_SUBTEST_2(test_denseLM_T()); + + // 
CALL_SUBTEST_2(test_sparseLM_T()); +} diff --git a/include/eigen/test/diagonal_matrix_variadic_ctor.cpp b/include/eigen/test/diagonal_matrix_variadic_ctor.cpp new file mode 100644 index 0000000000000000000000000000000000000000..fbc8f8470c702e73957232625c6364e52e6a94af --- /dev/null +++ b/include/eigen/test/diagonal_matrix_variadic_ctor.cpp @@ -0,0 +1,185 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2019 David Tellenbach +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#define EIGEN_NO_STATIC_ASSERT + +#include "main.h" + +template +void assertionTest() +{ + typedef DiagonalMatrix DiagMatrix5; + typedef DiagonalMatrix DiagMatrix7; + typedef DiagonalMatrix DiagMatrixX; + + Scalar raw[6]; + for (int i = 0; i < 6; ++i) { + raw[i] = internal::random(); + } + + VERIFY_RAISES_ASSERT((DiagMatrix5{raw[0], raw[1], raw[2], raw[3]})); + VERIFY_RAISES_ASSERT((DiagMatrix5{raw[0], raw[1], raw[3]})); + VERIFY_RAISES_ASSERT((DiagMatrix7{raw[0], raw[1], raw[2], raw[3]})); + + VERIFY_RAISES_ASSERT((DiagMatrixX { + {raw[0], raw[1], raw[2]}, + {raw[3], raw[4], raw[5]} + })); +} + +#define VERIFY_IMPLICIT_CONVERSION_3(DIAGTYPE, V0, V1, V2) \ + DIAGTYPE d(V0, V1, V2); \ + DIAGTYPE::DenseMatrixType Dense = d.toDenseMatrix(); \ + VERIFY_IS_APPROX(Dense(0, 0), (Scalar)V0); \ + VERIFY_IS_APPROX(Dense(1, 1), (Scalar)V1); \ + VERIFY_IS_APPROX(Dense(2, 2), (Scalar)V2); + +#define VERIFY_IMPLICIT_CONVERSION_4(DIAGTYPE, V0, V1, V2, V3) \ + DIAGTYPE d(V0, V1, V2, V3); \ + DIAGTYPE::DenseMatrixType Dense = d.toDenseMatrix(); \ + VERIFY_IS_APPROX(Dense(0, 0), (Scalar)V0); \ + VERIFY_IS_APPROX(Dense(1, 1), (Scalar)V1); \ + VERIFY_IS_APPROX(Dense(2, 2), (Scalar)V2); \ + VERIFY_IS_APPROX(Dense(3, 3), (Scalar)V3); + +#define VERIFY_IMPLICIT_CONVERSION_5(DIAGTYPE, V0, V1, V2, 
V3, V4) \ + DIAGTYPE d(V0, V1, V2, V3, V4); \ + DIAGTYPE::DenseMatrixType Dense = d.toDenseMatrix(); \ + VERIFY_IS_APPROX(Dense(0, 0), (Scalar)V0); \ + VERIFY_IS_APPROX(Dense(1, 1), (Scalar)V1); \ + VERIFY_IS_APPROX(Dense(2, 2), (Scalar)V2); \ + VERIFY_IS_APPROX(Dense(3, 3), (Scalar)V3); \ + VERIFY_IS_APPROX(Dense(4, 4), (Scalar)V4); + +template +void constructorTest() +{ + typedef DiagonalMatrix DiagonalMatrix0; + typedef DiagonalMatrix DiagonalMatrix3; + typedef DiagonalMatrix DiagonalMatrix4; + typedef DiagonalMatrix DiagonalMatrixX; + + Scalar raw[7]; + for (int k = 0; k < 7; ++k) raw[k] = internal::random(); + + // Fixed-sized matrices + { + DiagonalMatrix0 a {{}}; + VERIFY(a.rows() == 0); + VERIFY(a.cols() == 0); + typename DiagonalMatrix0::DenseMatrixType m = a.toDenseMatrix(); + for (Index k = 0; k < a.rows(); ++k) VERIFY(m(k, k) == raw[k]); + } + { + DiagonalMatrix3 a {{raw[0], raw[1], raw[2]}}; + VERIFY(a.rows() == 3); + VERIFY(a.cols() == 3); + typename DiagonalMatrix3::DenseMatrixType m = a.toDenseMatrix(); + for (Index k = 0; k < a.rows(); ++k) VERIFY(m(k, k) == raw[k]); + } + { + DiagonalMatrix4 a {{raw[0], raw[1], raw[2], raw[3]}}; + VERIFY(a.rows() == 4); + VERIFY(a.cols() == 4); + typename DiagonalMatrix4::DenseMatrixType m = a.toDenseMatrix(); + for (Index k = 0; k < a.rows(); ++k) VERIFY(m(k, k) == raw[k]); + } + + // dynamically sized matrices + { + DiagonalMatrixX a{{}}; + VERIFY(a.rows() == 0); + VERIFY(a.rows() == 0); + typename DiagonalMatrixX::DenseMatrixType m = a.toDenseMatrix(); + for (Index k = 0; k < a.rows(); ++k) VERIFY(m(k, k) == raw[k]); + } + { + DiagonalMatrixX a{{raw[0], raw[1], raw[2], raw[3], raw[4], raw[5], raw[6]}}; + VERIFY(a.rows() == 7); + VERIFY(a.rows() == 7); + typename DiagonalMatrixX::DenseMatrixType m = a.toDenseMatrix(); + for (Index k = 0; k < a.rows(); ++k) VERIFY(m(k, k) == raw[k]); + } +} + +template<> +void constructorTest() +{ + typedef float Scalar; + + typedef DiagonalMatrix DiagonalMatrix0; + typedef 
DiagonalMatrix DiagonalMatrix3; + typedef DiagonalMatrix DiagonalMatrix4; + typedef DiagonalMatrix DiagonalMatrix5; + typedef DiagonalMatrix DiagonalMatrixX; + + Scalar raw[7]; + for (int k = 0; k < 7; ++k) raw[k] = internal::random(); + + // Fixed-sized matrices + { + DiagonalMatrix0 a {{}}; + VERIFY(a.rows() == 0); + VERIFY(a.cols() == 0); + typename DiagonalMatrix0::DenseMatrixType m = a.toDenseMatrix(); + for (Index k = 0; k < a.rows(); ++k) VERIFY(m(k, k) == raw[k]); + } + { + DiagonalMatrix3 a {{raw[0], raw[1], raw[2]}}; + VERIFY(a.rows() == 3); + VERIFY(a.cols() == 3); + typename DiagonalMatrix3::DenseMatrixType m = a.toDenseMatrix(); + for (Index k = 0; k < a.rows(); ++k) VERIFY(m(k, k) == raw[k]); + } + { + DiagonalMatrix4 a {{raw[0], raw[1], raw[2], raw[3]}}; + VERIFY(a.rows() == 4); + VERIFY(a.cols() == 4); + typename DiagonalMatrix4::DenseMatrixType m = a.toDenseMatrix(); + for (Index k = 0; k < a.rows(); ++k) VERIFY(m(k, k) == raw[k]); + } + + // dynamically sized matrices + { + DiagonalMatrixX a{{}}; + VERIFY(a.rows() == 0); + VERIFY(a.rows() == 0); + typename DiagonalMatrixX::DenseMatrixType m = a.toDenseMatrix(); + for (Index k = 0; k < a.rows(); ++k) VERIFY(m(k, k) == raw[k]); + } + { + DiagonalMatrixX a{{raw[0], raw[1], raw[2], raw[3], raw[4], raw[5], raw[6]}}; + VERIFY(a.rows() == 7); + VERIFY(a.rows() == 7); + typename DiagonalMatrixX::DenseMatrixType m = a.toDenseMatrix(); + for (Index k = 0; k < a.rows(); ++k) VERIFY(m(k, k) == raw[k]); + } + { VERIFY_IMPLICIT_CONVERSION_3(DiagonalMatrix3, 1.2647, 2.56f, -3); } + { VERIFY_IMPLICIT_CONVERSION_4(DiagonalMatrix4, 1.2647, 2.56f, -3, 3.23f); } + { VERIFY_IMPLICIT_CONVERSION_5(DiagonalMatrix5, 1.2647, 2.56f, -3, 3.23f, 2); } +} + +EIGEN_DECLARE_TEST(diagonal_matrix_variadic_ctor) +{ + CALL_SUBTEST_1(assertionTest()); + CALL_SUBTEST_1(assertionTest()); + CALL_SUBTEST_1(assertionTest()); + CALL_SUBTEST_1(assertionTest()); + CALL_SUBTEST_1(assertionTest()); + CALL_SUBTEST_1(assertionTest()); + 
CALL_SUBTEST_1(assertionTest>()); + + CALL_SUBTEST_2(constructorTest()); + CALL_SUBTEST_2(constructorTest()); + CALL_SUBTEST_2(constructorTest()); + CALL_SUBTEST_2(constructorTest()); + CALL_SUBTEST_2(constructorTest()); + CALL_SUBTEST_2(constructorTest()); + CALL_SUBTEST_2(constructorTest>()); +} diff --git a/include/eigen/test/diagonalmatrices.cpp b/include/eigen/test/diagonalmatrices.cpp new file mode 100644 index 0000000000000000000000000000000000000000..276beade05ee7fd8b93e36006cdeece3580cc55d --- /dev/null +++ b/include/eigen/test/diagonalmatrices.cpp @@ -0,0 +1,173 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#include "main.h" +using namespace std; +template void diagonalmatrices(const MatrixType& m) +{ + typedef typename MatrixType::Scalar Scalar; + enum { Rows = MatrixType::RowsAtCompileTime, Cols = MatrixType::ColsAtCompileTime }; + typedef Matrix VectorType; + typedef Matrix RowVectorType; + typedef Matrix SquareMatrixType; + typedef Matrix DynMatrixType; + typedef DiagonalMatrix LeftDiagonalMatrix; + typedef DiagonalMatrix RightDiagonalMatrix; + typedef Matrix BigMatrix; + Index rows = m.rows(); + Index cols = m.cols(); + + MatrixType m1 = MatrixType::Random(rows, cols), + m2 = MatrixType::Random(rows, cols); + VectorType v1 = VectorType::Random(rows), + v2 = VectorType::Random(rows); + RowVectorType rv1 = RowVectorType::Random(cols), + rv2 = RowVectorType::Random(cols); + + LeftDiagonalMatrix ldm1(v1), ldm2(v2); + RightDiagonalMatrix rdm1(rv1), rdm2(rv2); + + Scalar s1 = internal::random(); + + SquareMatrixType sq_m1 (v1.asDiagonal()); + VERIFY_IS_APPROX(sq_m1, v1.asDiagonal().toDenseMatrix()); + sq_m1 = v1.asDiagonal(); + VERIFY_IS_APPROX(sq_m1, 
v1.asDiagonal().toDenseMatrix()); + SquareMatrixType sq_m2 = v1.asDiagonal(); + VERIFY_IS_APPROX(sq_m1, sq_m2); + + ldm1 = v1.asDiagonal(); + LeftDiagonalMatrix ldm3(v1); + VERIFY_IS_APPROX(ldm1.diagonal(), ldm3.diagonal()); + LeftDiagonalMatrix ldm4 = v1.asDiagonal(); + VERIFY_IS_APPROX(ldm1.diagonal(), ldm4.diagonal()); + + sq_m1.block(0,0,rows,rows) = ldm1; + VERIFY_IS_APPROX(sq_m1, ldm1.toDenseMatrix()); + sq_m1.transpose() = ldm1; + VERIFY_IS_APPROX(sq_m1, ldm1.toDenseMatrix()); + + Index i = internal::random(0, rows-1); + Index j = internal::random(0, cols-1); + + VERIFY_IS_APPROX( ((ldm1 * m1)(i,j)) , ldm1.diagonal()(i) * m1(i,j) ); + VERIFY_IS_APPROX( ((ldm1 * (m1+m2))(i,j)) , ldm1.diagonal()(i) * (m1+m2)(i,j) ); + VERIFY_IS_APPROX( ((m1 * rdm1)(i,j)) , rdm1.diagonal()(j) * m1(i,j) ); + VERIFY_IS_APPROX( ((v1.asDiagonal() * m1)(i,j)) , v1(i) * m1(i,j) ); + VERIFY_IS_APPROX( ((m1 * rv1.asDiagonal())(i,j)) , rv1(j) * m1(i,j) ); + VERIFY_IS_APPROX( (((v1+v2).asDiagonal() * m1)(i,j)) , (v1+v2)(i) * m1(i,j) ); + VERIFY_IS_APPROX( (((v1+v2).asDiagonal() * (m1+m2))(i,j)) , (v1+v2)(i) * (m1+m2)(i,j) ); + VERIFY_IS_APPROX( ((m1 * (rv1+rv2).asDiagonal())(i,j)) , (rv1+rv2)(j) * m1(i,j) ); + VERIFY_IS_APPROX( (((m1+m2) * (rv1+rv2).asDiagonal())(i,j)) , (rv1+rv2)(j) * (m1+m2)(i,j) ); + + if(rows>1) + { + DynMatrixType tmp = m1.topRows(rows/2), res; + VERIFY_IS_APPROX( (res = m1.topRows(rows/2) * rv1.asDiagonal()), tmp * rv1.asDiagonal() ); + VERIFY_IS_APPROX( (res = v1.head(rows/2).asDiagonal()*m1.topRows(rows/2)), v1.head(rows/2).asDiagonal()*tmp ); + } + + BigMatrix big; + big.setZero(2*rows, 2*cols); + + big.block(i,j,rows,cols) = m1; + big.block(i,j,rows,cols) = v1.asDiagonal() * big.block(i,j,rows,cols); + + VERIFY_IS_APPROX((big.block(i,j,rows,cols)) , v1.asDiagonal() * m1 ); + + big.block(i,j,rows,cols) = m1; + big.block(i,j,rows,cols) = big.block(i,j,rows,cols) * rv1.asDiagonal(); + VERIFY_IS_APPROX((big.block(i,j,rows,cols)) , m1 * rv1.asDiagonal() ); + + + // 
scalar multiple + VERIFY_IS_APPROX(LeftDiagonalMatrix(ldm1*s1).diagonal(), ldm1.diagonal() * s1); + VERIFY_IS_APPROX(LeftDiagonalMatrix(s1*ldm1).diagonal(), s1 * ldm1.diagonal()); + + VERIFY_IS_APPROX(m1 * (rdm1 * s1), (m1 * rdm1) * s1); + VERIFY_IS_APPROX(m1 * (s1 * rdm1), (m1 * rdm1) * s1); + + // Diagonal to dense + sq_m1.setRandom(); + sq_m2 = sq_m1; + VERIFY_IS_APPROX( (sq_m1 += (s1*v1).asDiagonal()), sq_m2 += (s1*v1).asDiagonal().toDenseMatrix() ); + VERIFY_IS_APPROX( (sq_m1 -= (s1*v1).asDiagonal()), sq_m2 -= (s1*v1).asDiagonal().toDenseMatrix() ); + VERIFY_IS_APPROX( (sq_m1 = (s1*v1).asDiagonal()), (s1*v1).asDiagonal().toDenseMatrix() ); + + sq_m1.setRandom(); + sq_m2 = v1.asDiagonal(); + sq_m2 = sq_m1 * sq_m2; + VERIFY_IS_APPROX( (sq_m1*v1.asDiagonal()).col(i), sq_m2.col(i) ); + VERIFY_IS_APPROX( (sq_m1*v1.asDiagonal()).row(i), sq_m2.row(i) ); + + sq_m1 = v1.asDiagonal(); + sq_m2 = v2.asDiagonal(); + SquareMatrixType sq_m3 = v1.asDiagonal(); + VERIFY_IS_APPROX( sq_m3 = v1.asDiagonal() + v2.asDiagonal(), sq_m1 + sq_m2); + VERIFY_IS_APPROX( sq_m3 = v1.asDiagonal() - v2.asDiagonal(), sq_m1 - sq_m2); + VERIFY_IS_APPROX( sq_m3 = v1.asDiagonal() - 2*v2.asDiagonal() + v1.asDiagonal(), sq_m1 - 2*sq_m2 + sq_m1); +} + +template void as_scalar_product(const MatrixType& m) +{ + typedef typename MatrixType::Scalar Scalar; + typedef Matrix VectorType; + typedef Matrix DynMatrixType; + typedef Matrix DynVectorType; + typedef Matrix DynRowVectorType; + + Index rows = m.rows(); + Index depth = internal::random(1,EIGEN_TEST_MAX_SIZE); + + VectorType v1 = VectorType::Random(rows); + DynVectorType dv1 = DynVectorType::Random(depth); + DynRowVectorType drv1 = DynRowVectorType::Random(depth); + DynMatrixType dm1 = dv1; + DynMatrixType drm1 = drv1; + + Scalar s = v1(0); + + VERIFY_IS_APPROX( v1.asDiagonal() * drv1, s*drv1 ); + VERIFY_IS_APPROX( dv1 * v1.asDiagonal(), dv1*s ); + + VERIFY_IS_APPROX( v1.asDiagonal() * drm1, s*drm1 ); + VERIFY_IS_APPROX( dm1 * v1.asDiagonal(), dm1*s 
); +} + +template +void bug987() +{ + Matrix3Xd points = Matrix3Xd::Random(3, 3); + Vector2d diag = Vector2d::Random(); + Matrix2Xd tmp1 = points.topRows<2>(), res1, res2; + VERIFY_IS_APPROX( res1 = diag.asDiagonal() * points.topRows<2>(), res2 = diag.asDiagonal() * tmp1 ); + Matrix2d tmp2 = points.topLeftCorner<2,2>(); + VERIFY_IS_APPROX(( res1 = points.topLeftCorner<2,2>()*diag.asDiagonal()) , res2 = tmp2*diag.asDiagonal() ); +} + +EIGEN_DECLARE_TEST(diagonalmatrices) +{ + for(int i = 0; i < g_repeat; i++) { + CALL_SUBTEST_1( diagonalmatrices(Matrix()) ); + CALL_SUBTEST_1( as_scalar_product(Matrix()) ); + + CALL_SUBTEST_2( diagonalmatrices(Matrix3f()) ); + CALL_SUBTEST_3( diagonalmatrices(Matrix()) ); + CALL_SUBTEST_4( diagonalmatrices(Matrix4d()) ); + CALL_SUBTEST_5( diagonalmatrices(Matrix()) ); + CALL_SUBTEST_6( diagonalmatrices(MatrixXcf(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); + CALL_SUBTEST_6( as_scalar_product(MatrixXcf(1,1)) ); + CALL_SUBTEST_7( diagonalmatrices(MatrixXi(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); + CALL_SUBTEST_8( diagonalmatrices(Matrix(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); + CALL_SUBTEST_9( diagonalmatrices(MatrixXf(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); + CALL_SUBTEST_9( diagonalmatrices(MatrixXf(1,1)) ); + CALL_SUBTEST_9( as_scalar_product(MatrixXf(1,1)) ); + } + CALL_SUBTEST_10( bug987<0>() ); +} diff --git a/include/eigen/test/eigen2support.cpp b/include/eigen/test/eigen2support.cpp new file mode 100644 index 0000000000000000000000000000000000000000..49d7328e9e15db9509d2777288820b487bad33cd --- /dev/null +++ b/include/eigen/test/eigen2support.cpp @@ -0,0 +1,65 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. 
+// +// Copyright (C) 2009 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#define EIGEN2_SUPPORT + +#include "main.h" + +template void eigen2support(const MatrixType& m) +{ + typedef typename MatrixType::Scalar Scalar; + + Index rows = m.rows(); + Index cols = m.cols(); + + MatrixType m1 = MatrixType::Random(rows, cols), + m3(rows, cols); + + Scalar s1 = internal::random(), + s2 = internal::random(); + + // scalar addition + VERIFY_IS_APPROX(m1.cwise() + s1, s1 + m1.cwise()); + VERIFY_IS_APPROX(m1.cwise() + s1, MatrixType::Constant(rows,cols,s1) + m1); + VERIFY_IS_APPROX((m1*Scalar(2)).cwise() - s2, (m1+m1) - MatrixType::Constant(rows,cols,s2) ); + m3 = m1; + m3.cwise() += s2; + VERIFY_IS_APPROX(m3, m1.cwise() + s2); + m3 = m1; + m3.cwise() -= s1; + VERIFY_IS_APPROX(m3, m1.cwise() - s1); + + VERIFY_IS_EQUAL((m1.corner(TopLeft,1,1)), (m1.block(0,0,1,1))); + VERIFY_IS_EQUAL((m1.template corner<1,1>(TopLeft)), (m1.template block<1,1>(0,0))); + VERIFY_IS_EQUAL((m1.col(0).start(1)), (m1.col(0).segment(0,1))); + VERIFY_IS_EQUAL((m1.col(0).template start<1>()), (m1.col(0).segment(0,1))); + VERIFY_IS_EQUAL((m1.col(0).end(1)), (m1.col(0).segment(rows-1,1))); + VERIFY_IS_EQUAL((m1.col(0).template end<1>()), (m1.col(0).segment(rows-1,1))); + + using std::cos; + using numext::real; + using numext::abs2; + VERIFY_IS_EQUAL(ei_cos(s1), cos(s1)); + VERIFY_IS_EQUAL(ei_real(s1), real(s1)); + VERIFY_IS_EQUAL(ei_abs2(s1), abs2(s1)); + + m1.minor(0,0); +} + +EIGEN_DECLARE_TEST(eigen2support) +{ + for(int i = 0; i < g_repeat; i++) { + CALL_SUBTEST_1( eigen2support(Matrix()) ); + CALL_SUBTEST_2( eigen2support(MatrixXd(1,1)) ); + CALL_SUBTEST_4( eigen2support(Matrix3f()) ); + CALL_SUBTEST_5( eigen2support(Matrix4d()) ); + CALL_SUBTEST_2( eigen2support(MatrixXf(200,200)) ); + CALL_SUBTEST_6( 
eigen2support(MatrixXcd(100,100)) ); + } +} diff --git a/include/eigen/test/eigensolver_generalized_real.cpp b/include/eigen/test/eigensolver_generalized_real.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a0c99b18a37a2c66bad959c99adede749d88f6dc --- /dev/null +++ b/include/eigen/test/eigensolver_generalized_real.cpp @@ -0,0 +1,140 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2012-2016 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#define EIGEN_RUNTIME_NO_MALLOC +#include "main.h" +#include +#include +#include + +template void generalized_eigensolver_real(const MatrixType& m) +{ + /* this test covers the following files: + GeneralizedEigenSolver.h + */ + Index rows = m.rows(); + Index cols = m.cols(); + + typedef typename MatrixType::Scalar Scalar; + typedef std::complex ComplexScalar; + typedef Matrix VectorType; + + MatrixType a = MatrixType::Random(rows,cols); + MatrixType b = MatrixType::Random(rows,cols); + MatrixType a1 = MatrixType::Random(rows,cols); + MatrixType b1 = MatrixType::Random(rows,cols); + MatrixType spdA = a.adjoint() * a + a1.adjoint() * a1; + MatrixType spdB = b.adjoint() * b + b1.adjoint() * b1; + + // lets compare to GeneralizedSelfAdjointEigenSolver + { + GeneralizedSelfAdjointEigenSolver symmEig(spdA, spdB); + GeneralizedEigenSolver eig(spdA, spdB); + + VERIFY_IS_EQUAL(eig.eigenvalues().imag().cwiseAbs().maxCoeff(), 0); + + VectorType realEigenvalues = eig.eigenvalues().real(); + std::sort(realEigenvalues.data(), realEigenvalues.data()+realEigenvalues.size()); + VERIFY_IS_APPROX(realEigenvalues, symmEig.eigenvalues()); + + // check eigenvectors + typename GeneralizedEigenSolver::EigenvectorsType D = eig.eigenvalues().asDiagonal(); + typename 
GeneralizedEigenSolver::EigenvectorsType V = eig.eigenvectors(); + VERIFY_IS_APPROX(spdA*V, spdB*V*D); + } + + // non symmetric case: + { + GeneralizedEigenSolver eig(rows); + // TODO enable full-prealocation of required memory, this probably requires an in-place mode for HessenbergDecomposition + //Eigen::internal::set_is_malloc_allowed(false); + eig.compute(a,b); + //Eigen::internal::set_is_malloc_allowed(true); + for(Index k=0; k tmp = (eig.betas()(k)*a).template cast() - eig.alphas()(k)*b; + if(tmp.size()>1 && tmp.norm()>(std::numeric_limits::min)()) + tmp /= tmp.norm(); + VERIFY_IS_MUCH_SMALLER_THAN( std::abs(tmp.determinant()), Scalar(1) ); + } + // check eigenvectors + typename GeneralizedEigenSolver::EigenvectorsType D = eig.eigenvalues().asDiagonal(); + typename GeneralizedEigenSolver::EigenvectorsType V = eig.eigenvectors(); + VERIFY_IS_APPROX(a*V, b*V*D); + } + + // regression test for bug 1098 + { + GeneralizedSelfAdjointEigenSolver eig1(a.adjoint() * a,b.adjoint() * b); + eig1.compute(a.adjoint() * a,b.adjoint() * b); + GeneralizedEigenSolver eig2(a.adjoint() * a,b.adjoint() * b); + eig2.compute(a.adjoint() * a,b.adjoint() * b); + } + + // check without eigenvectors + { + GeneralizedEigenSolver eig1(spdA, spdB, true); + GeneralizedEigenSolver eig2(spdA, spdB, false); + VERIFY_IS_APPROX(eig1.eigenvalues(), eig2.eigenvalues()); + } +} + +template +void generalized_eigensolver_assert() { + GeneralizedEigenSolver eig; + // all raise assert if uninitialized + VERIFY_RAISES_ASSERT(eig.info()); + VERIFY_RAISES_ASSERT(eig.eigenvectors()); + VERIFY_RAISES_ASSERT(eig.eigenvalues()); + VERIFY_RAISES_ASSERT(eig.alphas()); + VERIFY_RAISES_ASSERT(eig.betas()); + + // none raise assert after compute called + eig.compute(MatrixType::Random(20, 20), MatrixType::Random(20, 20)); + VERIFY(eig.info() == Success); + eig.eigenvectors(); + eig.eigenvalues(); + eig.alphas(); + eig.betas(); + + // eigenvectors() raises assert, if eigenvectors were not requested + 
eig.compute(MatrixType::Random(20, 20), MatrixType::Random(20, 20), false); + VERIFY(eig.info() == Success); + VERIFY_RAISES_ASSERT(eig.eigenvectors()); + eig.eigenvalues(); + eig.alphas(); + eig.betas(); + + // all except info raise assert if realQZ did not converge + eig.setMaxIterations(0); // force real QZ to fail. + eig.compute(MatrixType::Random(20, 20), MatrixType::Random(20, 20)); + VERIFY(eig.info() == NoConvergence); + VERIFY_RAISES_ASSERT(eig.eigenvectors()); + VERIFY_RAISES_ASSERT(eig.eigenvalues()); + VERIFY_RAISES_ASSERT(eig.alphas()); + VERIFY_RAISES_ASSERT(eig.betas()); +} + +EIGEN_DECLARE_TEST(eigensolver_generalized_real) +{ + for(int i = 0; i < g_repeat; i++) { + int s = 0; + CALL_SUBTEST_1( generalized_eigensolver_real(Matrix4f()) ); + s = internal::random(1,EIGEN_TEST_MAX_SIZE/4); + CALL_SUBTEST_2( generalized_eigensolver_real(MatrixXd(s,s)) ); + + // some trivial but implementation-wise special cases + CALL_SUBTEST_2( generalized_eigensolver_real(MatrixXd(1,1)) ); + CALL_SUBTEST_2( generalized_eigensolver_real(MatrixXd(2,2)) ); + CALL_SUBTEST_3( generalized_eigensolver_real(Matrix()) ); + CALL_SUBTEST_4( generalized_eigensolver_real(Matrix2d()) ); + CALL_SUBTEST_5( generalized_eigensolver_assert() ); + TEST_SET_BUT_UNUSED_VARIABLE(s) + } +} diff --git a/include/eigen/test/eigensolver_selfadjoint.cpp b/include/eigen/test/eigensolver_selfadjoint.cpp new file mode 100644 index 0000000000000000000000000000000000000000..0fb2f4da766209ab394459bdc969e97bba1d6bb5 --- /dev/null +++ b/include/eigen/test/eigensolver_selfadjoint.cpp @@ -0,0 +1,281 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008 Gael Guennebaud +// Copyright (C) 2010 Jitse Niesen +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#include "main.h" +#include "svd_fill.h" +#include +#include +#include + + +template void selfadjointeigensolver_essential_check(const MatrixType& m) +{ + typedef typename MatrixType::Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + RealScalar eival_eps = numext::mini(test_precision(), NumTraits::dummy_precision()*20000); + + SelfAdjointEigenSolver eiSymm(m); + VERIFY_IS_EQUAL(eiSymm.info(), Success); + + RealScalar scaling = m.cwiseAbs().maxCoeff(); + + if(scaling<(std::numeric_limits::min)()) + { + VERIFY(eiSymm.eigenvalues().cwiseAbs().maxCoeff() <= (std::numeric_limits::min)()); + } + else + { + VERIFY_IS_APPROX((m.template selfadjointView() * eiSymm.eigenvectors())/scaling, + (eiSymm.eigenvectors() * eiSymm.eigenvalues().asDiagonal())/scaling); + } + VERIFY_IS_APPROX(m.template selfadjointView().eigenvalues(), eiSymm.eigenvalues()); + VERIFY_IS_UNITARY(eiSymm.eigenvectors()); + + if(m.cols()<=4) + { + SelfAdjointEigenSolver eiDirect; + eiDirect.computeDirect(m); + VERIFY_IS_EQUAL(eiDirect.info(), Success); + if(! 
eiSymm.eigenvalues().isApprox(eiDirect.eigenvalues(), eival_eps) ) + { + std::cerr << "reference eigenvalues: " << eiSymm.eigenvalues().transpose() << "\n" + << "obtained eigenvalues: " << eiDirect.eigenvalues().transpose() << "\n" + << "diff: " << (eiSymm.eigenvalues()-eiDirect.eigenvalues()).transpose() << "\n" + << "error (eps): " << (eiSymm.eigenvalues()-eiDirect.eigenvalues()).norm() / eiSymm.eigenvalues().norm() << " (" << eival_eps << ")\n"; + } + if(scaling<(std::numeric_limits::min)()) + { + VERIFY(eiDirect.eigenvalues().cwiseAbs().maxCoeff() <= (std::numeric_limits::min)()); + } + else + { + VERIFY_IS_APPROX(eiSymm.eigenvalues()/scaling, eiDirect.eigenvalues()/scaling); + VERIFY_IS_APPROX((m.template selfadjointView() * eiDirect.eigenvectors())/scaling, + (eiDirect.eigenvectors() * eiDirect.eigenvalues().asDiagonal())/scaling); + VERIFY_IS_APPROX(m.template selfadjointView().eigenvalues()/scaling, eiDirect.eigenvalues()/scaling); + } + + VERIFY_IS_UNITARY(eiDirect.eigenvectors()); + } +} + +template void selfadjointeigensolver(const MatrixType& m) +{ + /* this test covers the following files: + EigenSolver.h, SelfAdjointEigenSolver.h (and indirectly: Tridiagonalization.h) + */ + Index rows = m.rows(); + Index cols = m.cols(); + + typedef typename MatrixType::Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + + RealScalar largerEps = 10*test_precision(); + + MatrixType a = MatrixType::Random(rows,cols); + MatrixType a1 = MatrixType::Random(rows,cols); + MatrixType symmA = a.adjoint() * a + a1.adjoint() * a1; + MatrixType symmC = symmA; + + svd_fill_random(symmA,Symmetric); + + symmA.template triangularView().setZero(); + symmC.template triangularView().setZero(); + + MatrixType b = MatrixType::Random(rows,cols); + MatrixType b1 = MatrixType::Random(rows,cols); + MatrixType symmB = b.adjoint() * b + b1.adjoint() * b1; + symmB.template triangularView().setZero(); + + CALL_SUBTEST( selfadjointeigensolver_essential_check(symmA) ); + + 
SelfAdjointEigenSolver eiSymm(symmA); + // generalized eigen pb + GeneralizedSelfAdjointEigenSolver eiSymmGen(symmC, symmB); + + SelfAdjointEigenSolver eiSymmNoEivecs(symmA, false); + VERIFY_IS_EQUAL(eiSymmNoEivecs.info(), Success); + VERIFY_IS_APPROX(eiSymm.eigenvalues(), eiSymmNoEivecs.eigenvalues()); + + // generalized eigen problem Ax = lBx + eiSymmGen.compute(symmC, symmB,Ax_lBx); + VERIFY_IS_EQUAL(eiSymmGen.info(), Success); + VERIFY((symmC.template selfadjointView() * eiSymmGen.eigenvectors()).isApprox( + symmB.template selfadjointView() * (eiSymmGen.eigenvectors() * eiSymmGen.eigenvalues().asDiagonal()), largerEps)); + + // generalized eigen problem BAx = lx + eiSymmGen.compute(symmC, symmB,BAx_lx); + VERIFY_IS_EQUAL(eiSymmGen.info(), Success); + VERIFY((symmB.template selfadjointView() * (symmC.template selfadjointView() * eiSymmGen.eigenvectors())).isApprox( + (eiSymmGen.eigenvectors() * eiSymmGen.eigenvalues().asDiagonal()), largerEps)); + + // generalized eigen problem ABx = lx + eiSymmGen.compute(symmC, symmB,ABx_lx); + VERIFY_IS_EQUAL(eiSymmGen.info(), Success); + VERIFY((symmC.template selfadjointView() * (symmB.template selfadjointView() * eiSymmGen.eigenvectors())).isApprox( + (eiSymmGen.eigenvectors() * eiSymmGen.eigenvalues().asDiagonal()), largerEps)); + + + eiSymm.compute(symmC); + MatrixType sqrtSymmA = eiSymm.operatorSqrt(); + VERIFY_IS_APPROX(MatrixType(symmC.template selfadjointView()), sqrtSymmA*sqrtSymmA); + VERIFY_IS_APPROX(sqrtSymmA, symmC.template selfadjointView()*eiSymm.operatorInverseSqrt()); + + MatrixType id = MatrixType::Identity(rows, cols); + VERIFY_IS_APPROX(id.template selfadjointView().operatorNorm(), RealScalar(1)); + + SelfAdjointEigenSolver eiSymmUninitialized; + VERIFY_RAISES_ASSERT(eiSymmUninitialized.info()); + VERIFY_RAISES_ASSERT(eiSymmUninitialized.eigenvalues()); + VERIFY_RAISES_ASSERT(eiSymmUninitialized.eigenvectors()); + VERIFY_RAISES_ASSERT(eiSymmUninitialized.operatorSqrt()); + 
VERIFY_RAISES_ASSERT(eiSymmUninitialized.operatorInverseSqrt()); + + eiSymmUninitialized.compute(symmA, false); + VERIFY_RAISES_ASSERT(eiSymmUninitialized.eigenvectors()); + VERIFY_RAISES_ASSERT(eiSymmUninitialized.operatorSqrt()); + VERIFY_RAISES_ASSERT(eiSymmUninitialized.operatorInverseSqrt()); + + // test Tridiagonalization's methods + Tridiagonalization tridiag(symmC); + VERIFY_IS_APPROX(tridiag.diagonal(), tridiag.matrixT().diagonal()); + VERIFY_IS_APPROX(tridiag.subDiagonal(), tridiag.matrixT().template diagonal<-1>()); + Matrix T = tridiag.matrixT(); + if(rows>1 && cols>1) { + // FIXME check that upper and lower part are 0: + //VERIFY(T.topRightCorner(rows-2, cols-2).template triangularView().isZero()); + } + VERIFY_IS_APPROX(tridiag.diagonal(), T.diagonal()); + VERIFY_IS_APPROX(tridiag.subDiagonal(), T.template diagonal<1>()); + VERIFY_IS_APPROX(MatrixType(symmC.template selfadjointView()), tridiag.matrixQ() * tridiag.matrixT().eval() * MatrixType(tridiag.matrixQ()).adjoint()); + VERIFY_IS_APPROX(MatrixType(symmC.template selfadjointView()), tridiag.matrixQ() * tridiag.matrixT() * tridiag.matrixQ().adjoint()); + + // Test computation of eigenvalues from tridiagonal matrix + if(rows > 1) + { + SelfAdjointEigenSolver eiSymmTridiag; + eiSymmTridiag.computeFromTridiagonal(tridiag.matrixT().diagonal(), tridiag.matrixT().diagonal(-1), ComputeEigenvectors); + VERIFY_IS_APPROX(eiSymm.eigenvalues(), eiSymmTridiag.eigenvalues()); + VERIFY_IS_APPROX(tridiag.matrixT(), eiSymmTridiag.eigenvectors().real() * eiSymmTridiag.eigenvalues().asDiagonal() * eiSymmTridiag.eigenvectors().real().transpose()); + } + + if (rows > 1 && rows < 20) + { + // Test matrix with NaN + symmC(0,0) = std::numeric_limits::quiet_NaN(); + SelfAdjointEigenSolver eiSymmNaN(symmC); + VERIFY_IS_EQUAL(eiSymmNaN.info(), NoConvergence); + } + + // regression test for bug 1098 + { + SelfAdjointEigenSolver eig(a.adjoint() * a); + eig.compute(a.adjoint() * a); + } + + // regression test for bug 478 + { + 
a.setZero(); + SelfAdjointEigenSolver ei3(a); + VERIFY_IS_EQUAL(ei3.info(), Success); + VERIFY_IS_MUCH_SMALLER_THAN(ei3.eigenvalues().norm(),RealScalar(1)); + VERIFY((ei3.eigenvectors().transpose()*ei3.eigenvectors().transpose()).eval().isIdentity()); + } +} + +template +void bug_854() +{ + Matrix3d m; + m << 850.961, 51.966, 0, + 51.966, 254.841, 0, + 0, 0, 0; + selfadjointeigensolver_essential_check(m); +} + +template +void bug_1014() +{ + Matrix3d m; + m << 0.11111111111111114658, 0, 0, + 0, 0.11111111111111109107, 0, + 0, 0, 0.11111111111111107719; + selfadjointeigensolver_essential_check(m); +} + +template +void bug_1225() +{ + Matrix3d m1, m2; + m1.setRandom(); + m1 = m1*m1.transpose(); + m2 = m1.triangularView(); + SelfAdjointEigenSolver eig1(m1); + SelfAdjointEigenSolver eig2(m2.selfadjointView()); + VERIFY_IS_APPROX(eig1.eigenvalues(), eig2.eigenvalues()); +} + +template +void bug_1204() +{ + SparseMatrix A(2,2); + A.setIdentity(); + SelfAdjointEigenSolver > eig(A); +} + +EIGEN_DECLARE_TEST(eigensolver_selfadjoint) +{ + int s = 0; + for(int i = 0; i < g_repeat; i++) { + + // trivial test for 1x1 matrices: + CALL_SUBTEST_1( selfadjointeigensolver(Matrix())); + CALL_SUBTEST_1( selfadjointeigensolver(Matrix())); + CALL_SUBTEST_1( selfadjointeigensolver(Matrix, 1, 1>())); + + // very important to test 3x3 and 2x2 matrices since we provide special paths for them + CALL_SUBTEST_12( selfadjointeigensolver(Matrix2f()) ); + CALL_SUBTEST_12( selfadjointeigensolver(Matrix2d()) ); + CALL_SUBTEST_12( selfadjointeigensolver(Matrix2cd()) ); + CALL_SUBTEST_13( selfadjointeigensolver(Matrix3f()) ); + CALL_SUBTEST_13( selfadjointeigensolver(Matrix3d()) ); + CALL_SUBTEST_13( selfadjointeigensolver(Matrix3cd()) ); + CALL_SUBTEST_2( selfadjointeigensolver(Matrix4d()) ); + CALL_SUBTEST_2( selfadjointeigensolver(Matrix4cd()) ); + + s = internal::random(1,EIGEN_TEST_MAX_SIZE/4); + CALL_SUBTEST_3( selfadjointeigensolver(MatrixXf(s,s)) ); + CALL_SUBTEST_4( 
selfadjointeigensolver(MatrixXd(s,s)) ); + CALL_SUBTEST_5( selfadjointeigensolver(MatrixXcd(s,s)) ); + CALL_SUBTEST_9( selfadjointeigensolver(Matrix,Dynamic,Dynamic,RowMajor>(s,s)) ); + TEST_SET_BUT_UNUSED_VARIABLE(s) + + // some trivial but implementation-wise tricky cases + CALL_SUBTEST_4( selfadjointeigensolver(MatrixXd(1,1)) ); + CALL_SUBTEST_4( selfadjointeigensolver(MatrixXd(2,2)) ); + CALL_SUBTEST_5( selfadjointeigensolver(MatrixXcd(1,1)) ); + CALL_SUBTEST_5( selfadjointeigensolver(MatrixXcd(2,2)) ); + CALL_SUBTEST_6( selfadjointeigensolver(Matrix()) ); + CALL_SUBTEST_7( selfadjointeigensolver(Matrix()) ); + } + + CALL_SUBTEST_13( bug_854<0>() ); + CALL_SUBTEST_13( bug_1014<0>() ); + CALL_SUBTEST_13( bug_1204<0>() ); + CALL_SUBTEST_13( bug_1225<0>() ); + + // Test problem size constructors + s = internal::random(1,EIGEN_TEST_MAX_SIZE/4); + CALL_SUBTEST_8(SelfAdjointEigenSolver tmp1(s)); + CALL_SUBTEST_8(Tridiagonalization tmp2(s)); + + TEST_SET_BUT_UNUSED_VARIABLE(s) +} + diff --git a/include/eigen/test/geo_hyperplane.cpp b/include/eigen/test/geo_hyperplane.cpp new file mode 100644 index 0000000000000000000000000000000000000000..44b2f2aecb3e8e9b239f93cc586778f0b11b779d --- /dev/null +++ b/include/eigen/test/geo_hyperplane.cpp @@ -0,0 +1,192 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008 Gael Guennebaud +// Copyright (C) 2008 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#include "main.h" +#include +#include +#include + +template void hyperplane(const HyperplaneType& _plane) +{ + /* this test covers the following files: + Hyperplane.h + */ + using std::abs; + const Index dim = _plane.dim(); + enum { Options = HyperplaneType::Options }; + typedef typename HyperplaneType::Scalar Scalar; + typedef typename HyperplaneType::RealScalar RealScalar; + typedef Matrix VectorType; + typedef Matrix MatrixType; + + VectorType p0 = VectorType::Random(dim); + VectorType p1 = VectorType::Random(dim); + + VectorType n0 = VectorType::Random(dim).normalized(); + VectorType n1 = VectorType::Random(dim).normalized(); + + HyperplaneType pl0(n0, p0); + HyperplaneType pl1(n1, p1); + HyperplaneType pl2 = pl1; + + Scalar s0 = internal::random(); + Scalar s1 = internal::random(); + + VERIFY_IS_APPROX( n1.dot(n1), Scalar(1) ); + + VERIFY_IS_MUCH_SMALLER_THAN( pl0.absDistance(p0), Scalar(1) ); + if(numext::abs2(s0)>RealScalar(1e-6)) + VERIFY_IS_APPROX( pl1.signedDistance(p1 + n1 * s0), s0); + else + VERIFY_IS_MUCH_SMALLER_THAN( abs(pl1.signedDistance(p1 + n1 * s0) - s0), Scalar(1) ); + VERIFY_IS_MUCH_SMALLER_THAN( pl1.signedDistance(pl1.projection(p0)), Scalar(1) ); + VERIFY_IS_MUCH_SMALLER_THAN( pl1.absDistance(p1 + pl1.normal().unitOrthogonal() * s1), Scalar(1) ); + + // transform + if (!NumTraits::IsComplex) + { + MatrixType rot = MatrixType::Random(dim,dim).householderQr().householderQ(); + DiagonalMatrix scaling(VectorType::Random()); + Translation translation(VectorType::Random()); + + while(scaling.diagonal().cwiseAbs().minCoeff()::type OtherScalar; + Hyperplane hp1f = pl1.template cast(); + VERIFY_IS_APPROX(hp1f.template cast(),pl1); + Hyperplane hp1d = pl1.template cast(); + VERIFY_IS_APPROX(hp1d.template cast(),pl1); +} + +template void lines() +{ + using std::abs; + typedef Hyperplane HLine; + typedef ParametrizedLine PLine; + typedef Matrix Vector; + typedef Matrix CoeffsType; + + for(int i = 0; i < 10; i++) + { + Vector center = 
Vector::Random(); + Vector u = Vector::Random(); + Vector v = Vector::Random(); + Scalar a = internal::random(); + while (abs(a-1) < Scalar(1e-4)) a = internal::random(); + while (u.norm() < Scalar(1e-4)) u = Vector::Random(); + while (v.norm() < Scalar(1e-4)) v = Vector::Random(); + + HLine line_u = HLine::Through(center + u, center + a*u); + HLine line_v = HLine::Through(center + v, center + a*v); + + // the line equations should be normalized so that a^2+b^2=1 + VERIFY_IS_APPROX(line_u.normal().norm(), Scalar(1)); + VERIFY_IS_APPROX(line_v.normal().norm(), Scalar(1)); + + Vector result = line_u.intersection(line_v); + + // the lines should intersect at the point we called "center" + if(abs(a-1) > Scalar(1e-2) && abs(v.normalized().dot(u.normalized())) void planes() +{ + using std::abs; + typedef Hyperplane Plane; + typedef Matrix Vector; + + for(int i = 0; i < 10; i++) + { + Vector v0 = Vector::Random(); + Vector v1(v0), v2(v0); + if(internal::random(0,1)>0.25) + v1 += Vector::Random(); + if(internal::random(0,1)>0.25) + v2 += v1 * std::pow(internal::random(0,1),internal::random(1,16)); + if(internal::random(0,1)>0.25) + v2 += Vector::Random() * std::pow(internal::random(0,1),internal::random(1,16)); + + Plane p0 = Plane::Through(v0, v1, v2); + + VERIFY_IS_APPROX(p0.normal().norm(), Scalar(1)); + VERIFY_IS_MUCH_SMALLER_THAN(p0.absDistance(v0), Scalar(1)); + VERIFY_IS_MUCH_SMALLER_THAN(p0.absDistance(v1), Scalar(1)); + VERIFY_IS_MUCH_SMALLER_THAN(p0.absDistance(v2), Scalar(1)); + } +} + +template void hyperplane_alignment() +{ + typedef Hyperplane Plane3a; + typedef Hyperplane Plane3u; + + EIGEN_ALIGN_MAX Scalar array1[4]; + EIGEN_ALIGN_MAX Scalar array2[4]; + EIGEN_ALIGN_MAX Scalar array3[4+1]; + Scalar* array3u = array3+1; + + Plane3a *p1 = ::new(reinterpret_cast(array1)) Plane3a; + Plane3u *p2 = ::new(reinterpret_cast(array2)) Plane3u; + Plane3u *p3 = ::new(reinterpret_cast(array3u)) Plane3u; + + p1->coeffs().setRandom(); + *p2 = *p1; + *p3 = *p1; + + 
VERIFY_IS_APPROX(p1->coeffs(), p2->coeffs()); + VERIFY_IS_APPROX(p1->coeffs(), p3->coeffs()); +} + + +EIGEN_DECLARE_TEST(geo_hyperplane) +{ + for(int i = 0; i < g_repeat; i++) { + CALL_SUBTEST_1( hyperplane(Hyperplane()) ); + CALL_SUBTEST_2( hyperplane(Hyperplane()) ); + CALL_SUBTEST_2( hyperplane(Hyperplane()) ); + CALL_SUBTEST_2( hyperplane_alignment() ); + CALL_SUBTEST_3( hyperplane(Hyperplane()) ); + CALL_SUBTEST_4( hyperplane(Hyperplane,5>()) ); + CALL_SUBTEST_1( lines() ); + CALL_SUBTEST_3( lines() ); + CALL_SUBTEST_2( planes() ); + CALL_SUBTEST_5( planes() ); + } +} diff --git a/include/eigen/test/gpu_basic.cu b/include/eigen/test/gpu_basic.cu new file mode 100644 index 0000000000000000000000000000000000000000..e424a93c9e86959635d64734588a97e83158f072 --- /dev/null +++ b/include/eigen/test/gpu_basic.cu @@ -0,0 +1,465 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2015-2016 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +// workaround issue between gcc >= 4.7 and cuda 5.5 +#if (defined __GNUC__) && (__GNUC__>4 || __GNUC_MINOR__>=7) + #undef _GLIBCXX_ATOMIC_BUILTINS + #undef _GLIBCXX_USE_INT128 +#endif + +#define EIGEN_TEST_NO_LONGDOUBLE +#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int + +#include "main.h" +#include "gpu_common.h" + +// Check that dense modules can be properly parsed by nvcc +#include + +// struct Foo{ +// EIGEN_DEVICE_FUNC +// void operator()(int i, const float* mats, float* vecs) const { +// using namespace Eigen; +// // Matrix3f M(data); +// // Vector3f x(data+9); +// // Map(data+9) = M.inverse() * x; +// Matrix3f M(mats+i/16); +// Vector3f x(vecs+i*3); +// // using std::min; +// // using std::sqrt; +// Map(vecs+i*3) << x.minCoeff(), 1, 2;// / x.dot(x);//(M.inverse() * x) / x.x(); +// //x = x*2 + x.y() * x + x * x.maxCoeff() - x / x.sum(); +// } +// }; + +template +struct coeff_wise { + EIGEN_DEVICE_FUNC + void operator()(int i, const typename T::Scalar* in, typename T::Scalar* out) const + { + using namespace Eigen; + T x1(in+i); + T x2(in+i+1); + T x3(in+i+2); + Map res(out+i*T::MaxSizeAtCompileTime); + + res.array() += (in[0] * x1 + x2).array() * x3.array(); + } +}; + +template +struct complex_sqrt { + EIGEN_DEVICE_FUNC + void operator()(int i, const typename T::Scalar* in, typename T::Scalar* out) const + { + using namespace Eigen; + typedef typename T::Scalar ComplexType; + typedef typename T::Scalar::value_type ValueType; + const int num_special_inputs = 18; + + if (i == 0) { + const ValueType nan = std::numeric_limits::quiet_NaN(); + typedef Eigen::Vector SpecialInputs; + SpecialInputs special_in; + special_in.setZero(); + int idx = 0; + special_in[idx++] = ComplexType(0, 0); + special_in[idx++] = ComplexType(-0, 0); + special_in[idx++] = ComplexType(0, -0); + special_in[idx++] = ComplexType(-0, -0); + // GCC's fallback sqrt implementation fails for inf inputs. 
+ // It is called when _GLIBCXX_USE_C99_COMPLEX is false or if + // clang includes the GCC header (which temporarily disables + // _GLIBCXX_USE_C99_COMPLEX) + #if !defined(_GLIBCXX_COMPLEX) || \ + (_GLIBCXX_USE_C99_COMPLEX && !defined(__CLANG_CUDA_WRAPPERS_COMPLEX)) + const ValueType inf = std::numeric_limits::infinity(); + special_in[idx++] = ComplexType(1.0, inf); + special_in[idx++] = ComplexType(nan, inf); + special_in[idx++] = ComplexType(1.0, -inf); + special_in[idx++] = ComplexType(nan, -inf); + special_in[idx++] = ComplexType(-inf, 1.0); + special_in[idx++] = ComplexType(inf, 1.0); + special_in[idx++] = ComplexType(-inf, -1.0); + special_in[idx++] = ComplexType(inf, -1.0); + special_in[idx++] = ComplexType(-inf, nan); + special_in[idx++] = ComplexType(inf, nan); + #endif + special_in[idx++] = ComplexType(1.0, nan); + special_in[idx++] = ComplexType(nan, 1.0); + special_in[idx++] = ComplexType(nan, -1.0); + special_in[idx++] = ComplexType(nan, nan); + + Map special_out(out); + special_out = special_in.cwiseSqrt(); + } + + T x1(in + i); + Map res(out + num_special_inputs + i*T::MaxSizeAtCompileTime); + res = x1.cwiseSqrt(); + } +}; + +template +struct complex_operators { + EIGEN_DEVICE_FUNC + void operator()(int i, const typename T::Scalar* in, typename T::Scalar* out) const + { + using namespace Eigen; + typedef typename T::Scalar ComplexType; + typedef typename T::Scalar::value_type ValueType; + const int num_scalar_operators = 24; + const int num_vector_operators = 23; // no unary + operator. + int out_idx = i * (num_scalar_operators + num_vector_operators * T::MaxSizeAtCompileTime); + + // Scalar operators. 
+ const ComplexType a = in[i]; + const ComplexType b = in[i + 1]; + + out[out_idx++] = +a; + out[out_idx++] = -a; + + out[out_idx++] = a + b; + out[out_idx++] = a + numext::real(b); + out[out_idx++] = numext::real(a) + b; + out[out_idx++] = a - b; + out[out_idx++] = a - numext::real(b); + out[out_idx++] = numext::real(a) - b; + out[out_idx++] = a * b; + out[out_idx++] = a * numext::real(b); + out[out_idx++] = numext::real(a) * b; + out[out_idx++] = a / b; + out[out_idx++] = a / numext::real(b); + out[out_idx++] = numext::real(a) / b; + +#if !defined(EIGEN_COMP_MSVC) + out[out_idx] = a; out[out_idx++] += b; + out[out_idx] = a; out[out_idx++] -= b; + out[out_idx] = a; out[out_idx++] *= b; + out[out_idx] = a; out[out_idx++] /= b; +#endif + + const ComplexType true_value = ComplexType(ValueType(1), ValueType(0)); + const ComplexType false_value = ComplexType(ValueType(0), ValueType(0)); + out[out_idx++] = (a == b ? true_value : false_value); + out[out_idx++] = (a == numext::real(b) ? true_value : false_value); + out[out_idx++] = (numext::real(a) == b ? true_value : false_value); + out[out_idx++] = (a != b ? true_value : false_value); + out[out_idx++] = (a != numext::real(b) ? true_value : false_value); + out[out_idx++] = (numext::real(a) != b ? true_value : false_value); + + // Vector versions. 
+ T x1(in + i); + T x2(in + i + 1); + const int res_size = T::MaxSizeAtCompileTime * num_scalar_operators; + const int size = T::MaxSizeAtCompileTime; + int block_idx = 0; + + Map> res(out + out_idx, res_size); + res.segment(block_idx, size) = -x1; + block_idx += size; + + res.segment(block_idx, size) = x1 + x2; + block_idx += size; + res.segment(block_idx, size) = x1 + x2.real(); + block_idx += size; + res.segment(block_idx, size) = x1.real() + x2; + block_idx += size; + res.segment(block_idx, size) = x1 - x2; + block_idx += size; + res.segment(block_idx, size) = x1 - x2.real(); + block_idx += size; + res.segment(block_idx, size) = x1.real() - x2; + block_idx += size; + res.segment(block_idx, size) = x1.array() * x2.array(); + block_idx += size; + res.segment(block_idx, size) = x1.array() * x2.real().array(); + block_idx += size; + res.segment(block_idx, size) = x1.real().array() * x2.array(); + block_idx += size; + res.segment(block_idx, size) = x1.array() / x2.array(); + block_idx += size; + res.segment(block_idx, size) = x1.array() / x2.real().array(); + block_idx += size; + res.segment(block_idx, size) = x1.real().array() / x2.array(); + block_idx += size; + +#if !defined(EIGEN_COMP_MSVC) + res.segment(block_idx, size) = x1; res.segment(block_idx, size) += x2; + block_idx += size; + res.segment(block_idx, size) = x1; res.segment(block_idx, size) -= x2; + block_idx += size; + res.segment(block_idx, size) = x1; res.segment(block_idx, size).array() *= x2.array(); + block_idx += size; + res.segment(block_idx, size) = x1; res.segment(block_idx, size).array() /= x2.array(); + block_idx += size; +#endif + + const T true_vector = T::Constant(true_value); + const T false_vector = T::Constant(false_value); + res.segment(block_idx, size) = (x1 == x2 ? true_vector : false_vector); + block_idx += size; + // Mixing types in equality comparison does not work. + // res.segment(block_idx, size) = (x1 == x2.real() ? 
true_vector : false_vector); + // block_idx += size; + // res.segment(block_idx, size) = (x1.real() == x2 ? true_vector : false_vector); + // block_idx += size; + res.segment(block_idx, size) = (x1 != x2 ? true_vector : false_vector); + block_idx += size; + // res.segment(block_idx, size) = (x1 != x2.real() ? true_vector : false_vector); + // block_idx += size; + // res.segment(block_idx, size) = (x1.real() != x2 ? true_vector : false_vector); + // block_idx += size; + } +}; + +template +struct replicate { + EIGEN_DEVICE_FUNC + void operator()(int i, const typename T::Scalar* in, typename T::Scalar* out) const + { + using namespace Eigen; + T x1(in+i); + int step = x1.size() * 4; + int stride = 3 * step; + + typedef Map > MapType; + MapType(out+i*stride+0*step, x1.rows()*2, x1.cols()*2) = x1.replicate(2,2); + MapType(out+i*stride+1*step, x1.rows()*3, x1.cols()) = in[i] * x1.colwise().replicate(3); + MapType(out+i*stride+2*step, x1.rows(), x1.cols()*3) = in[i] * x1.rowwise().replicate(3); + } +}; + +template +struct alloc_new_delete { + EIGEN_DEVICE_FUNC + void operator()(int i, const typename T::Scalar* in, typename T::Scalar* out) const + { + int offset = 2*i*T::MaxSizeAtCompileTime; + T* x = new T(in + offset); + Eigen::Map u(out + offset); + u = *x; + delete x; + + offset += T::MaxSizeAtCompileTime; + T* y = new T[1]; + y[0] = T(in + offset); + Eigen::Map v(out + offset); + v = y[0]; + delete[] y; + } +}; + +template +struct redux { + EIGEN_DEVICE_FUNC + void operator()(int i, const typename T::Scalar* in, typename T::Scalar* out) const + { + using namespace Eigen; + int N = 10; + T x1(in+i); + out[i*N+0] = x1.minCoeff(); + out[i*N+1] = x1.maxCoeff(); + out[i*N+2] = x1.sum(); + out[i*N+3] = x1.prod(); + out[i*N+4] = x1.matrix().squaredNorm(); + out[i*N+5] = x1.matrix().norm(); + out[i*N+6] = x1.colwise().sum().maxCoeff(); + out[i*N+7] = x1.rowwise().maxCoeff().sum(); + out[i*N+8] = x1.matrix().colwise().squaredNorm().sum(); + } +}; + +template +struct prod_test 
{ + EIGEN_DEVICE_FUNC + void operator()(int i, const typename T1::Scalar* in, typename T1::Scalar* out) const + { + using namespace Eigen; + typedef Matrix T3; + T1 x1(in+i); + T2 x2(in+i+1); + Map res(out+i*T3::MaxSizeAtCompileTime); + res += in[i] * x1 * x2; + } +}; + +template +struct diagonal { + EIGEN_DEVICE_FUNC + void operator()(int i, const typename T1::Scalar* in, typename T1::Scalar* out) const + { + using namespace Eigen; + T1 x1(in+i); + Map res(out+i*T2::MaxSizeAtCompileTime); + res += x1.diagonal(); + } +}; + +template +struct eigenvalues_direct { + EIGEN_DEVICE_FUNC + void operator()(int i, const typename T::Scalar* in, typename T::Scalar* out) const + { + using namespace Eigen; + typedef Matrix Vec; + T M(in+i); + Map res(out+i*Vec::MaxSizeAtCompileTime); + T A = M*M.adjoint(); + SelfAdjointEigenSolver eig; + eig.computeDirect(A); + res = eig.eigenvalues(); + } +}; + +template +struct eigenvalues { + EIGEN_DEVICE_FUNC + void operator()(int i, const typename T::Scalar* in, typename T::Scalar* out) const + { + using namespace Eigen; + typedef Matrix Vec; + T M(in+i); + Map res(out+i*Vec::MaxSizeAtCompileTime); + T A = M*M.adjoint(); + SelfAdjointEigenSolver eig; + eig.compute(A); + res = eig.eigenvalues(); + } +}; + +template +struct matrix_inverse { + EIGEN_DEVICE_FUNC + void operator()(int i, const typename T::Scalar* in, typename T::Scalar* out) const + { + using namespace Eigen; + T M(in+i); + Map res(out+i*T::MaxSizeAtCompileTime); + res = M.inverse(); + } +}; + +template +struct numeric_limits_test { + EIGEN_DEVICE_FUNC + void operator()(int i, const typename T::Scalar* in, typename T::Scalar* out) const + { + EIGEN_UNUSED_VARIABLE(in) + int out_idx = i * 5; + out[out_idx++] = numext::numeric_limits::epsilon(); + out[out_idx++] = (numext::numeric_limits::max)(); + out[out_idx++] = (numext::numeric_limits::min)(); + out[out_idx++] = numext::numeric_limits::infinity(); + out[out_idx++] = numext::numeric_limits::quiet_NaN(); + } +}; + +template 
+bool verifyIsApproxWithInfsNans(const Type1& a, const Type2& b, typename Type1::Scalar* = 0) // Enabled for Eigen's type only +{ + if (a.rows() != b.rows()) { + return false; + } + if (a.cols() != b.cols()) { + return false; + } + for (Index r = 0; r < a.rows(); ++r) { + for (Index c = 0; c < a.cols(); ++c) { + if (a(r, c) != b(r, c) + && !((numext::isnan)(a(r, c)) && (numext::isnan)(b(r, c))) + && !test_isApprox(a(r, c), b(r, c))) { + return false; + } + } + } + return true; +} + +template +void test_with_infs_nans(const Kernel& ker, int n, const Input& in, Output& out) +{ + Output out_ref, out_gpu; + #if !defined(EIGEN_GPU_COMPILE_PHASE) + out_ref = out_gpu = out; + #else + EIGEN_UNUSED_VARIABLE(in); + EIGEN_UNUSED_VARIABLE(out); + #endif + run_on_cpu (ker, n, in, out_ref); + run_on_gpu(ker, n, in, out_gpu); + #if !defined(EIGEN_GPU_COMPILE_PHASE) + verifyIsApproxWithInfsNans(out_ref, out_gpu); + #endif +} + +EIGEN_DECLARE_TEST(gpu_basic) +{ + ei_test_init_gpu(); + + int nthreads = 100; + Eigen::VectorXf in, out; + Eigen::VectorXcf cfin, cfout; + + #if !defined(EIGEN_GPU_COMPILE_PHASE) + int data_size = nthreads * 512; + in.setRandom(data_size); + out.setConstant(data_size, -1); + cfin.setRandom(data_size); + cfout.setConstant(data_size, -1); + #endif + + CALL_SUBTEST( run_and_compare_to_gpu(coeff_wise(), nthreads, in, out) ); + CALL_SUBTEST( run_and_compare_to_gpu(coeff_wise(), nthreads, in, out) ); + +#if !defined(EIGEN_USE_HIP) + // FIXME + // These subtests result in a compile failure on the HIP platform + // + // eigen-upstream/Eigen/src/Core/Replicate.h:61:65: error: + // base class 'internal::dense_xpr_base, -1, -1> >::type' + // (aka 'ArrayBase, -1, -1> >') has protected default constructor + CALL_SUBTEST( run_and_compare_to_gpu(replicate(), nthreads, in, out) ); + CALL_SUBTEST( run_and_compare_to_gpu(replicate(), nthreads, in, out) ); + + // HIP does not support new/delete on device. 
+ CALL_SUBTEST( run_and_compare_to_gpu(alloc_new_delete(), nthreads, in, out) ); +#endif + + CALL_SUBTEST( run_and_compare_to_gpu(redux(), nthreads, in, out) ); + CALL_SUBTEST( run_and_compare_to_gpu(redux(), nthreads, in, out) ); + + CALL_SUBTEST( run_and_compare_to_gpu(prod_test(), nthreads, in, out) ); + CALL_SUBTEST( run_and_compare_to_gpu(prod_test(), nthreads, in, out) ); + + CALL_SUBTEST( run_and_compare_to_gpu(diagonal(), nthreads, in, out) ); + CALL_SUBTEST( run_and_compare_to_gpu(diagonal(), nthreads, in, out) ); + + CALL_SUBTEST( run_and_compare_to_gpu(matrix_inverse(), nthreads, in, out) ); + CALL_SUBTEST( run_and_compare_to_gpu(matrix_inverse(), nthreads, in, out) ); + CALL_SUBTEST( run_and_compare_to_gpu(matrix_inverse(), nthreads, in, out) ); + + CALL_SUBTEST( run_and_compare_to_gpu(eigenvalues_direct(), nthreads, in, out) ); + CALL_SUBTEST( run_and_compare_to_gpu(eigenvalues_direct(), nthreads, in, out) ); + + // Test std::complex. + CALL_SUBTEST( run_and_compare_to_gpu(complex_operators(), nthreads, cfin, cfout) ); + CALL_SUBTEST( test_with_infs_nans(complex_sqrt(), nthreads, cfin, cfout) ); + + // numeric_limits + CALL_SUBTEST( test_with_infs_nans(numeric_limits_test(), 1, in, out) ); + +#if defined(__NVCC__) + // FIXME + // These subtests compiles only with nvcc and fail with HIPCC and clang-cuda + CALL_SUBTEST( run_and_compare_to_gpu(eigenvalues(), nthreads, in, out) ); + typedef Matrix Matrix6f; + CALL_SUBTEST( run_and_compare_to_gpu(eigenvalues(), nthreads, in, out) ); +#endif +} diff --git a/include/eigen/test/gpu_common.h b/include/eigen/test/gpu_common.h new file mode 100644 index 0000000000000000000000000000000000000000..c37eaa13fc372c792d9ff5ee97fafb0007055e92 --- /dev/null +++ b/include/eigen/test/gpu_common.h @@ -0,0 +1,176 @@ +#ifndef EIGEN_TEST_GPU_COMMON_H +#define EIGEN_TEST_GPU_COMMON_H + +#ifdef EIGEN_USE_HIP + #include + #include +#else + #include + #include + #include +#endif + +#include + +#define EIGEN_USE_GPU +#include + +#if 
!defined(__CUDACC__) && !defined(__HIPCC__) +dim3 threadIdx, blockDim, blockIdx; +#endif + +template +void run_on_cpu(const Kernel& ker, int n, const Input& in, Output& out) +{ + for(int i=0; i +__global__ +EIGEN_HIP_LAUNCH_BOUNDS_1024 +void run_on_gpu_meta_kernel(const Kernel ker, int n, const Input* in, Output* out) +{ + int i = threadIdx.x + blockIdx.x*blockDim.x; + if(i +void run_on_gpu(const Kernel& ker, int n, const Input& in, Output& out) +{ + typename Input::Scalar* d_in; + typename Output::Scalar* d_out; + std::ptrdiff_t in_bytes = in.size() * sizeof(typename Input::Scalar); + std::ptrdiff_t out_bytes = out.size() * sizeof(typename Output::Scalar); + + gpuMalloc((void**)(&d_in), in_bytes); + gpuMalloc((void**)(&d_out), out_bytes); + + gpuMemcpy(d_in, in.data(), in_bytes, gpuMemcpyHostToDevice); + gpuMemcpy(d_out, out.data(), out_bytes, gpuMemcpyHostToDevice); + + // Simple and non-optimal 1D mapping assuming n is not too large + // That's only for unit testing! + dim3 Blocks(128); + dim3 Grids( (n+int(Blocks.x)-1)/int(Blocks.x) ); + + gpuDeviceSynchronize(); + +#ifdef EIGEN_USE_HIP + hipLaunchKernelGGL(HIP_KERNEL_NAME(run_on_gpu_meta_kernel::type, + typename std::decay::type>), + dim3(Grids), dim3(Blocks), 0, 0, ker, n, d_in, d_out); +#else + run_on_gpu_meta_kernel<<>>(ker, n, d_in, d_out); +#endif + // Pre-launch errors. + gpuError_t err = gpuGetLastError(); + if (err != gpuSuccess) { + printf("%s: %s\n", gpuGetErrorName(err), gpuGetErrorString(err)); + gpu_assert(false); + } + + // Kernel execution errors. 
+ err = gpuDeviceSynchronize(); + if (err != gpuSuccess) { + printf("%s: %s\n", gpuGetErrorName(err), gpuGetErrorString(err)); + gpu_assert(false); + } + + + // check inputs have not been modified + gpuMemcpy(const_cast(in.data()), d_in, in_bytes, gpuMemcpyDeviceToHost); + gpuMemcpy(out.data(), d_out, out_bytes, gpuMemcpyDeviceToHost); + + gpuFree(d_in); + gpuFree(d_out); +} + + +template +void run_and_compare_to_gpu(const Kernel& ker, int n, const Input& in, Output& out) +{ + Input in_ref, in_gpu; + Output out_ref, out_gpu; + #if !defined(EIGEN_GPU_COMPILE_PHASE) + in_ref = in_gpu = in; + out_ref = out_gpu = out; + #else + EIGEN_UNUSED_VARIABLE(in); + EIGEN_UNUSED_VARIABLE(out); + #endif + run_on_cpu (ker, n, in_ref, out_ref); + run_on_gpu(ker, n, in_gpu, out_gpu); + #if !defined(EIGEN_GPU_COMPILE_PHASE) + VERIFY_IS_APPROX(in_ref, in_gpu); + VERIFY_IS_APPROX(out_ref, out_gpu); + #endif +} + +struct compile_time_device_info { + EIGEN_DEVICE_FUNC + void operator()(int i, const int* /*in*/, int* info) const + { + if (i == 0) { + EIGEN_UNUSED_VARIABLE(info) + #if defined(__CUDA_ARCH__) + info[0] = int(__CUDA_ARCH__ +0); + #endif + #if defined(EIGEN_HIP_DEVICE_COMPILE) + info[1] = int(EIGEN_HIP_DEVICE_COMPILE +0); + #endif + } + } +}; + +void ei_test_init_gpu() +{ + int device = 0; + gpuDeviceProp_t deviceProp; + gpuGetDeviceProperties(&deviceProp, device); + + ArrayXi dummy(1), info(10); + info = -1; + run_on_gpu(compile_time_device_info(),10,dummy,info); + + + std::cout << "GPU compile-time info:\n"; + + #ifdef EIGEN_CUDACC + std::cout << " EIGEN_CUDACC: " << int(EIGEN_CUDACC) << "\n"; + #endif + + #ifdef EIGEN_CUDA_SDK_VER + std::cout << " EIGEN_CUDA_SDK_VER: " << int(EIGEN_CUDA_SDK_VER) << "\n"; + #endif + + #ifdef EIGEN_COMP_NVCC + std::cout << " EIGEN_COMP_NVCC: " << int(EIGEN_COMP_NVCC) << "\n"; + #endif + + #ifdef EIGEN_HIPCC + std::cout << " EIGEN_HIPCC: " << int(EIGEN_HIPCC) << "\n"; + #endif + + std::cout << " EIGEN_CUDA_ARCH: " << info[0] << "\n"; + 
std::cout << " EIGEN_HIP_DEVICE_COMPILE: " << info[1] << "\n"; + + std::cout << "GPU device info:\n"; + std::cout << " name: " << deviceProp.name << "\n"; + std::cout << " capability: " << deviceProp.major << "." << deviceProp.minor << "\n"; + std::cout << " multiProcessorCount: " << deviceProp.multiProcessorCount << "\n"; + std::cout << " maxThreadsPerMultiProcessor: " << deviceProp.maxThreadsPerMultiProcessor << "\n"; + std::cout << " warpSize: " << deviceProp.warpSize << "\n"; + std::cout << " regsPerBlock: " << deviceProp.regsPerBlock << "\n"; + std::cout << " concurrentKernels: " << deviceProp.concurrentKernels << "\n"; + std::cout << " clockRate: " << deviceProp.clockRate << "\n"; + std::cout << " canMapHostMemory: " << deviceProp.canMapHostMemory << "\n"; + std::cout << " computeMode: " << deviceProp.computeMode << "\n"; +} + +#endif // EIGEN_TEST_GPU_COMMON_H diff --git a/include/eigen/test/inverse.cpp b/include/eigen/test/inverse.cpp new file mode 100644 index 0000000000000000000000000000000000000000..9cedfa1e1069ce7a15e4056a6ab95fe4cccc7405 --- /dev/null +++ b/include/eigen/test/inverse.cpp @@ -0,0 +1,150 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008 Gael Guennebaud +// Copyright (C) 2008 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#include "main.h" +#include + +template +void inverse_for_fixed_size(const MatrixType&, typename internal::enable_if::type* = 0) +{ +} + +template +void inverse_for_fixed_size(const MatrixType& m1, typename internal::enable_if::type* = 0) +{ + using std::abs; + + MatrixType m2, identity = MatrixType::Identity(); + + typedef typename MatrixType::Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + typedef Matrix VectorType; + + //computeInverseAndDetWithCheck tests + //First: an invertible matrix + bool invertible; + Scalar det; + + m2.setZero(); + m1.computeInverseAndDetWithCheck(m2, det, invertible); + VERIFY(invertible); + VERIFY_IS_APPROX(identity, m1*m2); + VERIFY_IS_APPROX(det, m1.determinant()); + + m2.setZero(); + m1.computeInverseWithCheck(m2, invertible); + VERIFY(invertible); + VERIFY_IS_APPROX(identity, m1*m2); + + //Second: a rank one matrix (not invertible, except for 1x1 matrices) + VectorType v3 = VectorType::Random(); + MatrixType m3 = v3*v3.transpose(), m4; + m3.computeInverseAndDetWithCheck(m4, det, invertible); + VERIFY( m1.rows()==1 ? invertible : !invertible ); + VERIFY_IS_MUCH_SMALLER_THAN(abs(det-m3.determinant()), RealScalar(1)); + m3.computeInverseWithCheck(m4, invertible); + VERIFY( m1.rows()==1 ? 
invertible : !invertible ); + + // check with submatrices + { + Matrix m5; + m5.setRandom(); + m5.topLeftCorner(m1.rows(),m1.rows()) = m1; + m2 = m5.template topLeftCorner().inverse(); + VERIFY_IS_APPROX( (m5.template topLeftCorner()), m2.inverse() ); + } +} + +template void inverse(const MatrixType& m) +{ + /* this test covers the following files: + Inverse.h + */ + Index rows = m.rows(); + Index cols = m.cols(); + + typedef typename MatrixType::Scalar Scalar; + + MatrixType m1(rows, cols), + m2(rows, cols), + identity = MatrixType::Identity(rows, rows); + createRandomPIMatrixOfRank(rows,rows,rows,m1); + m2 = m1.inverse(); + VERIFY_IS_APPROX(m1, m2.inverse() ); + + VERIFY_IS_APPROX((Scalar(2)*m2).inverse(), m2.inverse()*Scalar(0.5)); + + VERIFY_IS_APPROX(identity, m1.inverse() * m1 ); + VERIFY_IS_APPROX(identity, m1 * m1.inverse() ); + + VERIFY_IS_APPROX(m1, m1.inverse().inverse() ); + + // since for the general case we implement separately row-major and col-major, test that + VERIFY_IS_APPROX(MatrixType(m1.transpose().inverse()), MatrixType(m1.inverse().transpose())); + + inverse_for_fixed_size(m1); + + // check in-place inversion + if(MatrixType::RowsAtCompileTime>=2 && MatrixType::RowsAtCompileTime<=4) + { + // in-place is forbidden + VERIFY_RAISES_ASSERT(m1 = m1.inverse()); + } + else + { + m2 = m1.inverse(); + m1 = m1.inverse(); + VERIFY_IS_APPROX(m1,m2); + } +} + +template +void inverse_zerosized() +{ + Matrix A(0,0); + { + Matrix b, x; + x = A.inverse() * b; + } + { + Matrix b(0,1), x; + x = A.inverse() * b; + VERIFY_IS_EQUAL(x.rows(), 0); + VERIFY_IS_EQUAL(x.cols(), 1); + } +} + +EIGEN_DECLARE_TEST(inverse) +{ + int s = 0; + for(int i = 0; i < g_repeat; i++) { + CALL_SUBTEST_1( inverse(Matrix()) ); + CALL_SUBTEST_2( inverse(Matrix2d()) ); + CALL_SUBTEST_3( inverse(Matrix3f()) ); + CALL_SUBTEST_4( inverse(Matrix4f()) ); + CALL_SUBTEST_4( inverse(Matrix()) ); + + s = internal::random(50,320); + CALL_SUBTEST_5( inverse(MatrixXf(s,s)) ); + 
TEST_SET_BUT_UNUSED_VARIABLE(s) + CALL_SUBTEST_5( inverse_zerosized() ); + CALL_SUBTEST_5( inverse(MatrixXf(0, 0)) ); + CALL_SUBTEST_5( inverse(MatrixXf(1, 1)) ); + + s = internal::random(25,100); + CALL_SUBTEST_6( inverse(MatrixXcd(s,s)) ); + TEST_SET_BUT_UNUSED_VARIABLE(s) + + CALL_SUBTEST_7( inverse(Matrix4d()) ); + CALL_SUBTEST_7( inverse(Matrix()) ); + + CALL_SUBTEST_8( inverse(Matrix4cd()) ); + } +} diff --git a/include/eigen/test/jacobisvd.cpp b/include/eigen/test/jacobisvd.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c8818c88bae520e7875e05271c1894b0e60c6845 --- /dev/null +++ b/include/eigen/test/jacobisvd.cpp @@ -0,0 +1,156 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2014 Gael Guennebaud +// Copyright (C) 2009 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +// We explicitly disable deprecated declarations for this set of tests +// because we purposely verify assertions for the deprecated SVD runtime +// option behavior. 
+#if defined(__GNUC__) +#pragma GCC diagnostic ignored "-Wdeprecated-declarations" +#elif defined(_MSC_VER) +#pragma warning( disable : 4996 ) +#endif + +// discard stack allocation as that too bypasses malloc +#define EIGEN_STACK_ALLOCATION_LIMIT 0 +#define EIGEN_RUNTIME_NO_MALLOC +#include "main.h" +#include + +#define SVD_DEFAULT(M) JacobiSVD +#define SVD_FOR_MIN_NORM(M) JacobiSVD +#include "svd_common.h" + +// Check all variants of JacobiSVD +template +void jacobisvd(const MatrixType& a = MatrixType(), bool pickrandom = true) +{ + MatrixType m = a; + if(pickrandom) + svd_fill_random(m); + + CALL_SUBTEST(( svd_test_all_computation_options >(m, true) )); // check full only + CALL_SUBTEST(( svd_test_all_computation_options >(m, false) )); + CALL_SUBTEST(( svd_test_all_computation_options >(m, false) )); + if(m.rows()==m.cols()) + CALL_SUBTEST(( svd_test_all_computation_options >(m, false) )); +} + +template void jacobisvd_verify_assert(const MatrixType& m) +{ + svd_verify_assert >(m); + svd_verify_assert >(m, true); + svd_verify_assert >(m); + svd_verify_assert >(m); + Index rows = m.rows(); + Index cols = m.cols(); + + enum { + ColsAtCompileTime = MatrixType::ColsAtCompileTime + }; + + + MatrixType a = MatrixType::Zero(rows, cols); + a.setZero(); + + if (ColsAtCompileTime == Dynamic) + { + JacobiSVD svd_fullqr; + VERIFY_RAISES_ASSERT(svd_fullqr.compute(a, ComputeFullU|ComputeThinV)) + VERIFY_RAISES_ASSERT(svd_fullqr.compute(a, ComputeThinU|ComputeThinV)) + VERIFY_RAISES_ASSERT(svd_fullqr.compute(a, ComputeThinU|ComputeFullV)) + } +} + +template +void jacobisvd_method() +{ + enum { Size = MatrixType::RowsAtCompileTime }; + typedef typename MatrixType::RealScalar RealScalar; + typedef Matrix RealVecType; + MatrixType m = MatrixType::Identity(); + VERIFY_IS_APPROX(m.jacobiSvd().singularValues(), RealVecType::Ones()); + VERIFY_RAISES_ASSERT(m.jacobiSvd().matrixU()); + VERIFY_RAISES_ASSERT(m.jacobiSvd().matrixV()); + 
VERIFY_IS_APPROX(m.jacobiSvd(ComputeFullU|ComputeFullV).solve(m), m); + VERIFY_IS_APPROX(m.jacobiSvd(ComputeFullU|ComputeFullV).transpose().solve(m), m); + VERIFY_IS_APPROX(m.jacobiSvd(ComputeFullU|ComputeFullV).adjoint().solve(m), m); +} + +namespace Foo { +// older compiler require a default constructor for Bar +// cf: https://stackoverflow.com/questions/7411515/ +class Bar {public: Bar() {}}; +bool operator<(const Bar&, const Bar&) { return true; } +} +// regression test for a very strange MSVC issue for which simply +// including SVDBase.h messes up with std::max and custom scalar type +void msvc_workaround() +{ + const Foo::Bar a; + const Foo::Bar b; + std::max EIGEN_NOT_A_MACRO (a,b); +} + +EIGEN_DECLARE_TEST(jacobisvd) +{ + CALL_SUBTEST_3(( jacobisvd_verify_assert(Matrix3f()) )); + CALL_SUBTEST_4(( jacobisvd_verify_assert(Matrix4d()) )); + CALL_SUBTEST_7(( jacobisvd_verify_assert(MatrixXf(10,12)) )); + CALL_SUBTEST_8(( jacobisvd_verify_assert(MatrixXcd(7,5)) )); + + CALL_SUBTEST_11(svd_all_trivial_2x2(jacobisvd)); + CALL_SUBTEST_12(svd_all_trivial_2x2(jacobisvd)); + + for(int i = 0; i < g_repeat; i++) { + CALL_SUBTEST_3(( jacobisvd() )); + CALL_SUBTEST_4(( jacobisvd() )); + CALL_SUBTEST_5(( jacobisvd >() )); + CALL_SUBTEST_6(( jacobisvd >(Matrix(10,2)) )); + + int r = internal::random(1, 30), + c = internal::random(1, 30); + + TEST_SET_BUT_UNUSED_VARIABLE(r) + TEST_SET_BUT_UNUSED_VARIABLE(c) + + CALL_SUBTEST_10(( jacobisvd(MatrixXd(r,c)) )); + CALL_SUBTEST_7(( jacobisvd(MatrixXf(r,c)) )); + CALL_SUBTEST_8(( jacobisvd(MatrixXcd(r,c)) )); + (void) r; + (void) c; + + // Test on inf/nan matrix + CALL_SUBTEST_7( (svd_inf_nan, MatrixXf>()) ); + CALL_SUBTEST_10( (svd_inf_nan, MatrixXd>()) ); + + // bug1395 test compile-time vectors as input + CALL_SUBTEST_13(( jacobisvd_verify_assert(Matrix()) )); + CALL_SUBTEST_13(( jacobisvd_verify_assert(Matrix()) )); + CALL_SUBTEST_13(( jacobisvd_verify_assert(Matrix(r)) )); + CALL_SUBTEST_13(( 
jacobisvd_verify_assert(Matrix(c)) )); + } + + CALL_SUBTEST_7(( jacobisvd(MatrixXf(internal::random(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/2), internal::random(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/2))) )); + CALL_SUBTEST_8(( jacobisvd(MatrixXcd(internal::random(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/3), internal::random(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/3))) )); + + // test matrixbase method + CALL_SUBTEST_1(( jacobisvd_method() )); + CALL_SUBTEST_3(( jacobisvd_method() )); + + // Test problem size constructors + CALL_SUBTEST_7( JacobiSVD(10,10) ); + + // Check that preallocation avoids subsequent mallocs + CALL_SUBTEST_9( svd_preallocate() ); + + CALL_SUBTEST_2( svd_underoverflow() ); + + msvc_workaround(); +} diff --git a/include/eigen/test/klu_support.cpp b/include/eigen/test/klu_support.cpp new file mode 100644 index 0000000000000000000000000000000000000000..f806ad50ef566bdb569fae6776a90323ee2076ca --- /dev/null +++ b/include/eigen/test/klu_support.cpp @@ -0,0 +1,32 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2011 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#define EIGEN_NO_DEBUG_SMALL_PRODUCT_BLOCKS +#include "sparse_solver.h" + +#include + +template void test_klu_support_T() +{ + KLU > klu_colmajor; + KLU > klu_rowmajor; + + check_sparse_square_solving(klu_colmajor); + check_sparse_square_solving(klu_rowmajor); + + //check_sparse_square_determinant(umfpack_colmajor); + //check_sparse_square_determinant(umfpack_rowmajor); +} + +EIGEN_DECLARE_TEST(klu_support) +{ + CALL_SUBTEST_1(test_klu_support_T()); + CALL_SUBTEST_2(test_klu_support_T >()); +} + diff --git a/include/eigen/test/linearstructure.cpp b/include/eigen/test/linearstructure.cpp new file mode 100644 index 0000000000000000000000000000000000000000..46ee5162b84049103500e2997814923f5bfa0243 --- /dev/null +++ b/include/eigen/test/linearstructure.cpp @@ -0,0 +1,147 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2006-2008 Benoit Jacob +// Copyright (C) 2014 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +static bool g_called; +#define EIGEN_SCALAR_BINARY_OP_PLUGIN { g_called |= (!internal::is_same::value); } + +#include "main.h" + +template void linearStructure(const MatrixType& m) +{ + using std::abs; + /* this test covers the following files: + CwiseUnaryOp.h, CwiseBinaryOp.h, SelfCwiseBinaryOp.h + */ + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::RealScalar RealScalar; + + Index rows = m.rows(); + Index cols = m.cols(); + + // this test relies a lot on Random.h, and there's not much more that we can do + // to test it, hence I consider that we will have tested Random.h + MatrixType m1 = MatrixType::Random(rows, cols), + m2 = MatrixType::Random(rows, cols), + m3(rows, cols); + + Scalar s1 = internal::random(); + while (abs(s1)(); + + Index r = internal::random(0, rows-1), + c = internal::random(0, cols-1); + + VERIFY_IS_APPROX(-(-m1), m1); + VERIFY_IS_APPROX(m1+m1, 2*m1); + VERIFY_IS_APPROX(m1+m2-m1, m2); + VERIFY_IS_APPROX(-m2+m1+m2, m1); + VERIFY_IS_APPROX(m1*s1, s1*m1); + VERIFY_IS_APPROX((m1+m2)*s1, s1*m1+s1*m2); + VERIFY_IS_APPROX((-m1+m2)*s1, -s1*m1+s1*m2); + m3 = m2; m3 += m1; + VERIFY_IS_APPROX(m3, m1+m2); + m3 = m2; m3 -= m1; + VERIFY_IS_APPROX(m3, m2-m1); + m3 = m2; m3 *= s1; + VERIFY_IS_APPROX(m3, s1*m2); + if(!NumTraits::IsInteger) + { + m3 = m2; m3 /= s1; + VERIFY_IS_APPROX(m3, m2/s1); + } + + // again, test operator() to check const-qualification + VERIFY_IS_APPROX((-m1)(r,c), -(m1(r,c))); + VERIFY_IS_APPROX((m1-m2)(r,c), (m1(r,c))-(m2(r,c))); + VERIFY_IS_APPROX((m1+m2)(r,c), (m1(r,c))+(m2(r,c))); + VERIFY_IS_APPROX((s1*m1)(r,c), s1*(m1(r,c))); + VERIFY_IS_APPROX((m1*s1)(r,c), (m1(r,c))*s1); + if(!NumTraits::IsInteger) + VERIFY_IS_APPROX((m1/s1)(r,c), (m1(r,c))/s1); + + // use .block to disable vectorization and compare to the vectorized version + VERIFY_IS_APPROX(m1+m1.block(0,0,rows,cols), m1+m1); + VERIFY_IS_APPROX(m1.cwiseProduct(m1.block(0,0,rows,cols)), m1.cwiseProduct(m1)); + VERIFY_IS_APPROX(m1 - 
m1.block(0,0,rows,cols), m1 - m1); + VERIFY_IS_APPROX(m1.block(0,0,rows,cols) * s1, m1 * s1); +} + +// Make sure that complex * real and real * complex are properly optimized +template void real_complex(DenseIndex rows = MatrixType::RowsAtCompileTime, DenseIndex cols = MatrixType::ColsAtCompileTime) +{ + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::RealScalar RealScalar; + + RealScalar s = internal::random(); + MatrixType m1 = MatrixType::Random(rows, cols); + + g_called = false; + VERIFY_IS_APPROX(s*m1, Scalar(s)*m1); + VERIFY(g_called && "real * matrix not properly optimized"); + + g_called = false; + VERIFY_IS_APPROX(m1*s, m1*Scalar(s)); + VERIFY(g_called && "matrix * real not properly optimized"); + + g_called = false; + VERIFY_IS_APPROX(m1/s, m1/Scalar(s)); + VERIFY(g_called && "matrix / real not properly optimized"); + + g_called = false; + VERIFY_IS_APPROX(s+m1.array(), Scalar(s)+m1.array()); + VERIFY(g_called && "real + matrix not properly optimized"); + + g_called = false; + VERIFY_IS_APPROX(m1.array()+s, m1.array()+Scalar(s)); + VERIFY(g_called && "matrix + real not properly optimized"); + + g_called = false; + VERIFY_IS_APPROX(s-m1.array(), Scalar(s)-m1.array()); + VERIFY(g_called && "real - matrix not properly optimized"); + + g_called = false; + VERIFY_IS_APPROX(m1.array()-s, m1.array()-Scalar(s)); + VERIFY(g_called && "matrix - real not properly optimized"); +} + +template +void linearstructure_overflow() +{ + // make sure that /=scalar and /scalar do not overflow + // rational: 1.0/4.94e-320 overflow, but m/4.94e-320 should not + Matrix4d m2, m3; + m3 = m2 = Matrix4d::Random()*1e-20; + m2 = m2 / 4.9e-320; + VERIFY_IS_APPROX(m2.cwiseQuotient(m2), Matrix4d::Ones()); + m3 /= 4.9e-320; + VERIFY_IS_APPROX(m3.cwiseQuotient(m3), Matrix4d::Ones()); +} + +EIGEN_DECLARE_TEST(linearstructure) +{ + g_called = true; + VERIFY(g_called); // avoid `unneeded-internal-declaration` warning. 
+ for(int i = 0; i < g_repeat; i++) { + CALL_SUBTEST_1( linearStructure(Matrix()) ); + CALL_SUBTEST_2( linearStructure(Matrix2f()) ); + CALL_SUBTEST_3( linearStructure(Vector3d()) ); + CALL_SUBTEST_4( linearStructure(Matrix4d()) ); + CALL_SUBTEST_5( linearStructure(MatrixXcf(internal::random(1,EIGEN_TEST_MAX_SIZE/2), internal::random(1,EIGEN_TEST_MAX_SIZE/2))) ); + CALL_SUBTEST_6( linearStructure(MatrixXf (internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); + CALL_SUBTEST_7( linearStructure(MatrixXi (internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); + CALL_SUBTEST_8( linearStructure(MatrixXcd(internal::random(1,EIGEN_TEST_MAX_SIZE/2), internal::random(1,EIGEN_TEST_MAX_SIZE/2))) ); + CALL_SUBTEST_9( linearStructure(ArrayXXf (internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); + CALL_SUBTEST_10( linearStructure(ArrayXXcf (internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); + + CALL_SUBTEST_11( real_complex() ); + CALL_SUBTEST_11( real_complex(10,10) ); + CALL_SUBTEST_11( real_complex(10,10) ); + } + CALL_SUBTEST_4( linearstructure_overflow<0>() ); +} diff --git a/include/eigen/test/lu.cpp b/include/eigen/test/lu.cpp new file mode 100644 index 0000000000000000000000000000000000000000..1bbadcbf0ff6b65479cba01849503f7c7dc33117 --- /dev/null +++ b/include/eigen/test/lu.cpp @@ -0,0 +1,252 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2009 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#include "main.h" +#include +#include "solverbase.h" +using namespace std; + +template +typename MatrixType::RealScalar matrix_l1_norm(const MatrixType& m) { + return m.cwiseAbs().colwise().sum().maxCoeff(); +} + +template void lu_non_invertible() +{ + STATIC_CHECK(( internal::is_same::StorageIndex,int>::value )); + + typedef typename MatrixType::RealScalar RealScalar; + /* this test covers the following files: + LU.h + */ + Index rows, cols, cols2; + if(MatrixType::RowsAtCompileTime==Dynamic) + { + rows = internal::random(2,EIGEN_TEST_MAX_SIZE); + } + else + { + rows = MatrixType::RowsAtCompileTime; + } + if(MatrixType::ColsAtCompileTime==Dynamic) + { + cols = internal::random(2,EIGEN_TEST_MAX_SIZE); + cols2 = internal::random(2,EIGEN_TEST_MAX_SIZE); + } + else + { + cols2 = cols = MatrixType::ColsAtCompileTime; + } + + enum { + RowsAtCompileTime = MatrixType::RowsAtCompileTime, + ColsAtCompileTime = MatrixType::ColsAtCompileTime + }; + typedef typename internal::kernel_retval_base >::ReturnType KernelMatrixType; + typedef typename internal::image_retval_base >::ReturnType ImageMatrixType; + typedef Matrix + CMatrixType; + typedef Matrix + RMatrixType; + + Index rank = internal::random(1, (std::min)(rows, cols)-1); + + // The image of the zero matrix should consist of a single (zero) column vector + VERIFY((MatrixType::Zero(rows,cols).fullPivLu().image(MatrixType::Zero(rows,cols)).cols() == 1)); + + // The kernel of the zero matrix is the entire space, and thus is an invertible matrix of dimensions cols. + KernelMatrixType kernel = MatrixType::Zero(rows,cols).fullPivLu().kernel(); + VERIFY((kernel.fullPivLu().isInvertible())); + + MatrixType m1(rows, cols), m3(rows, cols2); + CMatrixType m2(cols, cols2); + createRandomPIMatrixOfRank(rank, rows, cols, m1); + + FullPivLU lu; + + // The special value 0.01 below works well in tests. Keep in mind that we're only computing the rank + // of singular values are either 0 or 1. 
+ // So it's not clear at all that the epsilon should play any role there. + lu.setThreshold(RealScalar(0.01)); + lu.compute(m1); + + MatrixType u(rows,cols); + u = lu.matrixLU().template triangularView(); + RMatrixType l = RMatrixType::Identity(rows,rows); + l.block(0,0,rows,(std::min)(rows,cols)).template triangularView() + = lu.matrixLU().block(0,0,rows,(std::min)(rows,cols)); + + VERIFY_IS_APPROX(lu.permutationP() * m1 * lu.permutationQ(), l*u); + + KernelMatrixType m1kernel = lu.kernel(); + ImageMatrixType m1image = lu.image(m1); + + VERIFY_IS_APPROX(m1, lu.reconstructedMatrix()); + VERIFY(rank == lu.rank()); + VERIFY(cols - lu.rank() == lu.dimensionOfKernel()); + VERIFY(!lu.isInjective()); + VERIFY(!lu.isInvertible()); + VERIFY(!lu.isSurjective()); + VERIFY_IS_MUCH_SMALLER_THAN((m1 * m1kernel), m1); + VERIFY(m1image.fullPivLu().rank() == rank); + VERIFY_IS_APPROX(m1 * m1.adjoint() * m1image, m1image); + + check_solverbase(m1, lu, rows, cols, cols2); + + m2 = CMatrixType::Random(cols,cols2); + m3 = m1*m2; + m2 = CMatrixType::Random(cols,cols2); + // test that the code, which does resize(), may be applied to an xpr + m2.block(0,0,m2.rows(),m2.cols()) = lu.solve(m3); + VERIFY_IS_APPROX(m3, m1*m2); +} + +template void lu_invertible() +{ + /* this test covers the following files: + FullPivLU.h + */ + typedef typename NumTraits::Real RealScalar; + Index size = MatrixType::RowsAtCompileTime; + if( size==Dynamic) + size = internal::random(1,EIGEN_TEST_MAX_SIZE); + + MatrixType m1(size, size), m2(size, size), m3(size, size); + FullPivLU lu; + lu.setThreshold(RealScalar(0.01)); + do { + m1 = MatrixType::Random(size,size); + lu.compute(m1); + } while(!lu.isInvertible()); + + VERIFY_IS_APPROX(m1, lu.reconstructedMatrix()); + VERIFY(0 == lu.dimensionOfKernel()); + VERIFY(lu.kernel().cols() == 1); // the kernel() should consist of a single (zero) column vector + VERIFY(size == lu.rank()); + VERIFY(lu.isInjective()); + VERIFY(lu.isSurjective()); + VERIFY(lu.isInvertible()); 
+ VERIFY(lu.image(m1).fullPivLu().isInvertible()); + + check_solverbase(m1, lu, size, size, size); + + MatrixType m1_inverse = lu.inverse(); + m3 = MatrixType::Random(size,size); + m2 = lu.solve(m3); + VERIFY_IS_APPROX(m2, m1_inverse*m3); + + RealScalar rcond = (RealScalar(1) / matrix_l1_norm(m1)) / matrix_l1_norm(m1_inverse); + const RealScalar rcond_est = lu.rcond(); + // Verify that the estimated condition number is within a factor of 10 of the + // truth. + VERIFY(rcond_est > rcond / 10 && rcond_est < rcond * 10); + + // Regression test for Bug 302 + MatrixType m4 = MatrixType::Random(size,size); + VERIFY_IS_APPROX(lu.solve(m3*m4), lu.solve(m3)*m4); +} + +template void lu_partial_piv(Index size = MatrixType::ColsAtCompileTime) +{ + /* this test covers the following files: + PartialPivLU.h + */ + typedef typename NumTraits::Real RealScalar; + + MatrixType m1(size, size), m2(size, size), m3(size, size); + m1.setRandom(); + PartialPivLU plu(m1); + + STATIC_CHECK(( internal::is_same::StorageIndex,int>::value )); + + VERIFY_IS_APPROX(m1, plu.reconstructedMatrix()); + + check_solverbase(m1, plu, size, size, size); + + MatrixType m1_inverse = plu.inverse(); + m3 = MatrixType::Random(size,size); + m2 = plu.solve(m3); + VERIFY_IS_APPROX(m2, m1_inverse*m3); + + RealScalar rcond = (RealScalar(1) / matrix_l1_norm(m1)) / matrix_l1_norm(m1_inverse); + const RealScalar rcond_est = plu.rcond(); + // Verify that the estimate is within a factor of 10 of the truth. 
+ VERIFY(rcond_est > rcond / 10 && rcond_est < rcond * 10); +} + +template void lu_verify_assert() +{ + MatrixType tmp; + + FullPivLU lu; + VERIFY_RAISES_ASSERT(lu.matrixLU()) + VERIFY_RAISES_ASSERT(lu.permutationP()) + VERIFY_RAISES_ASSERT(lu.permutationQ()) + VERIFY_RAISES_ASSERT(lu.kernel()) + VERIFY_RAISES_ASSERT(lu.image(tmp)) + VERIFY_RAISES_ASSERT(lu.solve(tmp)) + VERIFY_RAISES_ASSERT(lu.transpose().solve(tmp)) + VERIFY_RAISES_ASSERT(lu.adjoint().solve(tmp)) + VERIFY_RAISES_ASSERT(lu.determinant()) + VERIFY_RAISES_ASSERT(lu.rank()) + VERIFY_RAISES_ASSERT(lu.dimensionOfKernel()) + VERIFY_RAISES_ASSERT(lu.isInjective()) + VERIFY_RAISES_ASSERT(lu.isSurjective()) + VERIFY_RAISES_ASSERT(lu.isInvertible()) + VERIFY_RAISES_ASSERT(lu.inverse()) + + PartialPivLU plu; + VERIFY_RAISES_ASSERT(plu.matrixLU()) + VERIFY_RAISES_ASSERT(plu.permutationP()) + VERIFY_RAISES_ASSERT(plu.solve(tmp)) + VERIFY_RAISES_ASSERT(plu.transpose().solve(tmp)) + VERIFY_RAISES_ASSERT(plu.adjoint().solve(tmp)) + VERIFY_RAISES_ASSERT(plu.determinant()) + VERIFY_RAISES_ASSERT(plu.inverse()) +} + +EIGEN_DECLARE_TEST(lu) +{ + for(int i = 0; i < g_repeat; i++) { + CALL_SUBTEST_1( lu_non_invertible() ); + CALL_SUBTEST_1( lu_invertible() ); + CALL_SUBTEST_1( lu_verify_assert() ); + CALL_SUBTEST_1( lu_partial_piv() ); + + CALL_SUBTEST_2( (lu_non_invertible >()) ); + CALL_SUBTEST_2( (lu_verify_assert >()) ); + CALL_SUBTEST_2( lu_partial_piv() ); + CALL_SUBTEST_2( lu_partial_piv() ); + CALL_SUBTEST_2( (lu_partial_piv >()) ); + + CALL_SUBTEST_3( lu_non_invertible() ); + CALL_SUBTEST_3( lu_invertible() ); + CALL_SUBTEST_3( lu_verify_assert() ); + + CALL_SUBTEST_4( lu_non_invertible() ); + CALL_SUBTEST_4( lu_invertible() ); + CALL_SUBTEST_4( lu_partial_piv(internal::random(1,EIGEN_TEST_MAX_SIZE)) ); + CALL_SUBTEST_4( lu_verify_assert() ); + + CALL_SUBTEST_5( lu_non_invertible() ); + CALL_SUBTEST_5( lu_invertible() ); + CALL_SUBTEST_5( lu_verify_assert() ); + + CALL_SUBTEST_6( lu_non_invertible() ); + 
CALL_SUBTEST_6( lu_invertible() ); + CALL_SUBTEST_6( lu_partial_piv(internal::random(1,EIGEN_TEST_MAX_SIZE)) ); + CALL_SUBTEST_6( lu_verify_assert() ); + + CALL_SUBTEST_7(( lu_non_invertible >() )); + + // Test problem size constructors + CALL_SUBTEST_9( PartialPivLU(10) ); + CALL_SUBTEST_9( FullPivLU(10, 20); ); + } +} diff --git a/include/eigen/test/mapped_matrix.cpp b/include/eigen/test/mapped_matrix.cpp new file mode 100644 index 0000000000000000000000000000000000000000..0ea136ae6293b7d224db9745001e91f1157d5325 --- /dev/null +++ b/include/eigen/test/mapped_matrix.cpp @@ -0,0 +1,207 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2006-2010 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_NO_STATIC_ASSERT +#define EIGEN_NO_STATIC_ASSERT // turn static asserts into runtime asserts in order to check them +#endif + +#include "main.h" + +#define EIGEN_TESTMAP_MAX_SIZE 256 + +template void map_class_vector(const VectorType& m) +{ + typedef typename VectorType::Scalar Scalar; + + Index size = m.size(); + + Scalar* array1 = internal::aligned_new(size); + Scalar* array2 = internal::aligned_new(size); + Scalar* array3 = new Scalar[size+1]; + Scalar* array3unaligned = (internal::UIntPtr(array3)%EIGEN_MAX_ALIGN_BYTES) == 0 ? 
array3+1 : array3; + Scalar array4[EIGEN_TESTMAP_MAX_SIZE]; + + Map(array1, size) = VectorType::Random(size); + Map(array2, size) = Map(array1, size); + Map(array3unaligned, size) = Map(array1, size); + Map(array4, size) = Map(array1, size); + VectorType ma1 = Map(array1, size); + VectorType ma2 = Map(array2, size); + VectorType ma3 = Map(array3unaligned, size); + VectorType ma4 = Map(array4, size); + VERIFY_IS_EQUAL(ma1, ma2); + VERIFY_IS_EQUAL(ma1, ma3); + VERIFY_IS_EQUAL(ma1, ma4); + #ifdef EIGEN_VECTORIZE + if(internal::packet_traits::Vectorizable && size>=AlignedMax) + VERIFY_RAISES_ASSERT((Map(array3unaligned, size))) + #endif + + internal::aligned_delete(array1, size); + internal::aligned_delete(array2, size); + delete[] array3; +} + +template void map_class_matrix(const MatrixType& m) +{ + typedef typename MatrixType::Scalar Scalar; + + Index rows = m.rows(), cols = m.cols(), size = rows*cols; + Scalar s1 = internal::random(); + + // array1 and array2 -> aligned heap allocation + Scalar* array1 = internal::aligned_new(size); + for(int i = 0; i < size; i++) array1[i] = Scalar(1); + Scalar* array2 = internal::aligned_new(size); + for(int i = 0; i < size; i++) array2[i] = Scalar(1); + // array3unaligned -> unaligned pointer to heap + Scalar* array3 = new Scalar[size+1]; + Index sizep1 = size + 1; // <- without this temporary MSVC 2103 generates bad code + for(Index i = 0; i < sizep1; i++) array3[i] = Scalar(1); + Scalar* array3unaligned = (internal::UIntPtr(array3)%EIGEN_MAX_ALIGN_BYTES) == 0 ? 
array3+1 : array3; + Scalar array4[256]; + if(size<=256) + for(int i = 0; i < size; i++) array4[i] = Scalar(1); + + Map map1(array1, rows, cols); + Map map2(array2, rows, cols); + Map map3(array3unaligned, rows, cols); + Map map4(array4, rows, cols); + + VERIFY_IS_EQUAL(map1, MatrixType::Ones(rows,cols)); + VERIFY_IS_EQUAL(map2, MatrixType::Ones(rows,cols)); + VERIFY_IS_EQUAL(map3, MatrixType::Ones(rows,cols)); + map1 = MatrixType::Random(rows,cols); + map2 = map1; + map3 = map1; + MatrixType ma1 = map1; + MatrixType ma2 = map2; + MatrixType ma3 = map3; + VERIFY_IS_EQUAL(map1, map2); + VERIFY_IS_EQUAL(map1, map3); + VERIFY_IS_EQUAL(ma1, ma2); + VERIFY_IS_EQUAL(ma1, ma3); + VERIFY_IS_EQUAL(ma1, map3); + + VERIFY_IS_APPROX(s1*map1, s1*map2); + VERIFY_IS_APPROX(s1*ma1, s1*ma2); + VERIFY_IS_EQUAL(s1*ma1, s1*ma3); + VERIFY_IS_APPROX(s1*map1, s1*map3); + + map2 *= s1; + map3 *= s1; + VERIFY_IS_APPROX(s1*map1, map2); + VERIFY_IS_APPROX(s1*map1, map3); + + if(size<=256) + { + VERIFY_IS_EQUAL(map4, MatrixType::Ones(rows,cols)); + map4 = map1; + MatrixType ma4 = map4; + VERIFY_IS_EQUAL(map1, map4); + VERIFY_IS_EQUAL(ma1, map4); + VERIFY_IS_EQUAL(ma1, ma4); + VERIFY_IS_APPROX(s1*map1, s1*map4); + + map4 *= s1; + VERIFY_IS_APPROX(s1*map1, map4); + } + + internal::aligned_delete(array1, size); + internal::aligned_delete(array2, size); + delete[] array3; +} + +template void map_static_methods(const VectorType& m) +{ + typedef typename VectorType::Scalar Scalar; + + Index size = m.size(); + + Scalar* array1 = internal::aligned_new(size); + Scalar* array2 = internal::aligned_new(size); + Scalar* array3 = new Scalar[size+1]; + Scalar* array3unaligned = internal::UIntPtr(array3)%EIGEN_MAX_ALIGN_BYTES == 0 ? 
array3+1 : array3; + + VectorType::MapAligned(array1, size) = VectorType::Random(size); + VectorType::Map(array2, size) = VectorType::Map(array1, size); + VectorType::Map(array3unaligned, size) = VectorType::Map(array1, size); + VectorType ma1 = VectorType::Map(array1, size); + VectorType ma2 = VectorType::MapAligned(array2, size); + VectorType ma3 = VectorType::Map(array3unaligned, size); + VERIFY_IS_EQUAL(ma1, ma2); + VERIFY_IS_EQUAL(ma1, ma3); + + internal::aligned_delete(array1, size); + internal::aligned_delete(array2, size); + delete[] array3; +} + +template void check_const_correctness(const PlainObjectType&) +{ + // there's a lot that we can't test here while still having this test compile! + // the only possible approach would be to run a script trying to compile stuff and checking that it fails. + // CMake can help with that. + + // verify that map-to-const don't have LvalueBit + typedef typename internal::add_const::type ConstPlainObjectType; + VERIFY( !(internal::traits >::Flags & LvalueBit) ); + VERIFY( !(internal::traits >::Flags & LvalueBit) ); + VERIFY( !(Map::Flags & LvalueBit) ); + VERIFY( !(Map::Flags & LvalueBit) ); +} + +template +void map_not_aligned_on_scalar() +{ + typedef Matrix MatrixType; + Index size = 11; + Scalar* array1 = internal::aligned_new((size+1)*(size+1)+1); + Scalar* array2 = reinterpret_cast(sizeof(Scalar)/2+std::size_t(array1)); + Map > map2(array2, size, size, OuterStride<>(size+1)); + MatrixType m2 = MatrixType::Random(size,size); + map2 = m2; + VERIFY_IS_EQUAL(m2, map2); + + typedef Matrix VectorType; + Map map3(array2, size); + MatrixType v3 = VectorType::Random(size); + map3 = v3; + VERIFY_IS_EQUAL(v3, map3); + + internal::aligned_delete(array1, (size+1)*(size+1)+1); +} + +EIGEN_DECLARE_TEST(mapped_matrix) +{ + for(int i = 0; i < g_repeat; i++) { + CALL_SUBTEST_1( map_class_vector(Matrix()) ); + CALL_SUBTEST_1( check_const_correctness(Matrix()) ); + CALL_SUBTEST_2( map_class_vector(Vector4d()) ); + CALL_SUBTEST_2( 
map_class_vector(VectorXd(13)) ); + CALL_SUBTEST_2( check_const_correctness(Matrix4d()) ); + CALL_SUBTEST_3( map_class_vector(RowVector4f()) ); + CALL_SUBTEST_4( map_class_vector(VectorXcf(8)) ); + CALL_SUBTEST_5( map_class_vector(VectorXi(12)) ); + CALL_SUBTEST_5( check_const_correctness(VectorXi(12)) ); + + CALL_SUBTEST_1( map_class_matrix(Matrix()) ); + CALL_SUBTEST_2( map_class_matrix(Matrix4d()) ); + CALL_SUBTEST_11( map_class_matrix(Matrix()) ); + CALL_SUBTEST_4( map_class_matrix(MatrixXcf(internal::random(1,10),internal::random(1,10))) ); + CALL_SUBTEST_5( map_class_matrix(MatrixXi(internal::random(1,10),internal::random(1,10))) ); + + CALL_SUBTEST_6( map_static_methods(Matrix()) ); + CALL_SUBTEST_7( map_static_methods(Vector3f()) ); + CALL_SUBTEST_8( map_static_methods(RowVector3d()) ); + CALL_SUBTEST_9( map_static_methods(VectorXcd(8)) ); + CALL_SUBTEST_10( map_static_methods(VectorXf(12)) ); + CALL_SUBTEST_11( map_not_aligned_on_scalar() ); + } +} diff --git a/include/eigen/test/mapstaticmethods.cpp b/include/eigen/test/mapstaticmethods.cpp new file mode 100644 index 0000000000000000000000000000000000000000..d0128ba94f9b702a3105e76b287662a51dd2d5b2 --- /dev/null +++ b/include/eigen/test/mapstaticmethods.cpp @@ -0,0 +1,177 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2011 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#include "main.h" + +// GCC<=4.8 has spurious shadow warnings, because `ptr` re-appears inside template instantiations +// workaround: put these in an anonymous namespace +namespace { +float *ptr; +const float *const_ptr; +} + +template +struct mapstaticmethods_impl {}; + +template +struct mapstaticmethods_impl +{ + static void run(const PlainObjectType& m) + { + mapstaticmethods_impl::run(m); + + int i = internal::random(2,5), j = internal::random(2,5); + + PlainObjectType::Map(ptr).setZero(); + PlainObjectType::MapAligned(ptr).setZero(); + PlainObjectType::Map(const_ptr).sum(); + PlainObjectType::MapAligned(const_ptr).sum(); + + PlainObjectType::Map(ptr, InnerStride<>(i)).setZero(); + PlainObjectType::MapAligned(ptr, InnerStride<>(i)).setZero(); + PlainObjectType::Map(const_ptr, InnerStride<>(i)).sum(); + PlainObjectType::MapAligned(const_ptr, InnerStride<>(i)).sum(); + + PlainObjectType::Map(ptr, InnerStride<2>()).setZero(); + PlainObjectType::MapAligned(ptr, InnerStride<3>()).setZero(); + PlainObjectType::Map(const_ptr, InnerStride<4>()).sum(); + PlainObjectType::MapAligned(const_ptr, InnerStride<5>()).sum(); + + PlainObjectType::Map(ptr, OuterStride<>(i)).setZero(); + PlainObjectType::MapAligned(ptr, OuterStride<>(i)).setZero(); + PlainObjectType::Map(const_ptr, OuterStride<>(i)).sum(); + PlainObjectType::MapAligned(const_ptr, OuterStride<>(i)).sum(); + + PlainObjectType::Map(ptr, OuterStride<2>()).setZero(); + PlainObjectType::MapAligned(ptr, OuterStride<3>()).setZero(); + PlainObjectType::Map(const_ptr, OuterStride<4>()).sum(); + PlainObjectType::MapAligned(const_ptr, OuterStride<5>()).sum(); + + PlainObjectType::Map(ptr, Stride(i,j)).setZero(); + PlainObjectType::MapAligned(ptr, Stride<2,Dynamic>(2,i)).setZero(); + PlainObjectType::Map(const_ptr, Stride(i,3)).sum(); + PlainObjectType::MapAligned(const_ptr, Stride(i,j)).sum(); + + PlainObjectType::Map(ptr, Stride<2,3>()).setZero(); + PlainObjectType::MapAligned(ptr, Stride<3,4>()).setZero(); + 
PlainObjectType::Map(const_ptr, Stride<2,4>()).sum(); + PlainObjectType::MapAligned(const_ptr, Stride<5,3>()).sum(); + } +}; + +template +struct mapstaticmethods_impl +{ + static void run(const PlainObjectType& m) + { + Index rows = m.rows(), cols = m.cols(); + + int i = internal::random(2,5), j = internal::random(2,5); + + PlainObjectType::Map(ptr, rows, cols).setZero(); + PlainObjectType::MapAligned(ptr, rows, cols).setZero(); + PlainObjectType::Map(const_ptr, rows, cols).sum(); + PlainObjectType::MapAligned(const_ptr, rows, cols).sum(); + + PlainObjectType::Map(ptr, rows, cols, InnerStride<>(i)).setZero(); + PlainObjectType::MapAligned(ptr, rows, cols, InnerStride<>(i)).setZero(); + PlainObjectType::Map(const_ptr, rows, cols, InnerStride<>(i)).sum(); + PlainObjectType::MapAligned(const_ptr, rows, cols, InnerStride<>(i)).sum(); + + PlainObjectType::Map(ptr, rows, cols, InnerStride<2>()).setZero(); + PlainObjectType::MapAligned(ptr, rows, cols, InnerStride<3>()).setZero(); + PlainObjectType::Map(const_ptr, rows, cols, InnerStride<4>()).sum(); + PlainObjectType::MapAligned(const_ptr, rows, cols, InnerStride<5>()).sum(); + + PlainObjectType::Map(ptr, rows, cols, OuterStride<>(i)).setZero(); + PlainObjectType::MapAligned(ptr, rows, cols, OuterStride<>(i)).setZero(); + PlainObjectType::Map(const_ptr, rows, cols, OuterStride<>(i)).sum(); + PlainObjectType::MapAligned(const_ptr, rows, cols, OuterStride<>(i)).sum(); + + PlainObjectType::Map(ptr, rows, cols, OuterStride<2>()).setZero(); + PlainObjectType::MapAligned(ptr, rows, cols, OuterStride<3>()).setZero(); + PlainObjectType::Map(const_ptr, rows, cols, OuterStride<4>()).sum(); + PlainObjectType::MapAligned(const_ptr, rows, cols, OuterStride<5>()).sum(); + + PlainObjectType::Map(ptr, rows, cols, Stride(i,j)).setZero(); + PlainObjectType::MapAligned(ptr, rows, cols, Stride<2,Dynamic>(2,i)).setZero(); + PlainObjectType::Map(const_ptr, rows, cols, Stride(i,3)).sum(); + PlainObjectType::MapAligned(const_ptr, rows, cols, 
Stride(i,j)).sum(); + + PlainObjectType::Map(ptr, rows, cols, Stride<2,3>()).setZero(); + PlainObjectType::MapAligned(ptr, rows, cols, Stride<3,4>()).setZero(); + PlainObjectType::Map(const_ptr, rows, cols, Stride<2,4>()).sum(); + PlainObjectType::MapAligned(const_ptr, rows, cols, Stride<5,3>()).sum(); + } +}; + +template +struct mapstaticmethods_impl +{ + static void run(const PlainObjectType& v) + { + Index size = v.size(); + + int i = internal::random(2,5); + + PlainObjectType::Map(ptr, size).setZero(); + PlainObjectType::MapAligned(ptr, size).setZero(); + PlainObjectType::Map(const_ptr, size).sum(); + PlainObjectType::MapAligned(const_ptr, size).sum(); + + PlainObjectType::Map(ptr, size, InnerStride<>(i)).setZero(); + PlainObjectType::MapAligned(ptr, size, InnerStride<>(i)).setZero(); + PlainObjectType::Map(const_ptr, size, InnerStride<>(i)).sum(); + PlainObjectType::MapAligned(const_ptr, size, InnerStride<>(i)).sum(); + + PlainObjectType::Map(ptr, size, InnerStride<2>()).setZero(); + PlainObjectType::MapAligned(ptr, size, InnerStride<3>()).setZero(); + PlainObjectType::Map(const_ptr, size, InnerStride<4>()).sum(); + PlainObjectType::MapAligned(const_ptr, size, InnerStride<5>()).sum(); + } +}; + +template +void mapstaticmethods(const PlainObjectType& m) +{ + mapstaticmethods_impl::run(m); + VERIFY(true); // just to avoid 'unused function' warning +} + +EIGEN_DECLARE_TEST(mapstaticmethods) +{ + ptr = internal::aligned_new(1000); + for(int i = 0; i < 1000; i++) ptr[i] = float(i); + + const_ptr = ptr; + + CALL_SUBTEST_1(( mapstaticmethods(Matrix()) )); + CALL_SUBTEST_1(( mapstaticmethods(Vector2f()) )); + CALL_SUBTEST_2(( mapstaticmethods(Vector3f()) )); + CALL_SUBTEST_2(( mapstaticmethods(Matrix2f()) )); + CALL_SUBTEST_3(( mapstaticmethods(Matrix4f()) )); + CALL_SUBTEST_3(( mapstaticmethods(Array4f()) )); + CALL_SUBTEST_4(( mapstaticmethods(Array3f()) )); + CALL_SUBTEST_4(( mapstaticmethods(Array33f()) )); + CALL_SUBTEST_5(( mapstaticmethods(Array44f()) )); + 
CALL_SUBTEST_5(( mapstaticmethods(VectorXf(1)) )); + CALL_SUBTEST_5(( mapstaticmethods(VectorXf(8)) )); + CALL_SUBTEST_6(( mapstaticmethods(MatrixXf(1,1)) )); + CALL_SUBTEST_6(( mapstaticmethods(MatrixXf(5,7)) )); + CALL_SUBTEST_7(( mapstaticmethods(ArrayXf(1)) )); + CALL_SUBTEST_7(( mapstaticmethods(ArrayXf(5)) )); + CALL_SUBTEST_8(( mapstaticmethods(ArrayXXf(1,1)) )); + CALL_SUBTEST_8(( mapstaticmethods(ArrayXXf(8,6)) )); + + internal::aligned_delete(ptr, 1000); +} + diff --git a/include/eigen/test/mapstride.cpp b/include/eigen/test/mapstride.cpp new file mode 100644 index 0000000000000000000000000000000000000000..fde73f2eccf62f48907812f16acbfad31bb03d91 --- /dev/null +++ b/include/eigen/test/mapstride.cpp @@ -0,0 +1,260 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2010 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#include "main.h" + +template void map_class_vector(const VectorType& m) +{ + typedef typename VectorType::Scalar Scalar; + + Index size = m.size(); + + VectorType v = VectorType::Random(size); + + Index arraysize = 3*size; + + Scalar* a_array = internal::aligned_new(arraysize+1); + Scalar* array = a_array; + if(Alignment!=Aligned) + array = (Scalar*)(internal::IntPtr(a_array) + (internal::packet_traits::AlignedOnScalar?sizeof(Scalar):sizeof(typename NumTraits::Real))); + + { + Map > map(array, size); + map = v; + for(int i = 0; i < size; ++i) + { + VERIFY(array[3*i] == v[i]); + VERIFY(map[i] == v[i]); + } + } + + { + Map > map(array, size, InnerStride(2)); + map = v; + for(int i = 0; i < size; ++i) + { + VERIFY(array[2*i] == v[i]); + VERIFY(map[i] == v[i]); + } + } + + internal::aligned_delete(a_array, arraysize+1); +} + +template void map_class_matrix(const MatrixType& _m) +{ + typedef typename MatrixType::Scalar Scalar; + + Index rows = _m.rows(), cols = _m.cols(); + + MatrixType m = MatrixType::Random(rows,cols); + Scalar s1 = internal::random(); + + Index arraysize = 4*(rows+4)*(cols+4); + + Scalar* a_array1 = internal::aligned_new(arraysize+1); + Scalar* array1 = a_array1; + if(Alignment!=Aligned) + array1 = (Scalar*)(internal::IntPtr(a_array1) + (internal::packet_traits::AlignedOnScalar?sizeof(Scalar):sizeof(typename NumTraits::Real))); + + Scalar a_array2[256]; + Scalar* array2 = a_array2; + if(Alignment!=Aligned) + array2 = (Scalar*)(internal::IntPtr(a_array2) + (internal::packet_traits::AlignedOnScalar?sizeof(Scalar):sizeof(typename NumTraits::Real))); + else + array2 = (Scalar*)(((internal::UIntPtr(a_array2)+EIGEN_MAX_ALIGN_BYTES-1)/EIGEN_MAX_ALIGN_BYTES)*EIGEN_MAX_ALIGN_BYTES); + Index maxsize2 = a_array2 - array2 + 256; + + // test no inner stride and some dynamic outer stride + for(int k=0; k<2; ++k) + { + if(k==1 && (m.innerSize()+1)*m.outerSize() > maxsize2) + break; + Scalar* array = (k==0 ? 
array1 : array2); + + Map > map(array, rows, cols, OuterStride(m.innerSize()+1)); + map = m; + VERIFY(map.outerStride() == map.innerSize()+1); + for(int i = 0; i < m.outerSize(); ++i) + for(int j = 0; j < m.innerSize(); ++j) + { + VERIFY(array[map.outerStride()*i+j] == m.coeffByOuterInner(i,j)); + VERIFY(map.coeffByOuterInner(i,j) == m.coeffByOuterInner(i,j)); + } + VERIFY_IS_APPROX(s1*map,s1*m); + map *= s1; + VERIFY_IS_APPROX(map,s1*m); + } + + // test no inner stride and an outer stride of +4. This is quite important as for fixed-size matrices, + // this allows to hit the special case where it's vectorizable. + for(int k=0; k<2; ++k) + { + if(k==1 && (m.innerSize()+4)*m.outerSize() > maxsize2) + break; + Scalar* array = (k==0 ? array1 : array2); + + enum { + InnerSize = MatrixType::InnerSizeAtCompileTime, + OuterStrideAtCompileTime = InnerSize==Dynamic ? Dynamic : InnerSize+4 + }; + Map > + map(array, rows, cols, OuterStride(m.innerSize()+4)); + map = m; + VERIFY(map.outerStride() == map.innerSize()+4); + for(int i = 0; i < m.outerSize(); ++i) + for(int j = 0; j < m.innerSize(); ++j) + { + VERIFY(array[map.outerStride()*i+j] == m.coeffByOuterInner(i,j)); + VERIFY(map.coeffByOuterInner(i,j) == m.coeffByOuterInner(i,j)); + } + VERIFY_IS_APPROX(s1*map,s1*m); + map *= s1; + VERIFY_IS_APPROX(map,s1*m); + } + + // test both inner stride and outer stride + for(int k=0; k<2; ++k) + { + if(k==1 && (2*m.innerSize()+1)*(m.outerSize()*2) > maxsize2) + break; + Scalar* array = (k==0 ? 
array1 : array2); + + Map > map(array, rows, cols, Stride(2*m.innerSize()+1, 2)); + map = m; + VERIFY(map.outerStride() == 2*map.innerSize()+1); + VERIFY(map.innerStride() == 2); + for(int i = 0; i < m.outerSize(); ++i) + for(int j = 0; j < m.innerSize(); ++j) + { + VERIFY(array[map.outerStride()*i+map.innerStride()*j] == m.coeffByOuterInner(i,j)); + VERIFY(map.coeffByOuterInner(i,j) == m.coeffByOuterInner(i,j)); + } + VERIFY_IS_APPROX(s1*map,s1*m); + map *= s1; + VERIFY_IS_APPROX(map,s1*m); + } + + // test inner stride and no outer stride + for(int k=0; k<2; ++k) + { + if(k==1 && (m.innerSize()*2)*m.outerSize() > maxsize2) + break; + Scalar* array = (k==0 ? array1 : array2); + + Map > map(array, rows, cols, InnerStride(2)); + map = m; + VERIFY(map.outerStride() == map.innerSize()*2); + for(int i = 0; i < m.outerSize(); ++i) + for(int j = 0; j < m.innerSize(); ++j) + { + VERIFY(array[map.innerSize()*i*2+j*2] == m.coeffByOuterInner(i,j)); + VERIFY(map.coeffByOuterInner(i,j) == m.coeffByOuterInner(i,j)); + } + VERIFY_IS_APPROX(s1*map,s1*m); + map *= s1; + VERIFY_IS_APPROX(map,s1*m); + } + + // test negative strides + { + Matrix::Map(a_array1, arraysize+1).setRandom(); + Index outerstride = m.innerSize()+4; + Scalar* array = array1; + + { + Map > map1(array, rows, cols, OuterStride<>( outerstride)); + Map > map2(array+(m.outerSize()-1)*outerstride, rows, cols, OuterStride<>(-outerstride)); + if(MatrixType::IsRowMajor) VERIFY_IS_APPROX(map1.colwise().reverse(), map2); + else VERIFY_IS_APPROX(map1.rowwise().reverse(), map2); + } + + { + Map > map1(array, rows, cols, OuterStride<>( outerstride)); + Map > map2(array+(m.outerSize()-1)*outerstride+m.innerSize()-1, rows, cols, Stride(-outerstride,-1)); + VERIFY_IS_APPROX(map1.reverse(), map2); + } + + { + Map > map1(array, rows, cols, OuterStride<>( outerstride)); + Map > map2(array+(m.outerSize()-1)*outerstride+m.innerSize()-1, rows, cols, Stride(-outerstride,-1)); + VERIFY_IS_APPROX(map1.reverse(), map2); + } + } + + 
internal::aligned_delete(a_array1, arraysize+1); +} + +// Additional tests for inner-stride but no outer-stride +template +void bug1453() +{ + const int data[] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}; + typedef Matrix RowMatrixXi; + typedef Matrix ColMatrix23i; + typedef Matrix ColMatrix32i; + typedef Matrix RowMatrix23i; + typedef Matrix RowMatrix32i; + + VERIFY_IS_APPROX(MatrixXi::Map(data, 2, 3, InnerStride<2>()), MatrixXi::Map(data, 2, 3, Stride<4,2>())); + VERIFY_IS_APPROX(MatrixXi::Map(data, 2, 3, InnerStride<>(2)), MatrixXi::Map(data, 2, 3, Stride<4,2>())); + VERIFY_IS_APPROX(MatrixXi::Map(data, 3, 2, InnerStride<2>()), MatrixXi::Map(data, 3, 2, Stride<6,2>())); + VERIFY_IS_APPROX(MatrixXi::Map(data, 3, 2, InnerStride<>(2)), MatrixXi::Map(data, 3, 2, Stride<6,2>())); + + VERIFY_IS_APPROX(RowMatrixXi::Map(data, 2, 3, InnerStride<2>()), RowMatrixXi::Map(data, 2, 3, Stride<6,2>())); + VERIFY_IS_APPROX(RowMatrixXi::Map(data, 2, 3, InnerStride<>(2)), RowMatrixXi::Map(data, 2, 3, Stride<6,2>())); + VERIFY_IS_APPROX(RowMatrixXi::Map(data, 3, 2, InnerStride<2>()), RowMatrixXi::Map(data, 3, 2, Stride<4,2>())); + VERIFY_IS_APPROX(RowMatrixXi::Map(data, 3, 2, InnerStride<>(2)), RowMatrixXi::Map(data, 3, 2, Stride<4,2>())); + + VERIFY_IS_APPROX(ColMatrix23i::Map(data, InnerStride<2>()), MatrixXi::Map(data, 2, 3, Stride<4,2>())); + VERIFY_IS_APPROX(ColMatrix23i::Map(data, InnerStride<>(2)), MatrixXi::Map(data, 2, 3, Stride<4,2>())); + VERIFY_IS_APPROX(ColMatrix32i::Map(data, InnerStride<2>()), MatrixXi::Map(data, 3, 2, Stride<6,2>())); + VERIFY_IS_APPROX(ColMatrix32i::Map(data, InnerStride<>(2)), MatrixXi::Map(data, 3, 2, Stride<6,2>())); + + VERIFY_IS_APPROX(RowMatrix23i::Map(data, InnerStride<2>()), RowMatrixXi::Map(data, 2, 3, Stride<6,2>())); + VERIFY_IS_APPROX(RowMatrix23i::Map(data, InnerStride<>(2)), RowMatrixXi::Map(data, 2, 3, Stride<6,2>())); + VERIFY_IS_APPROX(RowMatrix32i::Map(data, 
InnerStride<2>()), RowMatrixXi::Map(data, 3, 2, Stride<4,2>())); + VERIFY_IS_APPROX(RowMatrix32i::Map(data, InnerStride<>(2)), RowMatrixXi::Map(data, 3, 2, Stride<4,2>())); +} + +EIGEN_DECLARE_TEST(mapstride) +{ + for(int i = 0; i < g_repeat; i++) { + int maxn = 3; + CALL_SUBTEST_1( map_class_vector(Matrix()) ); + CALL_SUBTEST_1( map_class_vector(Matrix()) ); + CALL_SUBTEST_2( map_class_vector(Vector4d()) ); + CALL_SUBTEST_2( map_class_vector(Vector4d()) ); + CALL_SUBTEST_3( map_class_vector(RowVector4f()) ); + CALL_SUBTEST_3( map_class_vector(RowVector4f()) ); + CALL_SUBTEST_4( map_class_vector(VectorXcf(internal::random(1,maxn))) ); + CALL_SUBTEST_4( map_class_vector(VectorXcf(internal::random(1,maxn))) ); + CALL_SUBTEST_5( map_class_vector(VectorXi(internal::random(1,maxn))) ); + CALL_SUBTEST_5( map_class_vector(VectorXi(internal::random(1,maxn))) ); + + CALL_SUBTEST_1( map_class_matrix(Matrix()) ); + CALL_SUBTEST_1( map_class_matrix(Matrix()) ); + CALL_SUBTEST_2( map_class_matrix(Matrix4d()) ); + CALL_SUBTEST_2( map_class_matrix(Matrix4d()) ); + CALL_SUBTEST_3( map_class_matrix(Matrix()) ); + CALL_SUBTEST_3( map_class_matrix(Matrix()) ); + CALL_SUBTEST_3( map_class_matrix(Matrix()) ); + CALL_SUBTEST_3( map_class_matrix(Matrix()) ); + CALL_SUBTEST_4( map_class_matrix(MatrixXcf(internal::random(1,maxn),internal::random(1,maxn))) ); + CALL_SUBTEST_4( map_class_matrix(MatrixXcf(internal::random(1,maxn),internal::random(1,maxn))) ); + CALL_SUBTEST_5( map_class_matrix(MatrixXi(internal::random(1,maxn),internal::random(1,maxn))) ); + CALL_SUBTEST_5( map_class_matrix(MatrixXi(internal::random(1,maxn),internal::random(1,maxn))) ); + CALL_SUBTEST_6( map_class_matrix(MatrixXcd(internal::random(1,maxn),internal::random(1,maxn))) ); + CALL_SUBTEST_6( map_class_matrix(MatrixXcd(internal::random(1,maxn),internal::random(1,maxn))) ); + + CALL_SUBTEST_5( bug1453<0>() ); + + TEST_SET_BUT_UNUSED_VARIABLE(maxn); + } +} diff --git a/include/eigen/test/nomalloc.cpp 
b/include/eigen/test/nomalloc.cpp new file mode 100644 index 0000000000000000000000000000000000000000..cb4c073e941895b9e72f6d39cdb8ae70f707598e --- /dev/null +++ b/include/eigen/test/nomalloc.cpp @@ -0,0 +1,228 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008 Gael Guennebaud +// Copyright (C) 2006-2008 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +// discard stack allocation as that too bypasses malloc +#define EIGEN_STACK_ALLOCATION_LIMIT 0 +// heap allocation will raise an assert if enabled at runtime +#define EIGEN_RUNTIME_NO_MALLOC + +#include "main.h" +#include +#include +#include +#include +#include + +template void nomalloc(const MatrixType& m) +{ + /* this test check no dynamic memory allocation are issued with fixed-size matrices + */ + typedef typename MatrixType::Scalar Scalar; + + Index rows = m.rows(); + Index cols = m.cols(); + + MatrixType m1 = MatrixType::Random(rows, cols), + m2 = MatrixType::Random(rows, cols), + m3(rows, cols); + + Scalar s1 = internal::random(); + + Index r = internal::random(0, rows-1), + c = internal::random(0, cols-1); + + VERIFY_IS_APPROX((m1+m2)*s1, s1*m1+s1*m2); + VERIFY_IS_APPROX((m1+m2)(r,c), (m1(r,c))+(m2(r,c))); + VERIFY_IS_APPROX(m1.cwiseProduct(m1.block(0,0,rows,cols)), (m1.array()*m1.array()).matrix()); + VERIFY_IS_APPROX((m1*m1.transpose())*m2, m1*(m1.transpose()*m2)); + + m2.col(0).noalias() = m1 * m1.col(0); + m2.col(0).noalias() -= m1.adjoint() * m1.col(0); + m2.col(0).noalias() -= m1 * m1.row(0).adjoint(); + m2.col(0).noalias() -= m1.adjoint() * m1.row(0).adjoint(); + + m2.row(0).noalias() = m1.row(0) * m1; + m2.row(0).noalias() -= m1.row(0) * m1.adjoint(); + m2.row(0).noalias() -= m1.col(0).adjoint() * m1; + m2.row(0).noalias() -= m1.col(0).adjoint() * 
m1.adjoint(); + VERIFY_IS_APPROX(m2,m2); + + m2.col(0).noalias() = m1.template triangularView() * m1.col(0); + m2.col(0).noalias() -= m1.adjoint().template triangularView() * m1.col(0); + m2.col(0).noalias() -= m1.template triangularView() * m1.row(0).adjoint(); + m2.col(0).noalias() -= m1.adjoint().template triangularView() * m1.row(0).adjoint(); + + m2.row(0).noalias() = m1.row(0) * m1.template triangularView(); + m2.row(0).noalias() -= m1.row(0) * m1.adjoint().template triangularView(); + m2.row(0).noalias() -= m1.col(0).adjoint() * m1.template triangularView(); + m2.row(0).noalias() -= m1.col(0).adjoint() * m1.adjoint().template triangularView(); + VERIFY_IS_APPROX(m2,m2); + + m2.col(0).noalias() = m1.template selfadjointView() * m1.col(0); + m2.col(0).noalias() -= m1.adjoint().template selfadjointView() * m1.col(0); + m2.col(0).noalias() -= m1.template selfadjointView() * m1.row(0).adjoint(); + m2.col(0).noalias() -= m1.adjoint().template selfadjointView() * m1.row(0).adjoint(); + + m2.row(0).noalias() = m1.row(0) * m1.template selfadjointView(); + m2.row(0).noalias() -= m1.row(0) * m1.adjoint().template selfadjointView(); + m2.row(0).noalias() -= m1.col(0).adjoint() * m1.template selfadjointView(); + m2.row(0).noalias() -= m1.col(0).adjoint() * m1.adjoint().template selfadjointView(); + VERIFY_IS_APPROX(m2,m2); + + m2.template selfadjointView().rankUpdate(m1.col(0),-1); + m2.template selfadjointView().rankUpdate(m1.row(0),-1); + m2.template selfadjointView().rankUpdate(m1.col(0), m1.col(0)); // rank-2 + + // The following fancy matrix-matrix products are not safe yet regarding static allocation + m2.template selfadjointView().rankUpdate(m1); + m2 += m2.template triangularView() * m1; + m2.template triangularView() = m2 * m2; + m1 += m1.template selfadjointView() * m2; + VERIFY_IS_APPROX(m2,m2); +} + +template +void ctms_decompositions() +{ + const int maxSize = 16; + const int size = 12; + + typedef Eigen::Matrix Matrix; + + typedef Eigen::Matrix Vector; + + 
typedef Eigen::Matrix, + Eigen::Dynamic, Eigen::Dynamic, + 0, + maxSize, maxSize> ComplexMatrix; + + const Matrix A(Matrix::Random(size, size)), B(Matrix::Random(size, size)); + Matrix X(size,size); + const ComplexMatrix complexA(ComplexMatrix::Random(size, size)); + const Matrix saA = A.adjoint() * A; + const Vector b(Vector::Random(size)); + Vector x(size); + + // Cholesky module + Eigen::LLT LLT; LLT.compute(A); + X = LLT.solve(B); + x = LLT.solve(b); + Eigen::LDLT LDLT; LDLT.compute(A); + X = LDLT.solve(B); + x = LDLT.solve(b); + + // Eigenvalues module + Eigen::HessenbergDecomposition hessDecomp; hessDecomp.compute(complexA); + Eigen::ComplexSchur cSchur(size); cSchur.compute(complexA); + Eigen::ComplexEigenSolver cEigSolver; cEigSolver.compute(complexA); + Eigen::EigenSolver eigSolver; eigSolver.compute(A); + Eigen::SelfAdjointEigenSolver saEigSolver(size); saEigSolver.compute(saA); + Eigen::Tridiagonalization tridiag; tridiag.compute(saA); + + // LU module + Eigen::PartialPivLU ppLU; ppLU.compute(A); + X = ppLU.solve(B); + x = ppLU.solve(b); + Eigen::FullPivLU fpLU; fpLU.compute(A); + X = fpLU.solve(B); + x = fpLU.solve(b); + + // QR module + Eigen::HouseholderQR hQR; hQR.compute(A); + X = hQR.solve(B); + x = hQR.solve(b); + Eigen::ColPivHouseholderQR cpQR; cpQR.compute(A); + X = cpQR.solve(B); + x = cpQR.solve(b); + Eigen::FullPivHouseholderQR fpQR; fpQR.compute(A); + // FIXME X = fpQR.solve(B); + x = fpQR.solve(b); + + // SVD module + Eigen::JacobiSVD jSVD; jSVD.compute(A, ComputeFullU | ComputeFullV); +} + +void test_zerosized() { + // default constructors: + Eigen::MatrixXd A; + Eigen::VectorXd v; + // explicit zero-sized: + Eigen::ArrayXXd A0(0,0); + Eigen::ArrayXd v0(0); + + // assigning empty objects to each other: + A=A0; + v=v0; +} + +template void test_reference(const MatrixType& m) { + typedef typename MatrixType::Scalar Scalar; + enum { Flag = MatrixType::IsRowMajor ? 
Eigen::RowMajor : Eigen::ColMajor}; + enum { TransposeFlag = !MatrixType::IsRowMajor ? Eigen::RowMajor : Eigen::ColMajor}; + Index rows = m.rows(), cols=m.cols(); + typedef Eigen::Matrix MatrixX; + typedef Eigen::Matrix MatrixXT; + // Dynamic reference: + typedef Eigen::Ref Ref; + typedef Eigen::Ref RefT; + + Ref r1(m); + Ref r2(m.block(rows/3, cols/4, rows/2, cols/2)); + RefT r3(m.transpose()); + RefT r4(m.topLeftCorner(rows/2, cols/2).transpose()); + + VERIFY_RAISES_ASSERT(RefT r5(m)); + VERIFY_RAISES_ASSERT(Ref r6(m.transpose())); + VERIFY_RAISES_ASSERT(Ref r7(Scalar(2) * m)); + + // Copy constructors shall also never malloc + Ref r8 = r1; + RefT r9 = r3; + + // Initializing from a compatible Ref shall also never malloc + Eigen::Ref > r10=r8, r11=m; + + // Initializing from an incompatible Ref will malloc: + typedef Eigen::Ref RefAligned; + VERIFY_RAISES_ASSERT(RefAligned r12=r10); + VERIFY_RAISES_ASSERT(Ref r13=r10); // r10 has more dynamic strides + +} + +EIGEN_DECLARE_TEST(nomalloc) +{ + // create some dynamic objects + Eigen::MatrixXd M1 = MatrixXd::Random(3,3); + Ref R1 = 2.0*M1; // Ref requires temporary + + // from here on prohibit malloc: + Eigen::internal::set_is_malloc_allowed(false); + + // check that our operator new is indeed called: + VERIFY_RAISES_ASSERT(MatrixXd dummy(MatrixXd::Random(3,3))); + CALL_SUBTEST_1(nomalloc(Matrix()) ); + CALL_SUBTEST_2(nomalloc(Matrix4d()) ); + CALL_SUBTEST_3(nomalloc(Matrix()) ); + + // Check decomposition modules with dynamic matrices that have a known compile-time max size (ctms) + CALL_SUBTEST_4(ctms_decompositions()); + + CALL_SUBTEST_5(test_zerosized()); + + CALL_SUBTEST_6(test_reference(Matrix())); + CALL_SUBTEST_7(test_reference(R1)); + CALL_SUBTEST_8(Ref R2 = M1.topRows<2>(); test_reference(R2)); +} diff --git a/include/eigen/test/nullary.cpp b/include/eigen/test/nullary.cpp new file mode 100644 index 0000000000000000000000000000000000000000..9b25ea4f36b6be5b02af2f52e7268250d91862cb --- /dev/null +++ 
b/include/eigen/test/nullary.cpp @@ -0,0 +1,341 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2010-2011 Jitse Niesen +// Copyright (C) 2016 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#include "main.h" + +template +bool equalsIdentity(const MatrixType& A) +{ + typedef typename MatrixType::Scalar Scalar; + Scalar zero = static_cast(0); + + bool offDiagOK = true; + for (Index i = 0; i < A.rows(); ++i) { + for (Index j = i+1; j < A.cols(); ++j) { + offDiagOK = offDiagOK && (A(i,j) == zero); + } + } + for (Index i = 0; i < A.rows(); ++i) { + for (Index j = 0; j < (std::min)(i, A.cols()); ++j) { + offDiagOK = offDiagOK && (A(i,j) == zero); + } + } + + bool diagOK = (A.diagonal().array() == 1).all(); + return offDiagOK && diagOK; + +} + +template +void check_extremity_accuracy(const VectorType &v, const typename VectorType::Scalar &low, const typename VectorType::Scalar &high) +{ + typedef typename VectorType::Scalar Scalar; + typedef typename VectorType::RealScalar RealScalar; + + RealScalar prec = internal::is_same::value ? 
NumTraits::dummy_precision()*10 : NumTraits::dummy_precision()/10; + Index size = v.size(); + + if(size<20) + return; + + for (int i=0; isize-6) + { + Scalar ref = (low*RealScalar(size-i-1))/RealScalar(size-1) + (high*RealScalar(i))/RealScalar(size-1); + if(std::abs(ref)>1) + { + if(!internal::isApprox(v(i), ref, prec)) + std::cout << v(i) << " != " << ref << " ; relative error: " << std::abs((v(i)-ref)/ref) << " ; required precision: " << prec << " ; range: " << low << "," << high << " ; i: " << i << "\n"; + VERIFY(internal::isApprox(v(i), (low*RealScalar(size-i-1))/RealScalar(size-1) + (high*RealScalar(i))/RealScalar(size-1), prec)); + } + } + } +} + +template +void testVectorType(const VectorType& base) +{ + typedef typename VectorType::Scalar Scalar; + typedef typename VectorType::RealScalar RealScalar; + + const Index size = base.size(); + + Scalar high = internal::random(-500,500); + Scalar low = (size == 1 ? high : internal::random(-500,500)); + if (numext::real(low)>numext::real(high)) std::swap(low,high); + + // check low==high + if(internal::random(0.f,1.f)<0.05f) + low = high; + // check abs(low) >> abs(high) + else if(size>2 && std::numeric_limits::max_exponent10>0 && internal::random(0.f,1.f)<0.1f) + low = -internal::random(1,2) * RealScalar(std::pow(RealScalar(10),std::numeric_limits::max_exponent10/2)); + + const Scalar step = ((size == 1) ? 
1 : (high-low)/RealScalar(size-1)); + + // check whether the result yields what we expect it to do + VectorType m(base); + m.setLinSpaced(size,low,high); + + if(!NumTraits::IsInteger) + { + VectorType n(size); + for (int i=0; i::IsInteger) || (range_length>=size && (Index(range_length)%(size-1))==0) || (Index(range_length+1)::IsInteger) || (range_length>=size)) + for (int i=0; i::IsInteger) + CALL_SUBTEST( check_extremity_accuracy(m, low, high) ); + } + + VERIFY( numext::real(m(m.size()-1)) <= numext::real(high) ); + VERIFY( (m.array().real() <= numext::real(high)).all() ); + VERIFY( (m.array().real() >= numext::real(low)).all() ); + + + VERIFY( numext::real(m(m.size()-1)) >= numext::real(low) ); + if(size>=1) + { + VERIFY( internal::isApprox(m(0),low) ); + VERIFY_IS_EQUAL(m(0) , low); + } + + // check whether everything works with row and col major vectors + Matrix row_vector(size); + Matrix col_vector(size); + row_vector.setLinSpaced(size,low,high); + col_vector.setLinSpaced(size,low,high); + // when using the extended precision (e.g., FPU) the relative error might exceed 1 bit + // when computing the squared sum in isApprox, thus the 2x factor. + VERIFY( row_vector.isApprox(col_vector.transpose(), RealScalar(2)*NumTraits::epsilon())); + + Matrix size_changer(size+50); + size_changer.setLinSpaced(size,low,high); + VERIFY( size_changer.size() == size ); + + typedef Matrix ScalarMatrix; + ScalarMatrix scalar; + scalar.setLinSpaced(1,low,high); + VERIFY_IS_APPROX( scalar, ScalarMatrix::Constant(high) ); + VERIFY_IS_APPROX( ScalarMatrix::LinSpaced(1,low,high), ScalarMatrix::Constant(high) ); + + // regression test for bug 526 (linear vectorized transversal) + if (size > 1 && (!NumTraits::IsInteger)) { + m.tail(size-1).setLinSpaced(low, high); + VERIFY_IS_APPROX(m(size-1), high); + } + + // regression test for bug 1383 (LinSpaced with empty size/range) + { + Index n0 = VectorType::SizeAtCompileTime==Dynamic ? 
0 : VectorType::SizeAtCompileTime; + low = internal::random(); + m = VectorType::LinSpaced(n0,low,low-RealScalar(1)); + VERIFY(m.size()==n0); + + if(VectorType::SizeAtCompileTime==Dynamic) + { + VERIFY_IS_EQUAL(VectorType::LinSpaced(n0,0,Scalar(n0-1)).sum(),Scalar(0)); + VERIFY_IS_EQUAL(VectorType::LinSpaced(n0,low,low-RealScalar(1)).sum(),Scalar(0)); + } + + m.setLinSpaced(n0,0,Scalar(n0-1)); + VERIFY(m.size()==n0); + m.setLinSpaced(n0,low,low-RealScalar(1)); + VERIFY(m.size()==n0); + + // empty range only: + VERIFY_IS_APPROX(VectorType::LinSpaced(size,low,low),VectorType::Constant(size,low)); + m.setLinSpaced(size,low,low); + VERIFY_IS_APPROX(m,VectorType::Constant(size,low)); + + if(NumTraits::IsInteger) + { + VERIFY_IS_APPROX( VectorType::LinSpaced(size,low,low+Scalar(size-1)), VectorType::LinSpaced(size,low+Scalar(size-1),low).reverse() ); + + if(VectorType::SizeAtCompileTime==Dynamic) + { + // Check negative multiplicator path: + for(Index k=1; k<5; ++k) + VERIFY_IS_APPROX( VectorType::LinSpaced(size,low,low+Scalar((size-1)*k)), VectorType::LinSpaced(size,low+Scalar((size-1)*k),low).reverse() ); + // Check negative divisor path: + for(Index k=1; k<5; ++k) + VERIFY_IS_APPROX( VectorType::LinSpaced(size*k,low,low+Scalar(size-1)), VectorType::LinSpaced(size*k,low+Scalar(size-1),low).reverse() ); + } + } + } + + // test setUnit() + if(m.size()>0) + { + for(Index k=0; k<10; ++k) + { + Index i = internal::random(0,m.size()-1); + m.setUnit(i); + VERIFY_IS_APPROX( m, VectorType::Unit(m.size(), i) ); + } + if(VectorType::SizeAtCompileTime==Dynamic) + { + Index i = internal::random(0,2*m.size()-1); + m.setUnit(2*m.size(),i); + VERIFY_IS_APPROX( m, VectorType::Unit(m.size(),i) ); + } + } + +} + +template +void testMatrixType(const MatrixType& m) +{ + using std::abs; + const Index rows = m.rows(); + const Index cols = m.cols(); + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::RealScalar RealScalar; + + Scalar s1; + do { + s1 = 
internal::random(); + } while(abs(s1)::IsInteger)); + + MatrixType A; + A.setIdentity(rows, cols); + VERIFY(equalsIdentity(A)); + VERIFY(equalsIdentity(MatrixType::Identity(rows, cols))); + + + A = MatrixType::Constant(rows,cols,s1); + Index i = internal::random(0,rows-1); + Index j = internal::random(0,cols-1); + VERIFY_IS_APPROX( MatrixType::Constant(rows,cols,s1)(i,j), s1 ); + VERIFY_IS_APPROX( MatrixType::Constant(rows,cols,s1).coeff(i,j), s1 ); + VERIFY_IS_APPROX( A(i,j), s1 ); +} + +template +void bug79() +{ + // Assignment of a RowVectorXd to a MatrixXd (regression test for bug #79). + VERIFY( (MatrixXd(RowVectorXd::LinSpaced(3, 0, 1)) - RowVector3d(0, 0.5, 1)).norm() < std::numeric_limits::epsilon() ); +} + +template +void bug1630() +{ + Array4d x4 = Array4d::LinSpaced(0.0, 1.0); + Array3d x3(Array4d::LinSpaced(0.0, 1.0).head(3)); + VERIFY_IS_APPROX(x4.head(3), x3); +} + +template +void nullary_overflow() +{ + // Check possible overflow issue + int n = 60000; + ArrayXi a1(n), a2(n); + a1.setLinSpaced(n, 0, n-1); + for(int i=0; i +void nullary_internal_logic() +{ + // check some internal logic + VERIFY(( internal::has_nullary_operator >::value )); + VERIFY(( !internal::has_unary_operator >::value )); + VERIFY(( !internal::has_binary_operator >::value )); + VERIFY(( internal::functor_has_linear_access >::ret )); + + VERIFY(( !internal::has_nullary_operator >::value )); + VERIFY(( !internal::has_unary_operator >::value )); + VERIFY(( internal::has_binary_operator >::value )); + VERIFY(( !internal::functor_has_linear_access >::ret )); + + VERIFY(( !internal::has_nullary_operator >::value )); + VERIFY(( internal::has_unary_operator >::value )); + VERIFY(( !internal::has_binary_operator >::value )); + VERIFY(( internal::functor_has_linear_access >::ret )); + + // Regression unit test for a weird MSVC bug. + // Search "nullary_wrapper_workaround_msvc" in CoreEvaluators.h for the details. + // See also traits::match. 
+ { + MatrixXf A = MatrixXf::Random(3,3); + Ref R = 2.0*A; + VERIFY_IS_APPROX(R, A+A); + + Ref R1 = MatrixXf::Random(3,3)+A; + + VectorXi V = VectorXi::Random(3); + Ref R2 = VectorXi::LinSpaced(3,1,3)+V; + VERIFY_IS_APPROX(R2, V+Vector3i(1,2,3)); + + VERIFY(( internal::has_nullary_operator >::value )); + VERIFY(( !internal::has_unary_operator >::value )); + VERIFY(( !internal::has_binary_operator >::value )); + VERIFY(( internal::functor_has_linear_access >::ret )); + + VERIFY(( !internal::has_nullary_operator >::value )); + VERIFY(( internal::has_unary_operator >::value )); + VERIFY(( !internal::has_binary_operator >::value )); + VERIFY(( internal::functor_has_linear_access >::ret )); + } +} + +EIGEN_DECLARE_TEST(nullary) +{ + CALL_SUBTEST_1( testMatrixType(Matrix2d()) ); + CALL_SUBTEST_2( testMatrixType(MatrixXcf(internal::random(1,300),internal::random(1,300))) ); + CALL_SUBTEST_3( testMatrixType(MatrixXf(internal::random(1,300),internal::random(1,300))) ); + + for(int i = 0; i < g_repeat*10; i++) { + CALL_SUBTEST_3( testVectorType(VectorXcd(internal::random(1,30000))) ); + CALL_SUBTEST_4( testVectorType(VectorXd(internal::random(1,30000))) ); + CALL_SUBTEST_5( testVectorType(Vector4d()) ); // regression test for bug 232 + CALL_SUBTEST_6( testVectorType(Vector3d()) ); + CALL_SUBTEST_7( testVectorType(VectorXf(internal::random(1,30000))) ); + CALL_SUBTEST_8( testVectorType(Vector3f()) ); + CALL_SUBTEST_8( testVectorType(Vector4f()) ); + CALL_SUBTEST_8( testVectorType(Matrix()) ); + CALL_SUBTEST_8( testVectorType(Matrix()) ); + + CALL_SUBTEST_9( testVectorType(VectorXi(internal::random(1,10))) ); + CALL_SUBTEST_9( testVectorType(VectorXi(internal::random(9,300))) ); + CALL_SUBTEST_9( testVectorType(Matrix()) ); + } + + CALL_SUBTEST_6( bug79<0>() ); + CALL_SUBTEST_6( bug1630<0>() ); + CALL_SUBTEST_9( nullary_overflow<0>() ); + CALL_SUBTEST_10( nullary_internal_logic<0>() ); +} diff --git a/include/eigen/test/num_dimensions.cpp 
b/include/eigen/test/num_dimensions.cpp new file mode 100644 index 0000000000000000000000000000000000000000..7ad7ef6979d67cf3390bb4ce609f688d183cfea0 --- /dev/null +++ b/include/eigen/test/num_dimensions.cpp @@ -0,0 +1,90 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2018 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#include "main.h" +#include + +template +void check_dim(const Xpr& ) { + STATIC_CHECK( Xpr::NumDimensions == ExpectedDim ); +} + +#if EIGEN_HAS_CXX11 +template