repo
stringlengths 1
152
⌀ | file
stringlengths 14
221
| code
stringlengths 501
25k
| file_length
int64 501
25k
| avg_line_length
float64 20
99.5
| max_line_length
int64 21
134
| extension_type
stringclasses 2
values |
|---|---|---|---|---|---|---|
null |
LRMI-main/Eigen/src/misc/RealSvd2x2.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
// Copyright (C) 2013-2016 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_REALSVD2X2_H
#define EIGEN_REALSVD2X2_H
namespace Eigen {
namespace internal {
template<typename MatrixType, typename RealScalar, typename Index>
void real_2x2_jacobi_svd(const MatrixType& matrix, Index p, Index q,
JacobiRotation<RealScalar> *j_left,
JacobiRotation<RealScalar> *j_right)
{
using std::sqrt;
using std::abs;
Matrix<RealScalar,2,2> m;
m << numext::real(matrix.coeff(p,p)), numext::real(matrix.coeff(p,q)),
numext::real(matrix.coeff(q,p)), numext::real(matrix.coeff(q,q));
JacobiRotation<RealScalar> rot1;
RealScalar t = m.coeff(0,0) + m.coeff(1,1);
RealScalar d = m.coeff(1,0) - m.coeff(0,1);
if(abs(d) < (std::numeric_limits<RealScalar>::min)())
{
rot1.s() = RealScalar(0);
rot1.c() = RealScalar(1);
}
else
{
// If d!=0, then t/d cannot overflow because the magnitude of the
// entries forming d are not too small compared to the ones forming t.
RealScalar u = t / d;
RealScalar tmp = sqrt(RealScalar(1) + numext::abs2(u));
rot1.s() = RealScalar(1) / tmp;
rot1.c() = u / tmp;
}
m.applyOnTheLeft(0,1,rot1);
j_right->makeJacobi(m,0,1);
*j_left = rot1 * j_right->transpose();
}
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_REALSVD2X2_H
| 1,748
| 30.232143
| 74
|
h
|
null |
LRMI-main/Eigen/src/plugins/MatrixCwiseUnaryOps.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
// This file is included into the body of the base classes supporting matrix specific coefficient-wise functions.
// This include MatrixBase and SparseMatrixBase.
// Return-type aliases for the coefficient-wise unary views defined below;
// each wraps the derived expression in a lazy CwiseUnaryOp.
typedef CwiseUnaryOp<internal::scalar_abs_op<Scalar>, const Derived> CwiseAbsReturnType;
typedef CwiseUnaryOp<internal::scalar_abs2_op<Scalar>, const Derived> CwiseAbs2ReturnType;
typedef CwiseUnaryOp<internal::scalar_arg_op<Scalar>, const Derived> CwiseArgReturnType;
typedef CwiseUnaryOp<internal::scalar_sqrt_op<Scalar>, const Derived> CwiseSqrtReturnType;
typedef CwiseUnaryOp<internal::scalar_sign_op<Scalar>, const Derived> CwiseSignReturnType;
typedef CwiseUnaryOp<internal::scalar_inverse_op<Scalar>, const Derived> CwiseInverseReturnType;
/// \returns an expression of the coefficient-wise absolute value of \c *this
///
/// Example: \include MatrixBase_cwiseAbs.cpp
/// Output: \verbinclude MatrixBase_cwiseAbs.out
///
EIGEN_DOC_UNARY_ADDONS(cwiseAbs,absolute value)
///
/// \sa cwiseAbs2()
///
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const CwiseAbsReturnType
cwiseAbs() const { return CwiseAbsReturnType(derived()); }
/// \returns an expression of the coefficient-wise squared absolute value of \c *this
///
/// Example: \include MatrixBase_cwiseAbs2.cpp
/// Output: \verbinclude MatrixBase_cwiseAbs2.out
///
EIGEN_DOC_UNARY_ADDONS(cwiseAbs2,squared absolute value)
///
/// \sa cwiseAbs()
///
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const CwiseAbs2ReturnType
cwiseAbs2() const { return CwiseAbs2ReturnType(derived()); }
/// \returns an expression of the coefficient-wise square root of *this.
///
/// Example: \include MatrixBase_cwiseSqrt.cpp
/// Output: \verbinclude MatrixBase_cwiseSqrt.out
///
EIGEN_DOC_UNARY_ADDONS(cwiseSqrt,square-root)
///
/// \sa cwisePow(), cwiseSquare()
///
EIGEN_DEVICE_FUNC
inline const CwiseSqrtReturnType
cwiseSqrt() const { return CwiseSqrtReturnType(derived()); }
/// \returns an expression of the coefficient-wise signum of *this.
///
/// Example: \include MatrixBase_cwiseSign.cpp
/// Output: \verbinclude MatrixBase_cwiseSign.out
///
EIGEN_DOC_UNARY_ADDONS(cwiseSign,sign function)
///
EIGEN_DEVICE_FUNC
inline const CwiseSignReturnType
cwiseSign() const { return CwiseSignReturnType(derived()); }
/// \returns an expression of the coefficient-wise inverse of *this.
///
/// Example: \include MatrixBase_cwiseInverse.cpp
/// Output: \verbinclude MatrixBase_cwiseInverse.out
///
EIGEN_DOC_UNARY_ADDONS(cwiseInverse,inverse)
///
/// \sa cwiseProduct()
///
EIGEN_DEVICE_FUNC
inline const CwiseInverseReturnType
cwiseInverse() const { return CwiseInverseReturnType(derived()); }
/// \returns an expression of the coefficient-wise phase angle of \c *this
///
/// Example: \include MatrixBase_cwiseArg.cpp
/// Output: \verbinclude MatrixBase_cwiseArg.out
///
EIGEN_DOC_UNARY_ADDONS(cwiseArg,arg)
EIGEN_DEVICE_FUNC
inline const CwiseArgReturnType
cwiseArg() const { return CwiseArgReturnType(derived()); }
| 3,350
| 33.90625
| 113
|
h
|
null |
LRMI-main/Spectra/DavidsonSymEigsSolver.h
|
// Copyright (C) 2020 Netherlands eScience Center <f.zapata@esciencecenter.nl>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_DAVIDSON_SYM_EIGS_SOLVER_H
#define SPECTRA_DAVIDSON_SYM_EIGS_SOLVER_H
#include <Eigen/Core>
#include "JDSymEigsBase.h"
#include "Util/SelectionRule.h"
namespace Spectra {
///
/// \ingroup EigenSolver
///
/// This class implement the DPR correction for the Davidson algorithms.
/// The algorithms in the Davidson family only differ in how the correction
/// vectors are computed and optionally in the initial orthogonal basis set.
///
/// the DPR correction compute the new correction vector using the following expression:
/// \f[ correction = -(\boldsymbol{D} - \rho \boldsymbol{I})^{-1} \boldsymbol{r} \f]
/// where
/// \f$D\f$ is the diagonal of the target matrix, \f$\rho\f$ the Ritz eigenvalue,
/// \f$I\f$ the identity matrix and \f$r\f$ the residue vector.
///
template <typename OpType>
class DavidsonSymEigsSolver : public JDSymEigsBase<DavidsonSymEigsSolver<OpType>, OpType>
{
private:
    using Index = Eigen::Index;
    using Scalar = typename OpType::Scalar;
    using Matrix = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>;
    using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;
    Vector m_diagonal;  // cached diagonal of the target matrix

public:
    /// Builds the solver and caches the diagonal of the operator, which is
    /// used both to seed the search space and to apply the DPR correction.
    DavidsonSymEigsSolver(OpType& op, Index nev) :
        JDSymEigsBase<DavidsonSymEigsSolver<OpType>, OpType>(op, nev)
    {
        m_diagonal.resize(this->m_matrix_operator.rows());
        for (Index j = 0; j < op.rows(); j++)
        {
            m_diagonal(j) = op(j, j);
        }
    }

    /// Create initial search space based on the diagonal
    /// and the spectrum's target (highest or lowest).
    ///
    /// \param selection Spectrum section to target (e.g. lowest, etc.)
    /// \return Matrix with the initial orthonormal basis
    Matrix setup_initial_search_space(SortRule selection) const
    {
        const std::vector<Eigen::Index> sorted_idx = argsort(selection, m_diagonal);
        Matrix basis = Matrix::Zero(this->m_matrix_operator.rows(), this->m_initial_search_space_size);
        // Each column is the canonical unit vector selecting one of the
        // best-ranked diagonal entries, so the basis is orthonormal by design.
        for (Index col = 0; col < this->m_initial_search_space_size; col++)
        {
            basis(sorted_idx[col], col) = 1.0;
        }
        return basis;
    }

    /// Compute the corrections using the DPR method:
    /// each column k is r_k divided element-wise by (rho_k - D).
    ///
    /// \return New correction vectors.
    Matrix calculate_correction_vector() const
    {
        const Matrix& residues = this->m_ritz_pairs.residues();
        const Vector& ritz_values = this->m_ritz_pairs.ritz_values();
        Matrix corrections = Matrix::Zero(this->m_matrix_operator.rows(), this->m_correction_size);
        for (Index col = 0; col < this->m_correction_size; col++)
        {
            Vector denom = ritz_values(col) - m_diagonal.array();
            corrections.col(col) = residues.col(col).array() / denom.array();
        }
        return corrections;
    }
};
} // namespace Spectra
#endif // SPECTRA_DAVIDSON_SYM_EIGS_SOLVER_H
| 3,169
| 33.835165
| 111
|
h
|
null |
LRMI-main/Spectra/GenEigsBase.h
|
// Copyright (C) 2018-2021 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_GEN_EIGS_BASE_H
#define SPECTRA_GEN_EIGS_BASE_H
#include <Eigen/Core>
#include <vector> // std::vector
#include <cmath> // std::abs, std::pow, std::sqrt
#include <algorithm> // std::min, std::copy
#include <complex> // std::complex, std::conj, std::norm, std::abs
#include <stdexcept> // std::invalid_argument
#include "Util/Version.h"
#include "Util/TypeTraits.h"
#include "Util/SelectionRule.h"
#include "Util/CompInfo.h"
#include "Util/SimpleRandom.h"
#include "MatOp/internal/ArnoldiOp.h"
#include "LinAlg/UpperHessenbergQR.h"
#include "LinAlg/DoubleShiftQR.h"
#include "LinAlg/UpperHessenbergEigen.h"
#include "LinAlg/Arnoldi.h"
namespace Spectra {
///
/// \ingroup EigenSolver
///
/// This is the base class for general eigen solvers, mainly for internal use.
/// It is kept here to provide the documentation for member functions of concrete eigen solvers
/// such as GenEigsSolver and GenEigsRealShiftSolver.
///
template <typename OpType, typename BOpType>
class GenEigsBase
{
private:
using Scalar = typename OpType::Scalar;
using Index = Eigen::Index;
using Matrix = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>;
using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;
using Array = Eigen::Array<Scalar, Eigen::Dynamic, 1>;
using BoolArray = Eigen::Array<bool, Eigen::Dynamic, 1>;
using MapMat = Eigen::Map<Matrix>;
using MapVec = Eigen::Map<Vector>;
using MapConstVec = Eigen::Map<const Vector>;
using Complex = std::complex<Scalar>;
using ComplexMatrix = Eigen::Matrix<Complex, Eigen::Dynamic, Eigen::Dynamic>;
using ComplexVector = Eigen::Matrix<Complex, Eigen::Dynamic, 1>;
using ArnoldiOpType = ArnoldiOp<Scalar, OpType, BOpType>;
using ArnoldiFac = Arnoldi<Scalar, ArnoldiOpType>;
protected:
// clang-format off
OpType& m_op; // object to conduct matrix operation,
// e.g. matrix-vector product
const Index m_n; // dimension of matrix A
const Index m_nev; // number of eigenvalues requested
const Index m_ncv; // dimension of Krylov subspace in the Arnoldi method
Index m_nmatop; // number of matrix operations called
Index m_niter; // number of restarting iterations
ArnoldiFac m_fac; // Arnoldi factorization
ComplexVector m_ritz_val; // Ritz values
ComplexMatrix m_ritz_vec; // Ritz vectors
ComplexVector m_ritz_est; // last row of m_ritz_vec, also called the Ritz estimates
private:
BoolArray m_ritz_conv; // indicator of the convergence of Ritz values
CompInfo m_info; // status of the computation
// clang-format on
// Real Ritz values calculated from UpperHessenbergEigen have exact zero imaginary part
// Complex Ritz values have exact conjugate pairs
// So we use exact tests here
static bool is_complex(const Complex& v) { return v.imag() != Scalar(0); }
static bool is_conj(const Complex& v1, const Complex& v2) { return v1 == Eigen::numext::conj(v2); }
// Implicitly restarted Arnoldi factorization
// Applies shifted QR sweeps with the unwanted Ritz values (indices k..ncv-1)
// as shifts, then rebuilds the factorization from position k.
void restart(Index k, SortRule selection)
{
using std::norm;
if (k >= m_ncv)
return;
DoubleShiftQR<Scalar> decomp_ds(m_ncv);
UpperHessenbergQR<Scalar> decomp_hb(m_ncv);
// Q accumulates the product of all per-shift orthogonal transforms.
Matrix Q = Matrix::Identity(m_ncv, m_ncv);
for (Index i = k; i < m_ncv; i++)
{
// A complex shift and its conjugate are consumed together with one
// real double-shift QR step, keeping all arithmetic real.
if (is_complex(m_ritz_val[i]) && is_conj(m_ritz_val[i], m_ritz_val[i + 1]))
{
// H - mu * I = Q1 * R1
// H <- R1 * Q1 + mu * I = Q1' * H * Q1
// H - conj(mu) * I = Q2 * R2
// H <- R2 * Q2 + conj(mu) * I = Q2' * H * Q2
//
// (H - mu * I) * (H - conj(mu) * I) = Q1 * Q2 * R2 * R1 = Q * R
const Scalar s = Scalar(2) * m_ritz_val[i].real();
const Scalar t = norm(m_ritz_val[i]);
decomp_ds.compute(m_fac.matrix_H(), s, t);
// Q -> Q * Qi
decomp_ds.apply_YQ(Q);
// H -> Q'HQ
// Matrix Q = Matrix::Identity(m_ncv, m_ncv);
// decomp_ds.apply_YQ(Q);
// m_fac_H = Q.transpose() * m_fac_H * Q;
m_fac.compress_H(decomp_ds);
// Skip the conjugate partner, which was handled by the double shift.
i++;
}
else
{
// QR decomposition of H - mu * I, mu is real
decomp_hb.compute(m_fac.matrix_H(), m_ritz_val[i].real());
// Q -> Q * Qi
decomp_hb.apply_YQ(Q);
// H -> Q'HQ = RQ + mu * I
m_fac.compress_H(decomp_hb);
}
}
m_fac.compress_V(Q);
m_fac.factorize_from(k, m_ncv, m_nmatop);
retrieve_ritzpair(selection);
}
// Calculates the number of converged Ritz values
// A Ritz value is converged when its residual estimate falls below
// tol * max(eps^(2/3), |theta|), mirroring the ARPACK criterion.
Index num_converged(const Scalar& tol)
{
using std::pow;
// The machine precision, ~= 1e-16 for the "double" type
constexpr Scalar eps = TypeTraits<Scalar>::epsilon();
// std::pow() is not constexpr, so we do not declare eps23 to be constexpr
// But most compilers should be able to compute eps23 at compile time
const Scalar eps23 = pow(eps, Scalar(2) / 3);
// thresh = tol * max(eps23, abs(theta)), theta for Ritz value
Array thresh = tol * m_ritz_val.head(m_nev).array().abs().max(eps23);
Array resid = m_ritz_est.head(m_nev).array().abs() * m_fac.f_norm();
// Converged "wanted" Ritz values
m_ritz_conv = (resid < thresh);
return m_ritz_conv.count();
}
// Returns the adjusted nev for restarting
Index nev_adjusted(Index nconv)
{
using std::abs;
// A very small value, but 1.0 / near_0 does not overflow
// ~= 1e-307 for the "double" type
constexpr Scalar near_0 = TypeTraits<Scalar>::min() * Scalar(10);
Index nev_new = m_nev;
// Count near-zero Ritz estimates among the unwanted values; they are
// effectively converged and can be kept.
for (Index i = m_nev; i < m_ncv; i++)
if (abs(m_ritz_est[i]) < near_0)
nev_new++;
// Adjust nev_new, according to dnaup2.f line 660~674 in ARPACK
nev_new += (std::min)(nconv, (m_ncv - nev_new) / 2);
if (nev_new == 1 && m_ncv >= 6)
nev_new = m_ncv / 2;
else if (nev_new == 1 && m_ncv > 3)
nev_new = 2;
if (nev_new > m_ncv - 2)
nev_new = m_ncv - 2;
// Increase nev by one if ritz_val[nev - 1] and
// ritz_val[nev] are conjugate pairs
if (is_complex(m_ritz_val[nev_new - 1]) &&
is_conj(m_ritz_val[nev_new - 1], m_ritz_val[nev_new]))
{
nev_new++;
}
return nev_new;
}
// Retrieves and sorts Ritz values and Ritz vectors
// Eigen-decomposes the current Hessenberg matrix and reorders the Ritz
// pairs so that the wanted ones (per `selection`) come first.
void retrieve_ritzpair(SortRule selection)
{
UpperHessenbergEigen<Scalar> decomp(m_fac.matrix_H());
const ComplexVector& evals = decomp.eigenvalues();
ComplexMatrix evecs = decomp.eigenvectors();
// Sort Ritz values and put the wanted ones at the beginning
std::vector<Index> ind;
switch (selection)
{
case SortRule::LargestMagn:
{
SortEigenvalue<Complex, SortRule::LargestMagn> sorting(evals.data(), m_ncv);
sorting.swap(ind);
break;
}
case SortRule::LargestReal:
{
SortEigenvalue<Complex, SortRule::LargestReal> sorting(evals.data(), m_ncv);
sorting.swap(ind);
break;
}
case SortRule::LargestImag:
{
SortEigenvalue<Complex, SortRule::LargestImag> sorting(evals.data(), m_ncv);
sorting.swap(ind);
break;
}
case SortRule::SmallestMagn:
{
SortEigenvalue<Complex, SortRule::SmallestMagn> sorting(evals.data(), m_ncv);
sorting.swap(ind);
break;
}
case SortRule::SmallestReal:
{
SortEigenvalue<Complex, SortRule::SmallestReal> sorting(evals.data(), m_ncv);
sorting.swap(ind);
break;
}
case SortRule::SmallestImag:
{
SortEigenvalue<Complex, SortRule::SmallestImag> sorting(evals.data(), m_ncv);
sorting.swap(ind);
break;
}
default:
throw std::invalid_argument("unsupported selection rule");
}
// Copy the Ritz values and vectors to m_ritz_val and m_ritz_vec, respectively
for (Index i = 0; i < m_ncv; i++)
{
m_ritz_val[i] = evals[ind[i]];
m_ritz_est[i] = evecs(m_ncv - 1, ind[i]);
}
for (Index i = 0; i < m_nev; i++)
{
m_ritz_vec.col(i).noalias() = evecs.col(ind[i]);
}
}
protected:
// Sorts the first nev Ritz pairs in the specified order
// This is used to return the final results
virtual void sort_ritzpair(SortRule sort_rule)
{
std::vector<Index> ind;
switch (sort_rule)
{
case SortRule::LargestMagn:
{
SortEigenvalue<Complex, SortRule::LargestMagn> sorting(m_ritz_val.data(), m_nev);
sorting.swap(ind);
break;
}
case SortRule::LargestReal:
{
SortEigenvalue<Complex, SortRule::LargestReal> sorting(m_ritz_val.data(), m_nev);
sorting.swap(ind);
break;
}
case SortRule::LargestImag:
{
SortEigenvalue<Complex, SortRule::LargestImag> sorting(m_ritz_val.data(), m_nev);
sorting.swap(ind);
break;
}
case SortRule::SmallestMagn:
{
SortEigenvalue<Complex, SortRule::SmallestMagn> sorting(m_ritz_val.data(), m_nev);
sorting.swap(ind);
break;
}
case SortRule::SmallestReal:
{
SortEigenvalue<Complex, SortRule::SmallestReal> sorting(m_ritz_val.data(), m_nev);
sorting.swap(ind);
break;
}
case SortRule::SmallestImag:
{
SortEigenvalue<Complex, SortRule::SmallestImag> sorting(m_ritz_val.data(), m_nev);
sorting.swap(ind);
break;
}
default:
throw std::invalid_argument("unsupported sorting rule");
}
// Apply the permutation to values, vectors and convergence flags together.
ComplexVector new_ritz_val(m_ncv);
ComplexMatrix new_ritz_vec(m_ncv, m_nev);
BoolArray new_ritz_conv(m_nev);
for (Index i = 0; i < m_nev; i++)
{
new_ritz_val[i] = m_ritz_val[ind[i]];
new_ritz_vec.col(i).noalias() = m_ritz_vec.col(ind[i]);
new_ritz_conv[i] = m_ritz_conv[ind[i]];
}
m_ritz_val.swap(new_ritz_val);
m_ritz_vec.swap(new_ritz_vec);
m_ritz_conv.swap(new_ritz_conv);
}
public:
/// \cond
GenEigsBase(OpType& op, const BOpType& Bop, Index nev, Index ncv) :
m_op(op),
m_n(m_op.rows()),
m_nev(nev),
m_ncv(ncv > m_n ? m_n : ncv),
m_nmatop(0),
m_niter(0),
m_fac(ArnoldiOpType(op, Bop), m_ncv),
m_info(CompInfo::NotComputed)
{
if (nev < 1 || nev > m_n - 2)
throw std::invalid_argument("nev must satisfy 1 <= nev <= n - 2, n is the size of matrix");
if (ncv < nev + 2 || ncv > m_n)
throw std::invalid_argument("ncv must satisfy nev + 2 <= ncv <= n, n is the size of matrix");
}
///
/// Virtual destructor
///
virtual ~GenEigsBase() {}
/// \endcond
///
/// Initializes the solver by providing an initial residual vector.
///
/// \param init_resid Pointer to the initial residual vector.
///
/// **Spectra** (and also **ARPACK**) uses an iterative algorithm
/// to find eigenvalues. This function allows the user to provide the initial
/// residual vector.
///
void init(const Scalar* init_resid)
{
// Reset all matrices/vectors to zero
m_ritz_val.resize(m_ncv);
m_ritz_vec.resize(m_ncv, m_nev);
m_ritz_est.resize(m_ncv);
m_ritz_conv.resize(m_nev);
m_ritz_val.setZero();
m_ritz_vec.setZero();
m_ritz_est.setZero();
m_ritz_conv.setZero();
m_nmatop = 0;
m_niter = 0;
// Initialize the Arnoldi factorization
MapConstVec v0(init_resid, m_n);
m_fac.init(v0, m_nmatop);
}
///
/// Initializes the solver by providing a random initial residual vector.
///
/// This overloaded function generates a random initial residual vector
/// (with a fixed random seed) for the algorithm. Elements in the vector
/// follow independent Uniform(-0.5, 0.5) distribution.
///
void init()
{
SimpleRandom<Scalar> rng(0);
Vector init_resid = rng.random_vec(m_n);
init(init_resid.data());
}
///
/// Conducts the major computation procedure.
///
/// \param selection An enumeration value indicating the selection rule of
/// the requested eigenvalues, for example `SortRule::LargestMagn`
/// to retrieve eigenvalues with the largest magnitude.
/// The full list of enumeration values can be found in
/// \ref Enumerations.
/// \param maxit Maximum number of iterations allowed in the algorithm.
/// \param tol Precision parameter for the calculated eigenvalues.
/// \param sorting Rule to sort the eigenvalues and eigenvectors.
/// Supported values are
/// `SortRule::LargestMagn`, `SortRule::LargestReal`,
/// `SortRule::LargestImag`, `SortRule::SmallestMagn`,
/// `SortRule::SmallestReal` and `SortRule::SmallestImag`,
/// for example `SortRule::LargestMagn` indicates that eigenvalues
/// with largest magnitude come first.
/// Note that this argument is only used to
/// **sort** the final result, and the **selection** rule
/// (e.g. selecting the largest or smallest eigenvalues in the
/// full spectrum) is specified by the parameter `selection`.
///
/// \return Number of converged eigenvalues.
///
Index compute(SortRule selection = SortRule::LargestMagn, Index maxit = 1000,
Scalar tol = 1e-10, SortRule sorting = SortRule::LargestMagn)
{
// The m-step Arnoldi factorization
m_fac.factorize_from(1, m_ncv, m_nmatop);
retrieve_ritzpair(selection);
// Restarting
Index i, nconv = 0, nev_adj;
for (i = 0; i < maxit; i++)
{
nconv = num_converged(tol);
if (nconv >= m_nev)
break;
nev_adj = nev_adjusted(nconv);
restart(nev_adj, selection);
}
// Sorting results
sort_ritzpair(sorting);
m_niter += i + 1;
m_info = (nconv >= m_nev) ? CompInfo::Successful : CompInfo::NotConverging;
return (std::min)(m_nev, nconv);
}
///
/// Returns the status of the computation.
/// The full list of enumeration values can be found in \ref Enumerations.
///
CompInfo info() const { return m_info; }
///
/// Returns the number of iterations used in the computation.
///
Index num_iterations() const { return m_niter; }
///
/// Returns the number of matrix operations used in the computation.
///
Index num_operations() const { return m_nmatop; }
///
/// Returns the converged eigenvalues.
///
/// \return A complex-valued vector containing the eigenvalues.
/// Returned vector type will be `Eigen::Vector<std::complex<Scalar>, ...>`, depending on
/// the template parameter `Scalar` defined.
///
ComplexVector eigenvalues() const
{
const Index nconv = m_ritz_conv.cast<Index>().sum();
ComplexVector res(nconv);
if (!nconv)
return res;
// Pack only the converged Ritz values, preserving their sorted order.
Index j = 0;
for (Index i = 0; i < m_nev; i++)
{
if (m_ritz_conv[i])
{
res[j] = m_ritz_val[i];
j++;
}
}
return res;
}
///
/// Returns the eigenvectors associated with the converged eigenvalues.
///
/// \param nvec The number of eigenvectors to return.
///
/// \return A complex-valued matrix containing the eigenvectors.
/// Returned matrix type will be `Eigen::Matrix<std::complex<Scalar>, ...>`,
/// depending on the template parameter `Scalar` defined.
///
ComplexMatrix eigenvectors(Index nvec) const
{
const Index nconv = m_ritz_conv.cast<Index>().sum();
nvec = (std::min)(nvec, nconv);
ComplexMatrix res(m_n, nvec);
if (!nvec)
return res;
// Gather the converged Ritz vectors, then map them back to the
// original space through the Arnoldi basis V.
ComplexMatrix ritz_vec_conv(m_ncv, nvec);
Index j = 0;
for (Index i = 0; i < m_nev && j < nvec; i++)
{
if (m_ritz_conv[i])
{
ritz_vec_conv.col(j).noalias() = m_ritz_vec.col(i);
j++;
}
}
res.noalias() = m_fac.matrix_V() * ritz_vec_conv;
return res;
}
///
/// Returns all converged eigenvectors.
///
ComplexMatrix eigenvectors() const
{
return eigenvectors(m_nev);
}
};
} // namespace Spectra
#endif // SPECTRA_GEN_EIGS_BASE_H
| 18,186
| 33.121951
| 105
|
h
|
null |
LRMI-main/Spectra/GenEigsComplexShiftSolver.h
|
// Copyright (C) 2016-2021 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_GEN_EIGS_COMPLEX_SHIFT_SOLVER_H
#define SPECTRA_GEN_EIGS_COMPLEX_SHIFT_SOLVER_H
#include <Eigen/Core>
#include "GenEigsBase.h"
#include "Util/SelectionRule.h"
#include "MatOp/DenseGenComplexShiftSolve.h"
namespace Spectra {
///
/// \ingroup EigenSolver
///
/// This class implements the eigen solver for general real matrices with
/// a complex shift value in the **shift-and-invert mode**. The background
/// knowledge of the shift-and-invert mode can be found in the documentation
/// of the SymEigsShiftSolver class.
///
/// \tparam OpType The name of the matrix operation class. Users could either
/// use the wrapper classes such as DenseGenComplexShiftSolve and
/// SparseGenComplexShiftSolve, or define their own that implements the type
/// definition `Scalar` and all the public member functions as in
/// DenseGenComplexShiftSolve.
///
template <typename OpType = DenseGenComplexShiftSolve<double>>
class GenEigsComplexShiftSolver : public GenEigsBase<OpType, IdentityBOp>
{
private:
using Scalar = typename OpType::Scalar;
using Index = Eigen::Index;
using Complex = std::complex<Scalar>;
using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;
using ComplexArray = Eigen::Array<Complex, Eigen::Dynamic, 1>;
using Base = GenEigsBase<OpType, IdentityBOp>;
using Base::m_op;
using Base::m_n;
using Base::m_nev;
using Base::m_fac;
using Base::m_ritz_val;
using Base::m_ritz_vec;
const Scalar m_sigmar;
const Scalar m_sigmai;
// First transform back the Ritz values, and then sort
void sort_ritzpair(SortRule sort_rule) override
{
using std::abs;
using std::sqrt;
using std::norm;
// The eigenvalues we get from the iteration is
// nu = 0.5 * (1 / (lambda - sigma) + 1 / (lambda - conj(sigma)))
// So the eigenvalues of the original problem is
// 1 \pm sqrt(1 - 4 * nu^2 * sigmai^2)
// lambda = sigmar + -----------------------------------
// 2 * nu
// We need to pick the correct root
// Let (lambdaj, vj) be the j-th eigen pair, then A * vj = lambdaj * vj
// and inv(A - r * I) * vj = 1 / (lambdaj - r) * vj
// where r is any shift value.
// We can use this identity to determine lambdaj
//
// op(v) computes Re(inv(A - r * I) * v) for any real v
// If r is real, then op(v) is also real. Let a = Re(vj), b = Im(vj),
// then op(vj) = op(a) + op(b) * i
// By comparing op(vj) and [1 / (lambdaj - r) * vj], we can determine
// which one is the correct root
// Select a random shift value
SimpleRandom<Scalar> rng(0);
const Scalar shiftr = rng.random() * m_sigmar + rng.random();
const Complex shift = Complex(shiftr, Scalar(0));
m_op.set_shift(shiftr, Scalar(0));
// Calculate inv(A - r * I) * vj
Vector v_real(m_n), v_imag(m_n), OPv_real(m_n), OPv_imag(m_n);
constexpr Scalar eps = TypeTraits<Scalar>::epsilon();
for (Index i = 0; i < m_nev; i++)
{
// Ritz vector in the original space: real and imaginary parts separately.
v_real.noalias() = m_fac.matrix_V() * m_ritz_vec.col(i).real();
v_imag.noalias() = m_fac.matrix_V() * m_ritz_vec.col(i).imag();
m_op.perform_op(v_real.data(), OPv_real.data());
m_op.perform_op(v_imag.data(), OPv_imag.data());
// Two roots computed from the quadratic equation
const Complex nu = m_ritz_val[i];
const Complex root_part1 = m_sigmar + Scalar(0.5) / nu;
const Complex root_part2 = Scalar(0.5) * sqrt(Scalar(1) - Scalar(4) * m_sigmai * m_sigmai * (nu * nu)) / nu;
const Complex root1 = root_part1 + root_part2;
const Complex root2 = root_part1 - root_part2;
// Test roots: the correct root minimizes the residual of the
// identity op(vj) = vj / (lambdaj - r) accumulated over all entries.
Scalar err1 = Scalar(0), err2 = Scalar(0);
for (int k = 0; k < m_n; k++)
{
const Complex rhs1 = Complex(v_real[k], v_imag[k]) / (root1 - shift);
const Complex rhs2 = Complex(v_real[k], v_imag[k]) / (root2 - shift);
const Complex OPv = Complex(OPv_real[k], OPv_imag[k]);
err1 += norm(OPv - rhs1);
err2 += norm(OPv - rhs2);
}
const Complex lambdaj = (err1 < err2) ? root1 : root2;
m_ritz_val[i] = lambdaj;
// Complex eigenvalues of a real matrix come in conjugate pairs:
// assign the conjugate to the partner slot and skip it.
if (abs(Eigen::numext::imag(lambdaj)) > eps)
{
m_ritz_val[i + 1] = Eigen::numext::conj(lambdaj);
i++;
}
else
{
m_ritz_val[i] = Complex(Eigen::numext::real(lambdaj), Scalar(0));
}
}
Base::sort_ritzpair(sort_rule);
}
public:
///
/// Constructor to create a eigen solver object using the shift-and-invert mode.
///
/// \param op The matrix operation object that implements
/// the complex shift-solve operation of \f$A\f$: calculating
/// \f$\mathrm{Re}\{(A-\sigma I)^{-1}v\}\f$ for any vector \f$v\f$. Users could either
/// create the object from the wrapper class such as DenseGenComplexShiftSolve, or
/// define their own that implements all the public members
/// as in DenseGenComplexShiftSolve.
/// \param nev Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-2\f$,
/// where \f$n\f$ is the size of matrix.
/// \param ncv Parameter that controls the convergence speed of the algorithm.
/// Typically a larger `ncv` means faster convergence, but it may
/// also result in greater memory use and more matrix operations
/// in each iteration. This parameter must satisfy \f$nev+2 \le ncv \le n\f$,
/// and is advised to take \f$ncv \ge 2\cdot nev + 1\f$.
/// \param sigmar The real part of the shift.
/// \param sigmai The imaginary part of the shift.
///
GenEigsComplexShiftSolver(OpType& op, Index nev, Index ncv, const Scalar& sigmar, const Scalar& sigmai) :
Base(op, IdentityBOp(), nev, ncv),
m_sigmar(sigmar), m_sigmai(sigmai)
{
op.set_shift(m_sigmar, m_sigmai);
}
};
} // namespace Spectra
#endif // SPECTRA_GEN_EIGS_COMPLEX_SHIFT_SOLVER_H
| 6,755
| 41.225
| 120
|
h
|
null |
LRMI-main/Spectra/GenEigsRealShiftSolver.h
|
// Copyright (C) 2016-2021 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_GEN_EIGS_REAL_SHIFT_SOLVER_H
#define SPECTRA_GEN_EIGS_REAL_SHIFT_SOLVER_H
#include <Eigen/Core>
#include "GenEigsBase.h"
#include "Util/SelectionRule.h"
#include "MatOp/DenseGenRealShiftSolve.h"
namespace Spectra {
///
/// \ingroup EigenSolver
///
/// This class implements the eigen solver for general real matrices with
/// a real shift value in the **shift-and-invert mode**. The background
/// knowledge of the shift-and-invert mode can be found in the documentation
/// of the SymEigsShiftSolver class.
///
/// \tparam OpType The name of the matrix operation class. Users could either
/// use the wrapper classes such as DenseGenRealShiftSolve and
/// SparseGenRealShiftSolve, or define their own that implements the type
/// definition `Scalar` and all the public member functions as in
/// DenseGenRealShiftSolve.
///
template <typename OpType = DenseGenRealShiftSolve<double>>
class GenEigsRealShiftSolver : public GenEigsBase<OpType, IdentityBOp>
{
private:
    using Scalar = typename OpType::Scalar;
    using Index = Eigen::Index;
    using Complex = std::complex<Scalar>;
    using ComplexArray = Eigen::Array<Complex, Eigen::Dynamic, 1>;
    using Base = GenEigsBase<OpType, IdentityBOp>;
    using Base::m_nev;
    using Base::m_ritz_val;
    const Scalar m_sigma;  // the real-valued shift

    // Map the Ritz values of the shift-and-invert operator back to the
    // spectrum of the original problem, then defer to the base sorter.
    void sort_ritzpair(SortRule sort_rule) override
    {
        // The iteration computes nu = 1 / (lambda - sigma), so the original
        // eigenvalues are recovered as lambda = 1 / nu + sigma.
        for (Index i = 0; i < m_nev; i++)
            m_ritz_val[i] = Scalar(1) / m_ritz_val[i] + m_sigma;
        Base::sort_ritzpair(sort_rule);
    }

public:
    ///
    /// Constructor to create a eigen solver object using the shift-and-invert mode.
    ///
    /// \param op The matrix operation object that implements
    /// the shift-solve operation of \f$A\f$: calculating
    /// \f$(A-\sigma I)^{-1}v\f$ for any vector \f$v\f$. Users could either
    /// create the object from the wrapper class such as DenseGenRealShiftSolve, or
    /// define their own that implements all the public members
    /// as in DenseGenRealShiftSolve.
    /// \param nev Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-2\f$,
    /// where \f$n\f$ is the size of matrix.
    /// \param ncv Parameter that controls the convergence speed of the algorithm.
    /// Typically a larger `ncv` means faster convergence, but it may
    /// also result in greater memory use and more matrix operations
    /// in each iteration. This parameter must satisfy \f$nev+2 \le ncv \le n\f$,
    /// and is advised to take \f$ncv \ge 2\cdot nev + 1\f$.
    /// \param sigma The real-valued shift.
    ///
    GenEigsRealShiftSolver(OpType& op, Index nev, Index ncv, const Scalar& sigma) :
        Base(op, IdentityBOp(), nev, ncv),
        m_sigma(sigma)
    {
        op.set_shift(m_sigma);
    }
};
} // namespace Spectra
#endif // SPECTRA_GEN_EIGS_REAL_SHIFT_SOLVER_H
| 3,518
| 39.918605
| 98
|
h
|
null |
LRMI-main/Spectra/GenEigsSolver.h
|
// Copyright (C) 2016-2021 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_GEN_EIGS_SOLVER_H
#define SPECTRA_GEN_EIGS_SOLVER_H
#include <Eigen/Core>
#include "GenEigsBase.h"
#include "Util/SelectionRule.h"
#include "MatOp/DenseGenMatProd.h"
namespace Spectra {
///
/// \ingroup EigenSolver
///
/// This class implements the eigen solver for general real matrices, i.e.,
/// to solve \f$Ax=\lambda x\f$ for a possibly non-symmetric \f$A\f$ matrix.
///
/// Most of the background information documented in the SymEigsSolver class
/// also applies to the GenEigsSolver class here, except that the eigenvalues
/// and eigenvectors of a general matrix can now be complex-valued.
///
/// \tparam OpType The name of the matrix operation class. Users could either
/// use the wrapper classes such as DenseGenMatProd and
/// SparseGenMatProd, or define their own that implements the type
/// definition `Scalar` and all the public member functions as in
/// DenseGenMatProd.
///
/// An example that illustrates the usage of GenEigsSolver is given below:
///
/// \code{.cpp}
/// #include <Eigen/Core>
/// #include <Spectra/GenEigsSolver.h>
/// // <Spectra/MatOp/DenseGenMatProd.h> is implicitly included
/// #include <iostream>
///
/// using namespace Spectra;
///
/// int main()
/// {
/// // We are going to calculate the eigenvalues of M
/// Eigen::MatrixXd M = Eigen::MatrixXd::Random(10, 10);
///
/// // Construct matrix operation object using the wrapper class
/// DenseGenMatProd<double> op(M);
///
/// // Construct eigen solver object, requesting the largest
/// // (in magnitude, or norm) three eigenvalues
/// GenEigsSolver<DenseGenMatProd<double>> eigs(op, 3, 6);
///
/// // Initialize and compute
/// eigs.init();
/// int nconv = eigs.compute(SortRule::LargestMagn);
///
/// // Retrieve results
/// Eigen::VectorXcd evalues;
/// if (eigs.info() == CompInfo::Successful)
/// evalues = eigs.eigenvalues();
///
/// std::cout << "Eigenvalues found:\n" << evalues << std::endl;
///
/// return 0;
/// }
/// \endcode
///
/// And also an example for sparse matrices:
///
/// \code{.cpp}
/// #include <Eigen/Core>
/// #include <Eigen/SparseCore>
/// #include <Spectra/GenEigsSolver.h>
/// #include <Spectra/MatOp/SparseGenMatProd.h>
/// #include <iostream>
///
/// using namespace Spectra;
///
/// int main()
/// {
/// // A band matrix with 1 on the main diagonal, 2 on the below-main subdiagonal,
/// // and 3 on the above-main subdiagonal
/// const int n = 10;
/// Eigen::SparseMatrix<double> M(n, n);
/// M.reserve(Eigen::VectorXi::Constant(n, 3));
/// for (int i = 0; i < n; i++)
/// {
/// M.insert(i, i) = 1.0;
/// if (i > 0)
/// M.insert(i - 1, i) = 3.0;
/// if (i < n - 1)
/// M.insert(i + 1, i) = 2.0;
/// }
///
/// // Construct matrix operation object using the wrapper class SparseGenMatProd
/// SparseGenMatProd<double> op(M);
///
/// // Construct eigen solver object, requesting the largest three eigenvalues
/// GenEigsSolver<SparseGenMatProd<double>> eigs(op, 3, 6);
///
/// // Initialize and compute
/// eigs.init();
/// int nconv = eigs.compute(SortRule::LargestMagn);
///
/// // Retrieve results
/// Eigen::VectorXcd evalues;
/// if (eigs.info() == CompInfo::Successful)
/// evalues = eigs.eigenvalues();
///
/// std::cout << "Eigenvalues found:\n" << evalues << std::endl;
///
/// return 0;
/// }
/// \endcode
template <typename OpType = DenseGenMatProd<double>>
class GenEigsSolver : public GenEigsBase<OpType, IdentityBOp>
{
private:
    using Index = Eigen::Index;
    using Base = GenEigsBase<OpType, IdentityBOp>;

public:
    ///
    /// Constructor to create a solver object.
    ///
    /// \param op  Matrix operation object implementing the matrix-vector
    ///            product of \f$A\f$, i.e. computing \f$Av\f$ for any vector
    ///            \f$v\f$. This can be an instance of a wrapper class such as
    ///            DenseGenMatProd, or a user-defined class exposing the same
    ///            public interface.
    /// \param nev Number of eigenvalues requested; must satisfy
    ///            \f$1 \le nev \le n-2\f$, where \f$n\f$ is the matrix size.
    /// \param ncv Parameter controlling the convergence speed of the
    ///            algorithm; must satisfy \f$nev+2 \le ncv \le n\f$, and
    ///            \f$ncv \ge 2\cdot nev + 1\f$ is advised. A larger `ncv`
    ///            typically converges faster, at the cost of more memory and
    ///            more matrix operations per iteration.
    ///
    GenEigsSolver(OpType& op, Index nev, Index ncv) :
        Base(op, IdentityBOp(), nev, ncv)
    {}
};
} // namespace Spectra
#endif // SPECTRA_GEN_EIGS_SOLVER_H
| 5,233
| 33.893333
| 96
|
h
|
null |
LRMI-main/Spectra/JDSymEigsBase.h
|
// Copyright (C) 2020 Netherlands eScience Center <J.Wehner@esciencecenter.nl>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_JD_SYM_EIGS_BASE_H
#define SPECTRA_JD_SYM_EIGS_BASE_H
#include <Eigen/Core>
#include <vector> // std::vector
#include <cmath> // std::abs, std::pow
#include <algorithm> // std::min
#include <stdexcept> // std::invalid_argument
#include <iostream>
#include "Util/SelectionRule.h"
#include "Util/CompInfo.h"
#include "LinAlg/SearchSpace.h"
#include "LinAlg/RitzPairs.h"
namespace Spectra {
///
/// \ingroup EigenSolver
///
/// This is the base class for symmetric JD eigen solvers, mainly for internal use.
/// It is kept here to provide the documentation for member functions of concrete eigen solvers
/// such as DavidsonSymEigsSolver.
///
/// This class uses the CRTP method to call functions from the derived class.
///
template <typename Derived, typename OpType>
class JDSymEigsBase
{
protected:
    using Index = Eigen::Index;
    using Scalar = typename OpType::Scalar;
    using Matrix = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>;
    using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;

    const OpType& m_matrix_operator;     // object to conduct matrix operation,
                                         // e.g. matrix-vector product
    Index niter_ = 0;                    // number of iterations used by the last computation
    const Index m_number_eigenvalues;    // number of eigenvalues requested
    Index m_max_search_space_size;       // the search space is restarted once it exceeds this size
    Index m_initial_search_space_size;   // size of the search space after a (re)start
    Index m_correction_size;             // how many correction vectors are added in each iteration
    RitzPairs<Scalar> m_ritz_pairs;      // Ritz eigen pair structure
    SearchSpace<Scalar> m_search_space;  // search space

private:
    CompInfo m_info = CompInfo::NotComputed;  // status of the computation

    // Validates the requested number of eigenvalues against the matrix size;
    // throws std::invalid_argument when out of range.
    void check_argument() const
    {
        if (m_number_eigenvalues < 1 || m_number_eigenvalues > m_matrix_operator.cols() - 1)
            throw std::invalid_argument("nev must satisfy 1 <= nev <= n - 1, n is the size of matrix");
    }

public:
    ///
    /// Constructor.
    ///
    /// \param op  Matrix operation object (e.g. a wrapper providing the
    ///            matrix-vector product). Only a reference is stored, so `op`
    ///            must outlive this solver object.
    /// \param nev Number of eigenvalues requested; must satisfy
    ///            \f$1 \le nev \le n-1\f$, where \f$n\f$ is the matrix size.
    ///
    JDSymEigsBase(OpType& op, Index nev) :
        m_matrix_operator(op),
        m_number_eigenvalues(nev),
        m_max_search_space_size(10 * m_number_eigenvalues),
        m_initial_search_space_size(2 * m_number_eigenvalues),
        m_correction_size(m_number_eigenvalues)
    {
        check_argument();
        // TODO better input validation and checks
        // Clamp the default sizes for small matrices so the search space
        // never outgrows the problem dimension
        if (op.cols() < m_max_search_space_size)
        {
            m_max_search_space_size = op.cols();
        }
        if (op.cols() < m_initial_search_space_size + m_correction_size)
        {
            m_initial_search_space_size = op.cols() / 3;
            m_correction_size = op.cols() / 3;
        }
    }
    ///
    /// Sets the maximum search space size; once the search space grows beyond
    /// this bound it is deflated (restarted).
    ///
    void set_max_search_space_size(Index max_search_space_size)
    {
        m_max_search_space_size = max_search_space_size;
    }
    ///
    /// Sets how many correction vectors are added in each iteration
    ///
    void set_correction_size(Index correction_size)
    {
        m_correction_size = correction_size;
    }
    ///
    /// Sets the initial search space size for Ritz values
    ///
    void set_initial_search_space_size(Index initial_search_space_size)
    {
        m_initial_search_space_size = initial_search_space_size;
    }
    ///
    /// Virtual destructor
    ///
    virtual ~JDSymEigsBase() {}
    ///
    /// Returns the status of the computation.
    /// The full list of enumeration values can be found in \ref Enumerations.
    ///
    CompInfo info() const { return m_info; }
    ///
    /// Returns the number of iterations used in the computation.
    ///
    Index num_iterations() const { return niter_; }
    ///
    /// Returns the first `nev` Ritz values.
    ///
    Vector eigenvalues() const { return m_ritz_pairs.ritz_values().head(m_number_eigenvalues); }
    ///
    /// Returns the first `nev` Ritz vectors, one per column.
    ///
    Matrix eigenvectors() const { return m_ritz_pairs.ritz_vectors().leftCols(m_number_eigenvalues); }
    ///
    /// Computes the requested eigenpairs. The initial search space is built by
    /// the derived class (CRTP call to setup_initial_search_space()).
    ///
    /// \return Number of converged eigenvalues.
    ///
    Index compute(SortRule selection = SortRule::LargestMagn, Index maxit = 100,
                  Scalar tol = 100 * Eigen::NumTraits<Scalar>::dummy_precision())
    {
        Derived& derived = static_cast<Derived&>(*this);
        Matrix initial_space = derived.setup_initial_search_space(selection);
        return compute_with_guess(initial_space, selection, maxit, tol);
    }
    ///
    /// Same as compute(), but starts from a user-supplied guess for the
    /// search space instead of asking the derived class to build one.
    ///
    /// \return Number of converged eigenvalues.
    ///
    Index compute_with_guess(const Eigen::Ref<const Matrix>& initial_space,
                             SortRule selection = SortRule::LargestMagn,
                             Index maxit = 100,
                             Scalar tol = 100 * Eigen::NumTraits<Scalar>::dummy_precision())
    {
        m_search_space.initialize_search_space(initial_space);
        for (niter_ = 0; niter_ < maxit; niter_++)
        {
            // Deflate (restart) once the search space has grown too large
            bool do_restart = (m_search_space.size() > m_max_search_space_size);
            if (do_restart)
            {
                m_search_space.restart(m_ritz_pairs, m_initial_search_space_size);
            }
            m_search_space.update_operator_basis_product(m_matrix_operator);
            Eigen::ComputationInfo small_problem_info = m_ritz_pairs.compute_eigen_pairs(m_search_space);
            if (small_problem_info != Eigen::ComputationInfo::Success)
            {
                m_info = CompInfo::NumericalIssue;
                break;
            }
            m_ritz_pairs.sort(selection);
            bool converged = m_ritz_pairs.check_convergence(tol, m_number_eigenvalues);
            if (converged)
            {
                m_info = CompInfo::Successful;
                break;
            }
            else if (niter_ == maxit - 1)
            {
                m_info = CompInfo::NotConverging;
                break;
            }
            // Not converged yet: enlarge the search space with correction vectors
            Derived& derived = static_cast<Derived&>(*this);
            Matrix corr_vect = derived.calculate_correction_vector();
            m_search_space.extend_basis(corr_vect);
        }
        // Count how many of the requested eigenvalues have converged
        return (m_ritz_pairs.converged_eigenvalues()).template cast<Index>().head(m_number_eigenvalues).sum();
    }
};
} // namespace Spectra
#endif // SPECTRA_JD_SYM_EIGS_BASE_H
| 6,303
| 33.26087
| 110
|
h
|
null |
LRMI-main/Spectra/SymEigsBase.h
|
// Copyright (C) 2018-2021 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_SYM_EIGS_BASE_H
#define SPECTRA_SYM_EIGS_BASE_H
#include "Eigen/Core"
#include <vector> // std::vector
#include <cmath> // std::abs, std::pow
#include <algorithm> // std::min
#include <stdexcept> // std::invalid_argument
#include <utility> // std::move
#include "Util/Version.h"
#include "Util/TypeTraits.h"
#include "Util/SelectionRule.h"
#include "Util/CompInfo.h"
#include "Util/SimpleRandom.h"
#include "MatOp/internal/ArnoldiOp.h"
#include "LinAlg/UpperHessenbergQR.h"
#include "LinAlg/TridiagEigen.h"
#include "LinAlg/Lanczos.h"
namespace Spectra {
///
/// \defgroup EigenSolver Eigen Solvers
///
/// Eigen solvers for different types of problems.
///
///
/// \ingroup EigenSolver
///
/// This is the base class for symmetric eigen solvers, mainly for internal use.
/// It is kept here to provide the documentation for member functions of concrete eigen solvers
/// such as SymEigsSolver and SymEigsShiftSolver.
///
template <typename OpType, typename BOpType>
class SymEigsBase
{
private:
    using Scalar = typename OpType::Scalar;
    using Index = Eigen::Index;
    using Matrix = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>;
    using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;
    using Array = Eigen::Array<Scalar, Eigen::Dynamic, 1>;
    using BoolArray = Eigen::Array<bool, Eigen::Dynamic, 1>;
    using MapMat = Eigen::Map<Matrix>;
    using MapVec = Eigen::Map<Vector>;
    using MapConstVec = Eigen::Map<const Vector>;
    using ArnoldiOpType = ArnoldiOp<Scalar, OpType, BOpType>;
    using LanczosFac = Lanczos<Scalar, ArnoldiOpType>;

protected:
    // clang-format off
    // In SymEigsSolver and SymEigsShiftSolver, the A operator is an lvalue provided by
    // the user. In SymGEigsSolver, the A operator is an rvalue. To avoid copying objects,
    // we use the following scheme:
    // 1. If the op parameter in the constructor is an lvalue, make m_op a const reference to op
    // 2. If op is an rvalue, move op to m_op_container, and then make m_op a const
    //    reference to m_op_container[0]
    std::vector<OpType> m_op_container;
    const OpType& m_op;    // matrix operator for A
    const Index m_n;       // dimension of matrix A
    const Index m_nev;     // number of eigenvalues requested
    const Index m_ncv;     // dimension of Krylov subspace in the Lanczos method
    Index m_nmatop;        // number of matrix operations called
    Index m_niter;         // number of restarting iterations
    LanczosFac m_fac;      // Lanczos factorization
    Vector m_ritz_val;     // Ritz values

private:
    Matrix m_ritz_vec;     // Ritz vectors
    Vector m_ritz_est;     // last row of m_ritz_vec, also called the Ritz estimates
    BoolArray m_ritz_conv; // indicator of the convergence of Ritz values
    CompInfo m_info;       // status of the computation
    // clang-format on

    // Move rvalue object to the container, so that m_op can safely reference it
    static std::vector<OpType> create_op_container(OpType&& rval)
    {
        std::vector<OpType> container;
        container.emplace_back(std::move(rval));
        return container;
    }

    // Implicitly restarted Lanczos factorization: compress the factorization
    // down to k steps by applying the unwanted Ritz values as shifts, then
    // grow it back to m_ncv steps and recompute the Ritz pairs
    void restart(Index k, SortRule selection)
    {
        using std::abs;
        if (k >= m_ncv)
            return;
        TridiagQR<Scalar> decomp(m_ncv);
        Matrix Q = Matrix::Identity(m_ncv, m_ncv);
        // Apply large shifts first
        const int nshift = int(m_ncv - k);
        Vector shifts = m_ritz_val.tail(nshift);
        std::sort(shifts.data(), shifts.data() + nshift, [](const Scalar& v1, const Scalar& v2) { return abs(v1) > abs(v2); });
        for (Index i = 0; i < nshift; i++)
        {
            // QR decomposition of H-mu*I, mu is the shift
            decomp.compute(m_fac.matrix_H(), shifts[i]);
            // Q -> Q * Qi
            decomp.apply_YQ(Q);
            // H -> Q'HQ
            // Since QR = H - mu * I, we have H = QR + mu * I
            // and therefore Q'HQ = RQ + mu * I
            m_fac.compress_H(decomp);
        }
        m_fac.compress_V(Q);
        m_fac.factorize_from(k, m_ncv, m_nmatop);
        retrieve_ritzpair(selection);
    }

    // Calculates the number of converged Ritz values, and records the
    // per-value convergence flags in m_ritz_conv
    Index num_converged(const Scalar& tol)
    {
        using std::pow;
        // The machine precision, ~= 1e-16 for the "double" type
        constexpr Scalar eps = TypeTraits<Scalar>::epsilon();
        // std::pow() is not constexpr, so we do not declare eps23 to be constexpr
        // But most compilers should be able to compute eps23 at compile time
        const Scalar eps23 = pow(eps, Scalar(2) / 3);
        // thresh = tol * max(eps23, abs(theta)), theta for Ritz value
        Array thresh = tol * m_ritz_val.head(m_nev).array().abs().max(eps23);
        // Residual estimate = |Ritz estimate| * ||f||, where f is the Lanczos residual vector
        Array resid = m_ritz_est.head(m_nev).array().abs() * m_fac.f_norm();
        // Converged "wanted" Ritz values
        m_ritz_conv = (resid < thresh);
        return m_ritz_conv.count();
    }

    // Returns the adjusted nev for restarting
    Index nev_adjusted(Index nconv)
    {
        using std::abs;
        // A very small value, but 1.0 / near_0 does not overflow
        // ~= 1e-307 for the "double" type
        constexpr Scalar near_0 = TypeTraits<Scalar>::min() * Scalar(10);
        Index nev_new = m_nev;
        // Count Ritz values with (near-)zero Ritz estimates among the unwanted ones
        for (Index i = m_nev; i < m_ncv; i++)
            if (abs(m_ritz_est[i]) < near_0)
                nev_new++;
        // Adjust nev_new, according to dsaup2.f line 677~684 in ARPACK
        nev_new += (std::min)(nconv, (m_ncv - nev_new) / 2);
        if (nev_new == 1 && m_ncv >= 6)
            nev_new = m_ncv / 2;
        else if (nev_new == 1 && m_ncv > 2)
            nev_new = 2;
        if (nev_new > m_ncv - 1)
            nev_new = m_ncv - 1;
        return nev_new;
    }

    // Retrieves and sorts Ritz values and Ritz vectors of the small
    // tridiagonal matrix H
    void retrieve_ritzpair(SortRule selection)
    {
        TridiagEigen<Scalar> decomp(m_fac.matrix_H());
        const Vector& evals = decomp.eigenvalues();
        const Matrix& evecs = decomp.eigenvectors();
        // Sort Ritz values and put the wanted ones at the beginning
        std::vector<Index> ind = argsort(selection, evals, m_ncv);
        // Copy the Ritz values and vectors to m_ritz_val and m_ritz_vec, respectively
        for (Index i = 0; i < m_ncv; i++)
        {
            m_ritz_val[i] = evals[ind[i]];
            m_ritz_est[i] = evecs(m_ncv - 1, ind[i]);
        }
        for (Index i = 0; i < m_nev; i++)
        {
            m_ritz_vec.col(i).noalias() = evecs.col(ind[i]);
        }
    }

protected:
    // Sorts the first nev Ritz pairs in the specified order
    // This is used to return the final results
    virtual void sort_ritzpair(SortRule sort_rule)
    {
        if ((sort_rule != SortRule::LargestAlge) && (sort_rule != SortRule::LargestMagn) &&
            (sort_rule != SortRule::SmallestAlge) && (sort_rule != SortRule::SmallestMagn))
            throw std::invalid_argument("unsupported sorting rule");
        std::vector<Index> ind = argsort(sort_rule, m_ritz_val, m_nev);
        Vector new_ritz_val(m_ncv);
        Matrix new_ritz_vec(m_ncv, m_nev);
        BoolArray new_ritz_conv(m_nev);
        for (Index i = 0; i < m_nev; i++)
        {
            new_ritz_val[i] = m_ritz_val[ind[i]];
            new_ritz_vec.col(i).noalias() = m_ritz_vec.col(ind[i]);
            new_ritz_conv[i] = m_ritz_conv[ind[i]];
        }
        m_ritz_val.swap(new_ritz_val);
        m_ritz_vec.swap(new_ritz_vec);
        m_ritz_conv.swap(new_ritz_conv);
    }

public:
    /// \cond
    // If op is an lvalue: keep only a reference to it (op must outlive this object)
    SymEigsBase(OpType& op, const BOpType& Bop, Index nev, Index ncv) :
        m_op(op),
        m_n(op.rows()),
        m_nev(nev),
        m_ncv(ncv > m_n ? m_n : ncv),
        m_nmatop(0),
        m_niter(0),
        m_fac(ArnoldiOpType(op, Bop), m_ncv),
        m_info(CompInfo::NotComputed)
    {
        if (nev < 1 || nev > m_n - 1)
            throw std::invalid_argument("nev must satisfy 1 <= nev <= n - 1, n is the size of matrix");
        if (ncv <= nev || ncv > m_n)
            throw std::invalid_argument("ncv must satisfy nev < ncv <= n, n is the size of matrix");
    }
    // If op is an rvalue: move it into m_op_container and reference that copy
    SymEigsBase(OpType&& op, const BOpType& Bop, Index nev, Index ncv) :
        m_op_container(create_op_container(std::move(op))),
        m_op(m_op_container.front()),
        m_n(m_op.rows()),
        m_nev(nev),
        m_ncv(ncv > m_n ? m_n : ncv),
        m_nmatop(0),
        m_niter(0),
        m_fac(ArnoldiOpType(m_op, Bop), m_ncv),
        m_info(CompInfo::NotComputed)
    {
        if (nev < 1 || nev > m_n - 1)
            throw std::invalid_argument("nev must satisfy 1 <= nev <= n - 1, n is the size of matrix");
        if (ncv <= nev || ncv > m_n)
            throw std::invalid_argument("ncv must satisfy nev < ncv <= n, n is the size of matrix");
    }
    ///
    /// Virtual destructor
    ///
    virtual ~SymEigsBase() {}
    /// \endcond

    ///
    /// Initializes the solver by providing an initial residual vector.
    ///
    /// \param init_resid Pointer to the initial residual vector.
    ///
    /// **Spectra** (and also **ARPACK**) uses an iterative algorithm
    /// to find eigenvalues. This function allows the user to provide the initial
    /// residual vector.
    ///
    void init(const Scalar* init_resid)
    {
        // Reset all matrices/vectors to zero
        m_ritz_val.resize(m_ncv);
        m_ritz_vec.resize(m_ncv, m_nev);
        m_ritz_est.resize(m_ncv);
        m_ritz_conv.resize(m_nev);
        m_ritz_val.setZero();
        m_ritz_vec.setZero();
        m_ritz_est.setZero();
        m_ritz_conv.setZero();
        m_nmatop = 0;
        m_niter = 0;
        // Initialize the Lanczos factorization
        MapConstVec v0(init_resid, m_n);
        m_fac.init(v0, m_nmatop);
    }
    ///
    /// Initializes the solver by providing a random initial residual vector.
    ///
    /// This overloaded function generates a random initial residual vector
    /// (with a fixed random seed) for the algorithm. Elements in the vector
    /// follow independent Uniform(-0.5, 0.5) distribution.
    ///
    void init()
    {
        SimpleRandom<Scalar> rng(0);
        Vector init_resid = rng.random_vec(m_n);
        init(init_resid.data());
    }
    ///
    /// Conducts the major computation procedure.
    ///
    /// \param selection An enumeration value indicating the selection rule of
    ///                  the requested eigenvalues, for example `SortRule::LargestMagn`
    ///                  to retrieve eigenvalues with the largest magnitude.
    ///                  The full list of enumeration values can be found in
    ///                  \ref Enumerations.
    /// \param maxit     Maximum number of iterations allowed in the algorithm.
    /// \param tol       Precision parameter for the calculated eigenvalues.
    /// \param sorting   Rule to sort the eigenvalues and eigenvectors.
    ///                  Supported values are
    ///                  `SortRule::LargestAlge`, `SortRule::LargestMagn`,
    ///                  `SortRule::SmallestAlge`, and `SortRule::SmallestMagn`.
    ///                  For example, `SortRule::LargestAlge` indicates that largest eigenvalues
    ///                  come first. Note that this argument is only used to
    ///                  **sort** the final result, and the **selection** rule
    ///                  (e.g. selecting the largest or smallest eigenvalues in the
    ///                  full spectrum) is specified by the parameter `selection`.
    ///
    /// \return Number of converged eigenvalues.
    ///
    Index compute(SortRule selection = SortRule::LargestMagn, Index maxit = 1000,
                  Scalar tol = 1e-10, SortRule sorting = SortRule::LargestAlge)
    {
        // The m-step Lanczos factorization
        m_fac.factorize_from(1, m_ncv, m_nmatop);
        retrieve_ritzpair(selection);
        // Restarting
        Index i, nconv = 0, nev_adj;
        for (i = 0; i < maxit; i++)
        {
            nconv = num_converged(tol);
            if (nconv >= m_nev)
                break;
            nev_adj = nev_adjusted(nconv);
            restart(nev_adj, selection);
        }
        // Sorting results
        sort_ritzpair(sorting);
        m_niter += i + 1;
        m_info = (nconv >= m_nev) ? CompInfo::Successful : CompInfo::NotConverging;
        return (std::min)(m_nev, nconv);
    }
    ///
    /// Returns the status of the computation.
    /// The full list of enumeration values can be found in \ref Enumerations.
    ///
    CompInfo info() const { return m_info; }
    ///
    /// Returns the number of iterations used in the computation.
    ///
    Index num_iterations() const { return m_niter; }
    ///
    /// Returns the number of matrix operations used in the computation.
    ///
    Index num_operations() const { return m_nmatop; }
    ///
    /// Returns the first `nev` Ritz values.
    ///
    /// NOTE(review): the commented-out code below used to filter the output
    /// down to *converged* eigenvalues only; the current version returns the
    /// first `nev` Ritz values whether or not they converged — presumably an
    /// intentional modification, but callers should check info() or the
    /// return value of compute() before trusting the results. TODO confirm.
    ///
    /// \return A vector containing the eigenvalues.
    /// Returned vector type will be `Eigen::Vector<Scalar, ...>`, depending on
    /// the template parameter `Scalar` defined.
    ///
    Vector eigenvalues() const
    {
        //const Index nconv = m_ritz_conv.count();
        //Vector res(nconv);
        //if (!nconv)
        //    return res;
        //Index j = 0;
        //for (Index i = 0; i < m_nev; i++)
        //{
        //    if (m_ritz_conv[i])
        //    {
        //        res[j] = m_ritz_val[i];
        //        j++;
        //    }
        //}
        //return res;
        Vector res(m_nev);
        for (Index i = 0; i < m_nev; i++)
            res[i] = m_ritz_val[i];
        return res;
    }
    ///
    /// Returns the eigenvectors associated with the converged eigenvalues.
    ///
    /// \param nvec The number of eigenvectors to return.
    ///
    /// \return A matrix containing the eigenvectors.
    /// Returned matrix type will be `Eigen::Matrix<Scalar, ...>`,
    /// depending on the template parameter `Scalar` defined.
    ///
    virtual Matrix eigenvectors(Index nvec) const
    {
        const Index nconv = m_ritz_conv.count();
        nvec = (std::min)(nvec, nconv);
        Matrix res(m_n, nvec);
        if (!nvec)
            return res;
        // Gather the converged Ritz vectors, then map them back to the
        // original space through the Lanczos basis V
        Matrix ritz_vec_conv(m_ncv, nvec);
        Index j = 0;
        for (Index i = 0; i < m_nev && j < nvec; i++)
        {
            if (m_ritz_conv[i])
            {
                ritz_vec_conv.col(j).noalias() = m_ritz_vec.col(i);
                j++;
            }
        }
        res.noalias() = m_fac.matrix_V() * ritz_vec_conv;
        return res;
    }
    ///
    /// Returns all converged eigenvectors.
    ///
    virtual Matrix eigenvectors() const
    {
        return eigenvectors(m_nev);
    }
};
} // namespace Spectra
#endif // SPECTRA_SYM_EIGS_BASE_H
| 15,290
| 32.169197
| 127
|
h
|
null |
LRMI-main/Spectra/SymEigsShiftSolver.h
|
// Copyright (C) 2016-2021 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_SYM_EIGS_SHIFT_SOLVER_H
#define SPECTRA_SYM_EIGS_SHIFT_SOLVER_H
#include <Eigen/Core>
#include "SymEigsBase.h"
#include "Util/SelectionRule.h"
#include "MatOp/DenseSymShiftSolve.h"
namespace Spectra {
///
/// \ingroup EigenSolver
///
/// This class implements the eigen solver for real symmetric matrices using
/// the **shift-and-invert mode**. The background information of the symmetric
/// eigen solver is documented in the SymEigsSolver class. Here we focus on
/// explaining the shift-and-invert mode.
///
/// The shift-and-invert mode is based on the following fact:
/// If \f$\lambda\f$ and \f$x\f$ are a pair of eigenvalue and eigenvector of
/// matrix \f$A\f$, such that \f$Ax=\lambda x\f$, then for any \f$\sigma\f$,
/// we have
/// \f[(A-\sigma I)^{-1}x=\nu x\f]
/// where
/// \f[\nu=\frac{1}{\lambda-\sigma}\f]
/// which indicates that \f$(\nu, x)\f$ is an eigenpair of the matrix
/// \f$(A-\sigma I)^{-1}\f$.
///
/// Therefore, if we pass the matrix operation \f$(A-\sigma I)^{-1}y\f$
/// (rather than \f$Ay\f$) to the eigen solver, then we would get the desired
/// values of \f$\nu\f$, and \f$\lambda\f$ can also be easily obtained by noting
/// that \f$\lambda=\sigma+\nu^{-1}\f$.
///
/// The reason why we need this type of manipulation is that
/// the algorithm of **Spectra** (and also **ARPACK**)
/// is good at finding eigenvalues with large magnitude, but may fail in looking
/// for eigenvalues that are close to zero. However, if we really need them, we
/// can set \f$\sigma=0\f$, find the largest eigenvalues of \f$A^{-1}\f$, and then
/// transform back to \f$\lambda\f$, since in this case largest values of \f$\nu\f$
/// implies smallest values of \f$\lambda\f$.
///
/// To summarize, in the shift-and-invert mode, the selection rule will apply to
/// \f$\nu=1/(\lambda-\sigma)\f$ rather than \f$\lambda\f$. So a selection rule
/// of `LARGEST_MAGN` combined with shift \f$\sigma\f$ will find eigenvalues of
/// \f$A\f$ that are closest to \f$\sigma\f$. But note that the eigenvalues()
/// method will always return the eigenvalues in the original problem (i.e.,
/// returning \f$\lambda\f$ rather than \f$\nu\f$), and eigenvectors are the
/// same for both the original problem and the shifted-and-inverted problem.
///
/// \tparam OpType The name of the matrix operation class. Users could either
/// use the wrapper classes such as DenseSymShiftSolve and
/// SparseSymShiftSolve, or define their own that implements the type
/// definition `Scalar` and all the public member functions as in
/// DenseSymShiftSolve.
///
/// Below is an example that illustrates the use of the shift-and-invert mode:
///
/// \code{.cpp}
/// #include <Eigen/Core>
/// #include <Spectra/SymEigsShiftSolver.h>
/// // <Spectra/MatOp/DenseSymShiftSolve.h> is implicitly included
/// #include <iostream>
///
/// using namespace Spectra;
///
/// int main()
/// {
/// // A size-10 diagonal matrix with elements 1, 2, ..., 10
/// Eigen::MatrixXd M = Eigen::MatrixXd::Zero(10, 10);
/// for (int i = 0; i < M.rows(); i++)
/// M(i, i) = i + 1;
///
/// // Construct matrix operation object using the wrapper class
/// DenseSymShiftSolve<double> op(M);
///
/// // Construct eigen solver object with shift 0
/// // This will find eigenvalues that are closest to 0
/// SymEigsShiftSolver<DenseSymShiftSolve<double>> eigs(op, 3, 6, 0.0);
///
/// eigs.init();
/// eigs.compute(SortRule::LargestMagn);
/// if (eigs.info() == CompInfo::Successful)
/// {
/// Eigen::VectorXd evalues = eigs.eigenvalues();
/// // Will get (3.0, 2.0, 1.0)
/// std::cout << "Eigenvalues found:\n" << evalues << std::endl;
/// }
///
/// return 0;
/// }
/// \endcode
///
/// Also an example for user-supplied matrix shift-solve operation class:
///
/// \code{.cpp}
/// #include <Eigen/Core>
/// #include <Spectra/SymEigsShiftSolver.h>
/// #include <iostream>
///
/// using namespace Spectra;
///
/// // M = diag(1, 2, ..., 10)
/// class MyDiagonalTenShiftSolve
/// {
/// private:
/// double sigma_;
/// public:
/// using Scalar = double; // A typedef named "Scalar" is required
/// int rows() { return 10; }
/// int cols() { return 10; }
/// void set_shift(double sigma) { sigma_ = sigma; }
/// // y_out = inv(A - sigma * I) * x_in
/// // inv(A - sigma * I) = diag(1/(1-sigma), 1/(2-sigma), ...)
/// void perform_op(double *x_in, double *y_out) const
/// {
/// for (int i = 0; i < rows(); i++)
/// {
/// y_out[i] = x_in[i] / (i + 1 - sigma_);
/// }
/// }
/// };
///
/// int main()
/// {
/// MyDiagonalTenShiftSolve op;
/// // Find three eigenvalues that are closest to 3.14
/// SymEigsShiftSolver<MyDiagonalTenShiftSolve> eigs(op, 3, 6, 3.14);
/// eigs.init();
/// eigs.compute(SortRule::LargestMagn);
/// if (eigs.info() == CompInfo::Successful)
/// {
/// Eigen::VectorXd evalues = eigs.eigenvalues();
/// // Will get (4.0, 3.0, 2.0)
/// std::cout << "Eigenvalues found:\n" << evalues << std::endl;
/// }
///
/// return 0;
/// }
/// \endcode
///
template <typename OpType = DenseSymShiftSolve<double>>
class SymEigsShiftSolver : public SymEigsBase<OpType, IdentityBOp>
{
private:
    using Scalar = typename OpType::Scalar;
    using Index = Eigen::Index;
    using Array = Eigen::Array<Scalar, Eigen::Dynamic, 1>;
    using Base = SymEigsBase<OpType, IdentityBOp>;
    using Base::m_nev;
    using Base::m_ritz_val;

    const Scalar m_sigma;

    // Map the Ritz values of the shifted-and-inverted problem back to
    // eigenvalues of the original matrix, then delegate sorting to the base.
    void sort_ritzpair(SortRule sort_rule) override
    {
        // The iteration works on nu = 1 / (lambda - sigma), hence
        // lambda = sigma + 1 / nu recovers the original eigenvalues
        m_ritz_val.head(m_nev).array() = m_ritz_val.head(m_nev).array().inverse() + m_sigma;
        Base::sort_ritzpair(sort_rule);
    }

public:
    ///
    /// Constructor to create an eigen solver object using the shift-and-invert mode.
    ///
    /// \param op    Matrix operation object implementing the shift-solve
    ///              operation of \f$A\f$, i.e. computing \f$(A-\sigma I)^{-1}v\f$
    ///              for any vector \f$v\f$. This can be an instance of a wrapper
    ///              class such as DenseSymShiftSolve, or a user-defined class
    ///              exposing the same public interface.
    /// \param nev   Number of eigenvalues requested; must satisfy
    ///              \f$1 \le nev \le n-1\f$, where \f$n\f$ is the matrix size.
    /// \param ncv   Parameter controlling the convergence speed of the
    ///              algorithm; must satisfy \f$nev < ncv \le n\f$, and
    ///              \f$ncv \ge 2\cdot nev\f$ is advised. A larger `ncv`
    ///              typically converges faster, at the cost of more memory and
    ///              more matrix operations per iteration.
    /// \param sigma The value of the shift.
    ///
    SymEigsShiftSolver(OpType& op, Index nev, Index ncv, const Scalar& sigma) :
        Base(op, IdentityBOp(), nev, ncv),
        m_sigma(sigma)
    {
        // Register the shift with the operation object so that subsequent
        // shift-solve calls apply (A - sigma * I)^{-1}
        op.set_shift(m_sigma);
    }
};
} // namespace Spectra
#endif // SPECTRA_SYM_EIGS_SHIFT_SOLVER_H
| 7,762
| 37.621891
| 98
|
h
|
null |
LRMI-main/Spectra/SymEigsSolver.h
|
// Copyright (C) 2016-2021 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_SYM_EIGS_SOLVER_H
#define SPECTRA_SYM_EIGS_SOLVER_H
#include "Eigen/Core"
#include "SymEigsBase.h"
#include "Util/SelectionRule.h"
#include "MatOp/DenseSymMatProd.h"
namespace Spectra {
///
/// \ingroup EigenSolver
///
/// This class implements the eigen solver for real symmetric matrices, i.e.,
/// to solve \f$Ax=\lambda x\f$ where \f$A\f$ is symmetric.
///
/// **Spectra** is designed to calculate a specified number (\f$k\f$)
/// of eigenvalues of a large square matrix (\f$A\f$). Usually \f$k\f$ is much
/// less than the size of the matrix (\f$n\f$), so that only a few eigenvalues
/// and eigenvectors are computed.
///
/// Rather than providing the whole \f$A\f$ matrix, the algorithm only requires
/// the matrix-vector multiplication operation of \f$A\f$. Therefore, users of
/// this solver need to supply a class that computes the result of \f$Av\f$
/// for any given vector \f$v\f$. The name of this class should be given to
/// the template parameter `OpType`, and an instance of this class is passed to
/// the constructor of SymEigsSolver.
///
/// If the matrix \f$A\f$ is already stored as a matrix object in **Eigen**,
/// for example `Eigen::MatrixXd`, then there is an easy way to construct such a
/// matrix operation class, by using the built-in wrapper class DenseSymMatProd
/// that wraps an existing matrix object in **Eigen**. This is also the
/// default template parameter for SymEigsSolver. For sparse matrices, the
/// wrapper class SparseSymMatProd can be used similarly.
///
/// If the users need to define their own matrix-vector multiplication operation
/// class, it should define a public type `Scalar` to indicate the element type,
/// and implement all the public member functions as in DenseSymMatProd.
///
/// \tparam OpType The name of the matrix operation class. Users could either
/// use the wrapper classes such as DenseSymMatProd and
/// SparseSymMatProd, or define their own that implements the type
/// definition `Scalar` and all the public member functions as in
/// DenseSymMatProd.
///
/// Below is an example that demonstrates the usage of this class.
///
/// \code{.cpp}
/// #include "Eigen/Core"
/// #include <Spectra/SymEigsSolver.h>
/// // <Spectra/MatOp/DenseSymMatProd.h> is implicitly included
/// #include <iostream>
///
/// using namespace Spectra;
///
/// int main()
/// {
/// // We are going to calculate the eigenvalues of M
/// Eigen::MatrixXd A = Eigen::MatrixXd::Random(10, 10);
/// Eigen::MatrixXd M = A + A.transpose();
///
/// // Construct matrix operation object using the wrapper class DenseSymMatProd
/// DenseSymMatProd<double> op(M);
///
/// // Construct eigen solver object, requesting the largest three eigenvalues
/// SymEigsSolver<DenseSymMatProd<double>> eigs(op, 3, 6);
///
/// // Initialize and compute
/// eigs.init();
/// int nconv = eigs.compute(SortRule::LargestAlge);
///
/// // Retrieve results
/// Eigen::VectorXd evalues;
/// if (eigs.info() == CompInfo::Successful)
/// evalues = eigs.eigenvalues();
///
/// std::cout << "Eigenvalues found:\n" << evalues << std::endl;
///
/// return 0;
/// }
/// \endcode
///
/// And here is an example for user-supplied matrix operation class.
///
/// \code{.cpp}
/// #include "Eigen/Core"
/// #include <Spectra/SymEigsSolver.h>
/// #include <iostream>
///
/// using namespace Spectra;
///
/// // M = diag(1, 2, ..., 10)
/// class MyDiagonalTen
/// {
/// public:
/// using Scalar = double; // A typedef named "Scalar" is required
/// int rows() { return 10; }
/// int cols() { return 10; }
/// // y_out = M * x_in
/// void perform_op(double *x_in, double *y_out) const
/// {
/// for (int i = 0; i < rows(); i++)
/// {
/// y_out[i] = x_in[i] * (i + 1);
/// }
/// }
/// };
///
/// int main()
/// {
/// MyDiagonalTen op;
/// SymEigsSolver<MyDiagonalTen> eigs(op, 3, 6);
/// eigs.init();
/// eigs.compute(SortRule::LargestAlge);
/// if (eigs.info() == CompInfo::Successful)
/// {
/// Eigen::VectorXd evalues = eigs.eigenvalues();
/// // Will get (10, 9, 8)
/// std::cout << "Eigenvalues found:\n" << evalues << std::endl;
/// }
///
/// return 0;
/// }
/// \endcode
///
template <typename OpType = DenseSymMatProd<double>>
class SymEigsSolver : public SymEigsBase<OpType, IdentityBOp>
{
private:
    using Index = Eigen::Index;
    // Alias for the base class, matching the convention used by the other
    // solver classes in this library (SymGEigsSolver, SymGEigsShiftSolver)
    using Base = SymEigsBase<OpType, IdentityBOp>;
public:
    ///
    /// Constructor to create a solver object.
    ///
    /// \param op   The matrix operation object that implements
    ///             the matrix-vector multiplication operation of \f$A\f$:
    ///             calculating \f$Av\f$ for any vector \f$v\f$. Users could either
    ///             create the object from the wrapper class such as DenseSymMatProd, or
    ///             define their own that implements all the public members
    ///             as in DenseSymMatProd.
    /// \param nev  Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-1\f$,
    ///             where \f$n\f$ is the size of matrix.
    /// \param ncv  Parameter that controls the convergence speed of the algorithm.
    ///             Typically a larger `ncv` means faster convergence, but it may
    ///             also result in greater memory use and more matrix operations
    ///             in each iteration. This parameter must satisfy \f$nev < ncv \le n\f$,
    ///             and is advised to take \f$ncv \ge 2\cdot nev\f$.
    ///
    SymEigsSolver(OpType& op, Index nev, Index ncv) :
        Base(op, IdentityBOp(), nev, ncv)
    {}
};
} // namespace Spectra
#endif // SPECTRA_SYM_EIGS_SOLVER_H
| 6,053
| 35.690909
| 96
|
h
|
null |
LRMI-main/Spectra/SymGEigsShiftSolver.h
|
// Copyright (C) 2020-2021 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_SYM_GEIGS_SHIFT_SOLVER_H
#define SPECTRA_SYM_GEIGS_SHIFT_SOLVER_H
#include <utility> // std::move
#include "SymEigsBase.h"
#include "Util/GEigsMode.h"
#include "MatOp/internal/SymGEigsShiftInvertOp.h"
#include "MatOp/internal/SymGEigsBucklingOp.h"
#include "MatOp/internal/SymGEigsCayleyOp.h"
namespace Spectra {
///
/// \ingroup GEigenSolver
///
/// This class implements the generalized eigen solver for real symmetric
/// matrices, i.e., to solve \f$Ax=\lambda Bx\f$ where \f$A\f$ and \f$B\f$ are symmetric
/// matrices. A spectral transform is applied to seek interior
/// generalized eigenvalues with respect to some shift \f$\sigma\f$.
///
/// There are different modes of this solver, specified by the template parameter `Mode`.
/// See the pages for the specialized classes for details.
/// - The shift-and-invert mode transforms the problem into \f$(A-\sigma B)^{-1}Bx=\nu x\f$,
/// where \f$\nu=1/(\lambda-\sigma)\f$. This mode assumes that \f$B\f$ is positive definite.
/// See \ref SymGEigsShiftSolver<OpType, BOpType, GEigsMode::ShiftInvert>
/// "SymGEigsShiftSolver (Shift-and-invert mode)" for more details.
/// - The buckling mode transforms the problem into \f$(A-\sigma B)^{-1}Ax=\nu x\f$,
/// where \f$\nu=\lambda/(\lambda-\sigma)\f$. This mode assumes that \f$A\f$ is positive definite.
/// See \ref SymGEigsShiftSolver<OpType, BOpType, GEigsMode::Buckling>
/// "SymGEigsShiftSolver (Buckling mode)" for more details.
/// - The Cayley mode transforms the problem into \f$(A-\sigma B)^{-1}(A+\sigma B)x=\nu x\f$,
/// where \f$\nu=(\lambda+\sigma)/(\lambda-\sigma)\f$. This mode assumes that \f$B\f$ is positive definite.
/// See \ref SymGEigsShiftSolver<OpType, BOpType, GEigsMode::Cayley>
/// "SymGEigsShiftSolver (Cayley mode)" for more details.
// Empty class template
// Primary template, intentionally left empty: only the partial
// specializations for GEigsMode::ShiftInvert, GEigsMode::Buckling and
// GEigsMode::Cayley (defined below) are usable.
template <typename OpType, typename BOpType, GEigsMode Mode>
class SymGEigsShiftSolver
{};
///
/// \ingroup GEigenSolver
///
/// This class implements the generalized eigen solver for real symmetric
/// matrices using the shift-and-invert spectral transformation. The original problem is
/// to solve \f$Ax=\lambda Bx\f$, where \f$A\f$ is symmetric and \f$B\f$ is positive definite.
/// The transformed problem is \f$(A-\sigma B)^{-1}Bx=\nu x\f$, where
/// \f$\nu=1/(\lambda-\sigma)\f$, and \f$\sigma\f$ is a user-specified shift.
///
/// This solver requires two matrix operation objects: one to compute \f$y=(A-\sigma B)^{-1}v\f$
/// for any vector \f$v\f$, and one for the matrix multiplication \f$Bv\f$.
///
/// If \f$A\f$ and \f$B\f$ are stored as Eigen matrices, then the first operation object
/// can be created using the SymShiftInvert class, and the second one can be created
/// using the DenseSymMatProd or SparseSymMatProd classes. If the users need to define their
/// own operation classes, then they should implement all the public member functions as
/// in those built-in classes.
///
/// \tparam OpType The type of the first operation object. Users could either
/// use the wrapper class SymShiftInvert, or define their own that implements
/// the type definition `Scalar` and all the public member functions as in SymShiftInvert.
/// \tparam BOpType The name of the matrix operation class for \f$B\f$. Users could either
/// use the wrapper classes such as DenseSymMatProd and
/// SparseSymMatProd, or define their own that implements all the
/// public member functions as in DenseSymMatProd.
/// \tparam Mode Mode of the generalized eigen solver. In this solver
/// it is Spectra::GEigsMode::ShiftInvert.
///
/// Below is an example that demonstrates the usage of this class.
///
/// \code{.cpp}
/// #include <Eigen/Core>
/// #include <Eigen/SparseCore>
/// #include <Spectra/SymGEigsShiftSolver.h>
/// #include <Spectra/MatOp/SymShiftInvert.h>
/// #include <Spectra/MatOp/SparseSymMatProd.h>
/// #include <iostream>
///
/// using namespace Spectra;
///
/// int main()
/// {
/// // We are going to solve the generalized eigenvalue problem
/// // A * x = lambda * B * x,
/// // where A is symmetric and B is positive definite
/// const int n = 100;
///
/// // Define the A matrix
/// Eigen::MatrixXd M = Eigen::MatrixXd::Random(n, n);
/// Eigen::MatrixXd A = M + M.transpose();
///
/// // Define the B matrix, a tridiagonal matrix with 2 on the diagonal
/// // and 1 on the subdiagonals
/// Eigen::SparseMatrix<double> B(n, n);
/// B.reserve(Eigen::VectorXi::Constant(n, 3));
/// for (int i = 0; i < n; i++)
/// {
/// B.insert(i, i) = 2.0;
/// if (i > 0)
/// B.insert(i - 1, i) = 1.0;
/// if (i < n - 1)
/// B.insert(i + 1, i) = 1.0;
/// }
///
/// // Construct matrix operation objects using the wrapper classes
/// // A is dense, B is sparse
/// using OpType = SymShiftInvert<double, Eigen::Dense, Eigen::Sparse>;
/// using BOpType = SparseSymMatProd<double>;
/// OpType op(A, B);
/// BOpType Bop(B);
///
/// // Construct generalized eigen solver object, seeking three generalized
/// // eigenvalues that are closest to zero. This is equivalent to specifying
/// // a shift sigma = 0.0 combined with the SortRule::LargestMagn selection rule
/// SymGEigsShiftSolver<OpType, BOpType, GEigsMode::ShiftInvert>
/// geigs(op, Bop, 3, 6, 0.0);
///
/// // Initialize and compute
/// geigs.init();
/// int nconv = geigs.compute(SortRule::LargestMagn);
///
/// // Retrieve results
/// Eigen::VectorXd evalues;
/// Eigen::MatrixXd evecs;
/// if (geigs.info() == CompInfo::Successful)
/// {
/// evalues = geigs.eigenvalues();
/// evecs = geigs.eigenvectors();
/// }
///
/// std::cout << "Number of converged generalized eigenvalues: " << nconv << std::endl;
/// std::cout << "Generalized eigenvalues found:\n" << evalues << std::endl;
/// std::cout << "Generalized eigenvectors found:\n" << evecs.topRows(10) << std::endl;
///
/// return 0;
/// }
/// \endcode
// Partial specialization for mode = GEigsMode::ShiftInvert
// Partial specialization for mode = GEigsMode::ShiftInvert
template <typename OpType, typename BOpType>
class SymGEigsShiftSolver<OpType, BOpType, GEigsMode::ShiftInvert> :
    public SymEigsBase<SymGEigsShiftInvertOp<OpType, BOpType>, BOpType>
{
private:
    using Scalar = typename OpType::Scalar;
    using Index = Eigen::Index;
    using Array = Eigen::Array<Scalar, Eigen::Dynamic, 1>;
    using ModeMatOp = SymGEigsShiftInvertOp<OpType, BOpType>;
    using Base = SymEigsBase<ModeMatOp, BOpType>;
    using Base::m_nev;
    using Base::m_ritz_val;
    const Scalar m_sigma;
    // Install the shift on the operation object before it is handed to the base class
    static ModeMatOp set_shift_and_move(ModeMatOp&& op, const Scalar& sigma)
    {
        op.set_shift(sigma);
        return std::move(op);
    }
    // Map the Ritz values of the transformed problem back to the original
    // problem, and then sort. The iteration yields nu = 1 / (lambda - sigma),
    // so the original eigenvalues are lambda = 1 / nu + sigma.
    void sort_ritzpair(SortRule sort_rule) override
    {
        Array nu = m_ritz_val.head(m_nev).array();
        m_ritz_val.head(m_nev).array() = Scalar(1) / nu + m_sigma;
        Base::sort_ritzpair(sort_rule);
    }
public:
    ///
    /// Constructor to create a solver object.
    ///
    /// \param op     The matrix operation object that computes \f$y=(A-\sigma B)^{-1}v\f$
    ///               for any vector \f$v\f$. Users could either create the object from the
    ///               wrapper class SymShiftInvert, or define their own that implements all
    ///               the public members as in SymShiftInvert.
    /// \param Bop    The \f$B\f$ matrix operation object that implements the matrix-vector
    ///               multiplication \f$Bv\f$. Users could either create the object from the
    ///               wrapper classes such as DenseSymMatProd and SparseSymMatProd, or
    ///               define their own that implements all the public member functions
    ///               as in DenseSymMatProd. \f$B\f$ needs to be positive definite.
    /// \param nev    Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-1\f$,
    ///               where \f$n\f$ is the size of matrix.
    /// \param ncv    Parameter that controls the convergence speed of the algorithm.
    ///               Typically a larger `ncv` means faster convergence, but it may
    ///               also result in greater memory use and more matrix operations
    ///               in each iteration. This parameter must satisfy \f$nev < ncv \le n\f$,
    ///               and is advised to take \f$ncv \ge 2\cdot nev\f$.
    /// \param sigma  The value of the shift.
    ///
    SymGEigsShiftSolver(OpType& op, BOpType& Bop, Index nev, Index ncv, const Scalar& sigma) :
        Base(set_shift_and_move(ModeMatOp(op, Bop), sigma), Bop, nev, ncv),
        m_sigma(sigma)
    {}
};
///
/// \ingroup GEigenSolver
///
/// This class implements the generalized eigen solver for real symmetric
/// matrices in the buckling mode. The original problem is
/// to solve \f$Kx=\lambda K_G x\f$, where \f$K\f$ is positive definite and \f$K_G\f$ is symmetric.
/// The transformed problem is \f$(K-\sigma K_G)^{-1}Kx=\nu x\f$, where
/// \f$\nu=\lambda/(\lambda-\sigma)\f$, and \f$\sigma\f$ is a user-specified shift.
///
/// This solver requires two matrix operation objects: one to compute \f$y=(K-\sigma K_G)^{-1}v\f$
/// for any vector \f$v\f$, and one for the matrix multiplication \f$Kv\f$.
///
/// If \f$K\f$ and \f$K_G\f$ are stored as Eigen matrices, then the first operation object
/// can be created using the SymShiftInvert class, and the second one can be created
/// using the DenseSymMatProd or SparseSymMatProd classes. If the users need to define their
/// own operation classes, then they should implement all the public member functions as
/// in those built-in classes.
///
/// \tparam OpType The type of the first operation object. Users could either
/// use the wrapper class SymShiftInvert, or define their own that implements
/// the type definition `Scalar` and all the public member functions as in SymShiftInvert.
/// \tparam BOpType The name of the matrix operation class for \f$K\f$. Users could either
/// use the wrapper classes such as DenseSymMatProd and
/// SparseSymMatProd, or define their own that implements all the
/// public member functions as in DenseSymMatProd.
/// \tparam Mode Mode of the generalized eigen solver. In this solver
/// it is Spectra::GEigsMode::Buckling.
///
/// Below is an example that demonstrates the usage of this class.
///
/// \code{.cpp}
/// #include <Eigen/Core>
/// #include <Eigen/SparseCore>
/// #include <Spectra/SymGEigsShiftSolver.h>
/// #include <Spectra/MatOp/SymShiftInvert.h>
/// #include <Spectra/MatOp/SparseSymMatProd.h>
/// #include <iostream>
///
/// using namespace Spectra;
///
/// int main()
/// {
/// // We are going to solve the generalized eigenvalue problem
/// // K * x = lambda * KG * x,
/// // where K is positive definite, and KG is symmetric
/// const int n = 100;
///
/// // Define the K matrix, a tridiagonal matrix with 2 on the diagonal
/// // and 1 on the subdiagonals
/// Eigen::SparseMatrix<double> K(n, n);
/// K.reserve(Eigen::VectorXi::Constant(n, 3));
/// for (int i = 0; i < n; i++)
/// {
/// K.insert(i, i) = 2.0;
/// if (i > 0)
/// K.insert(i - 1, i) = 1.0;
/// if (i < n - 1)
/// K.insert(i + 1, i) = 1.0;
/// }
///
/// // Define the KG matrix
/// Eigen::MatrixXd M = Eigen::MatrixXd::Random(n, n);
/// Eigen::MatrixXd KG = M + M.transpose();
///
/// // Construct matrix operation objects using the wrapper classes
/// // K is sparse, KG is dense
/// using OpType = SymShiftInvert<double, Eigen::Sparse, Eigen::Dense>;
/// using BOpType = SparseSymMatProd<double>;
/// OpType op(K, KG);
/// BOpType Bop(K);
///
/// // Construct generalized eigen solver object, seeking three generalized
/// // eigenvalues that are closest to and larger than 1.0. This is equivalent to
/// // specifying a shift sigma = 1.0 combined with the SortRule::LargestAlge
/// // selection rule
/// SymGEigsShiftSolver<OpType, BOpType, GEigsMode::Buckling>
/// geigs(op, Bop, 3, 6, 1.0);
///
/// // Initialize and compute
/// geigs.init();
/// int nconv = geigs.compute(SortRule::LargestAlge);
///
/// // Retrieve results
/// Eigen::VectorXd evalues;
/// Eigen::MatrixXd evecs;
/// if (geigs.info() == CompInfo::Successful)
/// {
/// evalues = geigs.eigenvalues();
/// evecs = geigs.eigenvectors();
/// }
///
/// std::cout << "Number of converged generalized eigenvalues: " << nconv << std::endl;
/// std::cout << "Generalized eigenvalues found:\n" << evalues << std::endl;
/// std::cout << "Generalized eigenvectors found:\n" << evecs.topRows(10) << std::endl;
///
/// return 0;
/// }
/// \endcode
// Partial specialization for mode = GEigsMode::Buckling
// Partial specialization for mode = GEigsMode::Buckling
template <typename OpType, typename BOpType>
class SymGEigsShiftSolver<OpType, BOpType, GEigsMode::Buckling> :
    public SymEigsBase<SymGEigsBucklingOp<OpType, BOpType>, BOpType>
{
private:
    using Scalar = typename OpType::Scalar;
    using Index = Eigen::Index;
    using Array = Eigen::Array<Scalar, Eigen::Dynamic, 1>;
    using ModeMatOp = SymGEigsBucklingOp<OpType, BOpType>;
    using Base = SymEigsBase<ModeMatOp, BOpType>;
    using Base::m_nev;
    using Base::m_ritz_val;
    const Scalar m_sigma;
    // Validate and install the shift on the operation object before it is
    // handed to the base class; a zero shift is not allowed in this mode
    static ModeMatOp set_shift_and_move(ModeMatOp&& op, const Scalar& sigma)
    {
        if (sigma == Scalar(0))
            throw std::invalid_argument("SymGEigsShiftSolver: sigma cannot be zero in the buckling mode");
        op.set_shift(sigma);
        return std::move(op);
    }
    // Map the Ritz values of the transformed problem back to the original
    // problem, and then sort. The iteration yields nu = lambda / (lambda - sigma),
    // so the original eigenvalues are lambda = sigma * nu / (nu - 1).
    void sort_ritzpair(SortRule sort_rule) override
    {
        Array nu = m_ritz_val.head(m_nev).array();
        m_ritz_val.head(m_nev).array() = m_sigma * nu / (nu - Scalar(1));
        Base::sort_ritzpair(sort_rule);
    }
public:
    ///
    /// Constructor to create a solver object.
    ///
    /// \param op     The matrix operation object that computes \f$y=(K-\sigma K_G)^{-1}v\f$
    ///               for any vector \f$v\f$. Users could either create the object from the
    ///               wrapper class SymShiftInvert, or define their own that implements all
    ///               the public members as in SymShiftInvert.
    /// \param Bop    The \f$K\f$ matrix operation object that implements the matrix-vector
    ///               multiplication \f$Kv\f$. Users could either create the object from the
    ///               wrapper classes such as DenseSymMatProd and SparseSymMatProd, or
    ///               define their own that implements all the public member functions
    ///               as in DenseSymMatProd. \f$K\f$ needs to be positive definite.
    /// \param nev    Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-1\f$,
    ///               where \f$n\f$ is the size of matrix.
    /// \param ncv    Parameter that controls the convergence speed of the algorithm.
    ///               Typically a larger `ncv` means faster convergence, but it may
    ///               also result in greater memory use and more matrix operations
    ///               in each iteration. This parameter must satisfy \f$nev < ncv \le n\f$,
    ///               and is advised to take \f$ncv \ge 2\cdot nev\f$.
    /// \param sigma  The value of the shift.
    ///
    SymGEigsShiftSolver(OpType& op, BOpType& Bop, Index nev, Index ncv, const Scalar& sigma) :
        Base(set_shift_and_move(ModeMatOp(op, Bop), sigma), Bop, nev, ncv),
        m_sigma(sigma)
    {}
};
///
/// \ingroup GEigenSolver
///
/// This class implements the generalized eigen solver for real symmetric
/// matrices using the Cayley spectral transformation. The original problem is
/// to solve \f$Ax=\lambda Bx\f$, where \f$A\f$ is symmetric and \f$B\f$ is positive definite.
/// The transformed problem is \f$(A-\sigma B)^{-1}(A+\sigma B)x=\nu x\f$, where
/// \f$\nu=(\lambda+\sigma)/(\lambda-\sigma)\f$, and \f$\sigma\f$ is a user-specified shift.
///
/// This solver requires two matrix operation objects: one to compute \f$y=(A-\sigma B)^{-1}v\f$
/// for any vector \f$v\f$, and one for the matrix multiplication \f$Bv\f$.
///
/// If \f$A\f$ and \f$B\f$ are stored as Eigen matrices, then the first operation object
/// can be created using the SymShiftInvert class, and the second one can be created
/// using the DenseSymMatProd or SparseSymMatProd classes. If the users need to define their
/// own operation classes, then they should implement all the public member functions as
/// in those built-in classes.
///
/// \tparam OpType The type of the first operation object. Users could either
/// use the wrapper class SymShiftInvert, or define their own that implements
/// the type definition `Scalar` and all the public member functions as in SymShiftInvert.
/// \tparam BOpType The name of the matrix operation class for \f$B\f$. Users could either
/// use the wrapper classes such as DenseSymMatProd and
/// SparseSymMatProd, or define their own that implements all the
/// public member functions as in DenseSymMatProd.
/// \tparam Mode Mode of the generalized eigen solver. In this solver
/// it is Spectra::GEigsMode::Cayley.
// Partial specialization for mode = GEigsMode::Cayley
// Partial specialization for mode = GEigsMode::Cayley
template <typename OpType, typename BOpType>
class SymGEigsShiftSolver<OpType, BOpType, GEigsMode::Cayley> :
    public SymEigsBase<SymGEigsCayleyOp<OpType, BOpType>, BOpType>
{
private:
    using Scalar = typename OpType::Scalar;
    using Index = Eigen::Index;
    using Array = Eigen::Array<Scalar, Eigen::Dynamic, 1>;
    using ModeMatOp = SymGEigsCayleyOp<OpType, BOpType>;
    using Base = SymEigsBase<ModeMatOp, BOpType>;
    using Base::m_nev;
    using Base::m_ritz_val;
    const Scalar m_sigma;
    // Validate and install the shift on the operation object before it is
    // handed to the base class; a zero shift is not allowed in this mode
    static ModeMatOp set_shift_and_move(ModeMatOp&& op, const Scalar& sigma)
    {
        if (sigma == Scalar(0))
            throw std::invalid_argument("SymGEigsShiftSolver: sigma cannot be zero in the Cayley mode");
        op.set_shift(sigma);
        return std::move(op);
    }
    // Map the Ritz values of the transformed problem back to the original
    // problem, and then sort. The iteration yields nu = (lambda + sigma) / (lambda - sigma),
    // so the original eigenvalues are lambda = sigma * (nu + 1) / (nu - 1).
    void sort_ritzpair(SortRule sort_rule) override
    {
        Array nu = m_ritz_val.head(m_nev).array();
        m_ritz_val.head(m_nev).array() = m_sigma * (nu + Scalar(1)) / (nu - Scalar(1));
        Base::sort_ritzpair(sort_rule);
    }
public:
    ///
    /// Constructor to create a solver object.
    ///
    /// \param op     The matrix operation object that computes \f$y=(A-\sigma B)^{-1}v\f$
    ///               for any vector \f$v\f$. Users could either create the object from the
    ///               wrapper class SymShiftInvert, or define their own that implements all
    ///               the public members as in SymShiftInvert.
    /// \param Bop    The \f$B\f$ matrix operation object that implements the matrix-vector
    ///               multiplication \f$Bv\f$. Users could either create the object from the
    ///               wrapper classes such as DenseSymMatProd and SparseSymMatProd, or
    ///               define their own that implements all the public member functions
    ///               as in DenseSymMatProd. \f$B\f$ needs to be positive definite.
    /// \param nev    Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-1\f$,
    ///               where \f$n\f$ is the size of matrix.
    /// \param ncv    Parameter that controls the convergence speed of the algorithm.
    ///               Typically a larger `ncv` means faster convergence, but it may
    ///               also result in greater memory use and more matrix operations
    ///               in each iteration. This parameter must satisfy \f$nev < ncv \le n\f$,
    ///               and is advised to take \f$ncv \ge 2\cdot nev\f$.
    /// \param sigma  The value of the shift.
    ///
    SymGEigsShiftSolver(OpType& op, BOpType& Bop, Index nev, Index ncv, const Scalar& sigma) :
        Base(set_shift_and_move(ModeMatOp(op, Bop), sigma), Bop, nev, ncv),
        m_sigma(sigma)
    {}
};
} // namespace Spectra
#endif // SPECTRA_SYM_GEIGS_SHIFT_SOLVER_H
| 21,455
| 45.241379
| 109
|
h
|
null |
LRMI-main/Spectra/SymGEigsSolver.h
|
// Copyright (C) 2016-2021 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_SYM_GEIGS_SOLVER_H
#define SPECTRA_SYM_GEIGS_SOLVER_H
#include "SymEigsBase.h"
#include "Util/GEigsMode.h"
#include "MatOp/internal/SymGEigsCholeskyOp.h"
#include "MatOp/internal/SymGEigsRegInvOp.h"
namespace Spectra {
///
/// \defgroup GEigenSolver Generalized Eigen Solvers
///
/// Generalized eigen solvers for different types of problems.
///
///
/// \ingroup GEigenSolver
///
/// This class implements the generalized eigen solver for real symmetric
/// matrices, i.e., to solve \f$Ax=\lambda Bx\f$ where \f$A\f$ is symmetric and
/// \f$B\f$ is positive definite.
///
/// There are two modes of this solver, specified by the template parameter `Mode`.
/// See the pages for the specialized classes for details.
/// - The Cholesky mode assumes that \f$B\f$ can be factorized using Cholesky
/// decomposition, which is the preferred mode when the decomposition is
/// available. (This can be easily done in Eigen using the dense or sparse
/// Cholesky solver.)
/// See \ref SymGEigsSolver<OpType, BOpType, GEigsMode::Cholesky> "SymGEigsSolver (Cholesky mode)" for more details.
/// - The regular inverse mode requires the matrix-vector product \f$Bv\f$ and the
/// linear equation solving operation \f$B^{-1}v\f$. This mode should only be
/// used when the Cholesky decomposition of \f$B\f$ is hard to implement, or
/// when computing \f$B^{-1}v\f$ is much faster than the Cholesky decomposition.
/// See \ref SymGEigsSolver<OpType, BOpType, GEigsMode::RegularInverse> "SymGEigsSolver (Regular inverse mode)" for more details.
// Empty class template
// Primary template, intentionally left empty: only the partial
// specializations for GEigsMode::Cholesky and GEigsMode::RegularInverse
// (defined below) are usable.
template <typename OpType, typename BOpType, GEigsMode Mode>
class SymGEigsSolver
{};
///
/// \ingroup GEigenSolver
///
/// This class implements the generalized eigen solver for real symmetric
/// matrices using Cholesky decomposition, i.e., to solve \f$Ax=\lambda Bx\f$
/// where \f$A\f$ is symmetric and \f$B\f$ is positive definite with the Cholesky
/// decomposition \f$B=LL'\f$.
///
/// This solver requires two matrix operation objects: one for \f$A\f$ that implements
/// the matrix multiplication \f$Av\f$, and one for \f$B\f$ that implements the lower
/// and upper triangular solving \f$L^{-1}v\f$ and \f$(L')^{-1}v\f$.
///
/// If \f$A\f$ and \f$B\f$ are stored as Eigen matrices, then the first operation
/// can be created using the DenseSymMatProd or SparseSymMatProd classes, and
/// the second operation can be created using the DenseCholesky or SparseCholesky
/// classes. If the users need to define their own operation classes, then they
/// should implement all the public member functions as in those built-in classes.
///
/// \tparam OpType The name of the matrix operation class for \f$A\f$. Users could either
/// use the wrapper classes such as DenseSymMatProd and
/// SparseSymMatProd, or define their own that implements the type
/// definition `Scalar` and all the public member functions as in
/// DenseSymMatProd.
/// \tparam BOpType The name of the matrix operation class for \f$B\f$. Users could either
/// use the wrapper classes such as DenseCholesky and
/// SparseCholesky, or define their own that implements all the
/// public member functions as in DenseCholesky.
/// \tparam Mode Mode of the generalized eigen solver. In this solver
/// it is Spectra::GEigsMode::Cholesky.
///
/// Below is an example that demonstrates the usage of this class.
///
/// \code{.cpp}
/// #include <Eigen/Core>
/// #include <Eigen/SparseCore>
/// #include <Eigen/Eigenvalues>
/// #include <Spectra/SymGEigsSolver.h>
/// #include <Spectra/MatOp/DenseSymMatProd.h>
/// #include <Spectra/MatOp/SparseCholesky.h>
/// #include <iostream>
///
/// using namespace Spectra;
///
/// int main()
/// {
/// // We are going to solve the generalized eigenvalue problem A * x = lambda * B * x
/// const int n = 100;
///
/// // Define the A matrix
/// Eigen::MatrixXd M = Eigen::MatrixXd::Random(n, n);
/// Eigen::MatrixXd A = M + M.transpose();
///
/// // Define the B matrix, a band matrix with 2 on the diagonal and 1 on the subdiagonals
/// Eigen::SparseMatrix<double> B(n, n);
/// B.reserve(Eigen::VectorXi::Constant(n, 3));
/// for (int i = 0; i < n; i++)
/// {
/// B.insert(i, i) = 2.0;
/// if (i > 0)
/// B.insert(i - 1, i) = 1.0;
/// if (i < n - 1)
/// B.insert(i + 1, i) = 1.0;
/// }
///
/// // Construct matrix operation objects using the wrapper classes
/// DenseSymMatProd<double> op(A);
/// SparseCholesky<double> Bop(B);
///
/// // Construct generalized eigen solver object, requesting the largest three generalized eigenvalues
/// SymGEigsSolver<DenseSymMatProd<double>, SparseCholesky<double>, GEigsMode::Cholesky>
/// geigs(op, Bop, 3, 6);
///
/// // Initialize and compute
/// geigs.init();
/// int nconv = geigs.compute(SortRule::LargestAlge);
///
/// // Retrieve results
/// Eigen::VectorXd evalues;
/// Eigen::MatrixXd evecs;
/// if (geigs.info() == CompInfo::Successful)
/// {
/// evalues = geigs.eigenvalues();
/// evecs = geigs.eigenvectors();
/// }
///
/// std::cout << "Generalized eigenvalues found:\n" << evalues << std::endl;
/// std::cout << "Generalized eigenvectors found:\n" << evecs.topRows(10) << std::endl;
///
/// // Verify results using the generalized eigen solver in Eigen
/// Eigen::MatrixXd Bdense = B;
/// Eigen::GeneralizedSelfAdjointEigenSolver<Eigen::MatrixXd> es(A, Bdense);
///
/// std::cout << "Generalized eigenvalues:\n" << es.eigenvalues().tail(3) << std::endl;
/// std::cout << "Generalized eigenvectors:\n" << es.eigenvectors().rightCols(3).topRows(10) << std::endl;
///
/// return 0;
/// }
/// \endcode
// Partial specialization for mode = GEigsMode::Cholesky
template <typename OpType, typename BOpType>
class SymGEigsSolver<OpType, BOpType, GEigsMode::Cholesky> :
public SymEigsBase<SymGEigsCholeskyOp<OpType, BOpType>, IdentityBOp>
{
private:
using Scalar = typename OpType::Scalar;
using Index = Eigen::Index;
using Matrix = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>;
using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;
using ModeMatOp = SymGEigsCholeskyOp<OpType, BOpType>;
using Base = SymEigsBase<ModeMatOp, IdentityBOp>;
const BOpType& m_Bop;
public:
///
/// Constructor to create a solver object.
///
/// \param op The \f$A\f$ matrix operation object that implements the matrix-vector
/// multiplication operation of \f$A\f$:
/// calculating \f$Av\f$ for any vector \f$v\f$. Users could either
/// create the object from the wrapper classes such as DenseSymMatProd, or
/// define their own that implements all the public members
/// as in DenseSymMatProd.
/// \param Bop The \f$B\f$ matrix operation object that represents a Cholesky decomposition of \f$B\f$.
/// It should implement the lower and upper triangular solving operations:
/// calculating \f$L^{-1}v\f$ and \f$(L')^{-1}v\f$ for any vector
/// \f$v\f$, where \f$LL'=B\f$. Users could either
/// create the object from the wrapper classes such as DenseCholesky, or
/// define their own that implements all the public member functions
/// as in DenseCholesky. \f$B\f$ needs to be positive definite.
/// \param nev Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-1\f$,
/// where \f$n\f$ is the size of matrix.
/// \param ncv Parameter that controls the convergence speed of the algorithm.
/// Typically a larger `ncv` means faster convergence, but it may
/// also result in greater memory use and more matrix operations
/// in each iteration. This parameter must satisfy \f$nev < ncv \le n\f$,
/// and is advised to take \f$ncv \ge 2\cdot nev\f$.
///
SymGEigsSolver(OpType& op, BOpType& Bop, Index nev, Index ncv) :
Base(ModeMatOp(op, Bop), IdentityBOp(), nev, ncv),
m_Bop(Bop)
{}
/// \cond
Matrix eigenvectors(Index nvec) const override
{
Matrix res = Base::eigenvectors(nvec);
Vector tmp(res.rows());
const Index nconv = res.cols();
for (Index i = 0; i < nconv; i++)
{
m_Bop.upper_triangular_solve(&res(0, i), tmp.data());
res.col(i).noalias() = tmp;
}
return res;
}
Matrix eigenvectors() const override
{
return SymGEigsSolver<OpType, BOpType, GEigsMode::Cholesky>::eigenvectors(this->m_nev);
}
/// \endcond
};
///
/// \ingroup GEigenSolver
///
/// This class implements the generalized eigen solver for real symmetric
/// matrices in the regular inverse mode, i.e., to solve \f$Ax=\lambda Bx\f$
/// where \f$A\f$ is symmetric, and \f$B\f$ is positive definite with the operations
/// defined below.
///
/// This solver requires two matrix operation objects: one for \f$A\f$ that implements
/// the matrix multiplication \f$Av\f$, and one for \f$B\f$ that implements the
/// matrix-vector product \f$Bv\f$ and the linear equation solving operation \f$B^{-1}v\f$.
///
/// If \f$A\f$ and \f$B\f$ are stored as Eigen matrices, then the first operation
/// can be created using the DenseSymMatProd or SparseSymMatProd classes, and
/// the second operation can be created using the SparseRegularInverse class. There is no
/// wrapper class for a dense \f$B\f$ matrix since in this case the Cholesky mode
/// is always preferred. If the users need to define their own operation classes, then they
/// should implement all the public member functions as in those built-in classes.
///
/// \tparam OpType The name of the matrix operation class for \f$A\f$. Users could either
/// use the wrapper classes such as DenseSymMatProd and
/// SparseSymMatProd, or define their own that implements the type
/// definition `Scalar` and all the public member functions as in
/// DenseSymMatProd.
/// \tparam BOpType The name of the matrix operation class for \f$B\f$. Users could either
/// use the wrapper class SparseRegularInverse, or define their
/// own that implements all the public member functions as in
/// SparseRegularInverse.
/// \tparam Mode Mode of the generalized eigen solver. In this solver
/// it is Spectra::GEigsMode::RegularInverse.
///
// Partial specialization for mode = GEigsMode::RegularInverse
template <typename OpType, typename BOpType>
class SymGEigsSolver<OpType, BOpType, GEigsMode::RegularInverse> :
    public SymEigsBase<SymGEigsRegInvOp<OpType, BOpType>, BOpType>
{
private:
    using Index = Eigen::Index;

    // Wrapped operation computing B^{-1} * A * v, used as the "matrix" of the
    // underlying symmetric eigen solver
    using ModeMatOp = SymGEigsRegInvOp<OpType, BOpType>;
    using Base = SymEigsBase<ModeMatOp, BOpType>;

public:
    ///
    /// Constructor to create a solver object.
    ///
    /// \param op   The \f$A\f$ matrix operation object that implements the matrix-vector
    ///             multiplication operation of \f$A\f$:
    ///             calculating \f$Av\f$ for any vector \f$v\f$. Users could either
    ///             create the object from the wrapper classes such as DenseSymMatProd, or
    ///             define their own that implements all the public members
    ///             as in DenseSymMatProd.
    /// \param Bop  The \f$B\f$ matrix operation object that implements the multiplication operation
    ///             \f$Bv\f$ and the linear equation solving operation \f$B^{-1}v\f$ for any vector \f$v\f$.
    ///             Users could either create the object from the wrapper class SparseRegularInverse, or
    ///             define their own that implements all the public member functions
    ///             as in SparseRegularInverse. \f$B\f$ needs to be positive definite.
    /// \param nev  Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-1\f$,
    ///             where \f$n\f$ is the size of matrix.
    /// \param ncv  Parameter that controls the convergence speed of the algorithm.
    ///             Typically a larger `ncv` means faster convergence, but it may
    ///             also result in greater memory use and more matrix operations
    ///             in each iteration. This parameter must satisfy \f$nev < ncv \le n\f$,
    ///             and is advised to take \f$ncv \ge 2\cdot nev\f$.
    ///
    // Unlike the Cholesky mode above, `Bop` is also forwarded to the base class
    // as the inner-product operator, so the Lanczos process runs in the B inner
    // product and no back-transformation of the eigenvectors is performed here
    SymGEigsSolver(OpType& op, BOpType& Bop, Index nev, Index ncv) :
        Base(ModeMatOp(op, Bop), Bop, nev, ncv)
    {}
};
} // namespace Spectra
#endif // SPECTRA_SYM_GEIGS_SOLVER_H
| 13,190
| 44.329897
| 131
|
h
|
null |
LRMI-main/Spectra/LinAlg/Arnoldi.h
|
// Copyright (C) 2018-2021 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_ARNOLDI_H
#define SPECTRA_ARNOLDI_H
#include "Eigen/Core"
#include <cmath> // std::sqrt
#include <utility> // std::move
#include <stdexcept> // std::invalid_argument
#include "../MatOp/internal/ArnoldiOp.h"
#include "../Util/TypeTraits.h"
#include "../Util/SimpleRandom.h"
#include "UpperHessenbergQR.h"
#include "DoubleShiftQR.h"
namespace Spectra {
// Arnoldi factorization A * V = V * H + f * e'
// A: n x n
// V: n x k
// H: k x k
// f: n x 1
// e: [0, ..., 0, 1]
// V and H are allocated of dimension m, so the maximum value of k is m
template <typename Scalar, typename ArnoldiOpType>
class Arnoldi
{
private:
    using Index = Eigen::Index;
    using Matrix = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>;
    using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;
    using MapVec = Eigen::Map<Vector>;
    using MapConstMat = Eigen::Map<const Matrix>;
    using MapConstVec = Eigen::Map<const Vector>;

protected:
    // A very small value, but 1.0 / m_near_0 does not overflow
    // ~= 1e-307 for the "double" type
    static constexpr Scalar m_near_0 = TypeTraits<Scalar>::min() * Scalar(10);
    // The machine precision, ~= 1e-16 for the "double" type
    static constexpr Scalar m_eps = TypeTraits<Scalar>::epsilon();

    ArnoldiOpType m_op;  // Operators for the Arnoldi factorization
    const Index m_n;     // dimension of A
    const Index m_m;     // maximum dimension of subspace V
    Index m_k;           // current dimension of subspace V
    Matrix m_fac_V;      // V matrix in the Arnoldi factorization
    Matrix m_fac_H;      // H matrix in the Arnoldi factorization
    Vector m_fac_f;      // residual in the Arnoldi factorization
    Scalar m_beta;       // ||f||, B-norm of f

    // Given orthonormal basis V (w.r.t. B), find a nonzero vector f such that V'Bf = 0
    // With rounding errors, we hope V'B(f/||f||) < eps
    // Assume that f has been properly allocated
    // On exit f holds the new residual and fnorm its B-norm; if no suitable
    // vector is found after a few random restarts, f is returned as-is
    void expand_basis(MapConstMat& V, const Index seed, Vector& f, Scalar& fnorm, Index& op_counter)
    {
        using std::sqrt;

        Vector v(m_n), Vf(V.cols());
        for (Index iter = 0; iter < 5; iter++)
        {
            // Randomly generate a new vector and orthogonalize it against V
            SimpleRandom<Scalar> rng((unsigned long) (seed + 123 * iter));
            // The first try forces f to be in the range of A
            if (iter == 0)
            {
                rng.random_vec(v);
                m_op.perform_op(v.data(), f.data());
                op_counter++;
            }
            else
            {
                rng.random_vec(f);
            }
            // f <- f - V * V'Bf, so that f is orthogonal to V in B-norm
            m_op.trans_product(V, f, Vf);
            f.noalias() -= V * Vf;
            // fnorm <- ||f||
            fnorm = m_op.norm(f);

            // Compute V'Bf again
            m_op.trans_product(V, f, Vf);
            // Test whether V'B(f/||f||) < eps
            Scalar ortho_err = Vf.cwiseAbs().maxCoeff();
            // If not, iteratively correct the residual
            int count = 0;
            while (count < 3 && ortho_err >= m_eps * fnorm)
            {
                // f <- f - V * Vf
                f.noalias() -= V * Vf;
                // beta <- ||f||
                fnorm = m_op.norm(f);

                m_op.trans_product(V, f, Vf);
                ortho_err = Vf.cwiseAbs().maxCoeff();
                count++;
            }

            // If the condition is satisfied, simply return
            // Otherwise, go to the next iteration and try a new random vector
            if (ortho_err < m_eps * fnorm)
                return;
        }
    }

public:
    // Copy an ArnoldiOp
    Arnoldi(const ArnoldiOpType& op, Index m) :
        m_op(op), m_n(op.rows()), m_m(m), m_k(0)
    {}

    // Move an ArnoldiOp
    // FIX: members are initialized in declaration order, so m_op is constructed
    // (by moving from `op`) before m_n. The dimension must therefore be queried
    // from m_op, not from the moved-from `op` object (use-after-move)
    Arnoldi(ArnoldiOpType&& op, Index m) :
        m_op(std::move(op)), m_n(m_op.rows()), m_m(m), m_k(0)
    {}

    // factorize_from() is virtual and Lanczos derives from this class, so give
    // the polymorphic base a virtual destructor for safe deletion through a
    // base pointer
    virtual ~Arnoldi() = default;

    // Const-reference to internal structures
    const Matrix& matrix_V() const { return m_fac_V; }
    const Matrix& matrix_H() const { return m_fac_H; }
    const Vector& vector_f() const { return m_fac_f; }
    Scalar f_norm() const { return m_beta; }
    Index subspace_dim() const { return m_k; }

    // Initialize with an operator and an initial vector
    // Throws std::invalid_argument if v0 is (numerically) zero
    void init(MapConstVec& v0, Index& op_counter)
    {
        m_fac_V.resize(m_n, m_m);
        m_fac_H.resize(m_m, m_m);
        m_fac_f.resize(m_n);
        m_fac_H.setZero();

        // Verify the initial vector
        const Scalar v0norm = m_op.norm(v0);
        if (v0norm < m_near_0)
            throw std::invalid_argument("initial residual vector cannot be zero");

        // Points to the first column of V
        MapVec v(m_fac_V.data(), m_n);
        // Force v to be in the range of A, i.e., v = A * v0
        m_op.perform_op(v0.data(), v.data());
        op_counter++;
        // Normalize
        const Scalar vnorm = m_op.norm(v);
        v /= vnorm;

        // Compute H and f
        Vector w(m_n);
        m_op.perform_op(v.data(), w.data());
        op_counter++;

        m_fac_H(0, 0) = m_op.inner_product(v, w);
        m_fac_f.noalias() = w - v * m_fac_H(0, 0);

        // In some cases f is zero in exact arithmetics, but due to rounding errors
        // it may contain tiny fluctuations. When this happens, we force f to be zero
        if (m_fac_f.cwiseAbs().maxCoeff() < m_eps)
        {
            m_fac_f.setZero();
            m_beta = Scalar(0);
        }
        else
        {
            m_beta = m_op.norm(m_fac_f);
        }

        // Indicate that this is a step-1 factorization
        m_k = 1;
    }

    // Arnoldi factorization starting from step-k
    // Extends the factorization from dimension from_k to dimension to_m;
    // op_counter is incremented once per application of the operator
    virtual void factorize_from(Index from_k, Index to_m, Index& op_counter)
    {
        using std::sqrt;

        if (to_m <= from_k)
            return;

        if (from_k > m_k)
        {
            std::string msg = "Arnoldi: from_k (= " + std::to_string(from_k) +
                ") is larger than the current subspace dimension (= " + std::to_string(m_k) + ")";
            throw std::invalid_argument(msg);
        }

        const Scalar beta_thresh = m_eps * sqrt(Scalar(m_n));

        // Pre-allocate vectors
        Vector Vf(to_m);
        Vector w(m_n);

        // Keep the upperleft k x k submatrix of H and set other elements to 0
        m_fac_H.rightCols(m_m - from_k).setZero();
        m_fac_H.block(from_k, 0, m_m - from_k, from_k).setZero();

        for (Index i = from_k; i <= to_m - 1; i++)
        {
            bool restart = false;
            // If beta = 0, then the next V is not full rank
            // We need to generate a new residual vector that is orthogonal
            // to the current V, which we call a restart
            if (m_beta < m_near_0)
            {
                MapConstMat V(m_fac_V.data(), m_n, i);  // The first i columns
                expand_basis(V, 2 * i, m_fac_f, m_beta, op_counter);
                restart = true;
            }

            // v <- f / ||f||
            m_fac_V.col(i).noalias() = m_fac_f / m_beta;  // The (i+1)-th column

            // Note that H[i+1, i] equals to the unrestarted beta
            m_fac_H(i, i - 1) = restart ? Scalar(0) : m_beta;

            // w <- A * v, v = m_fac_V.col(i)
            m_op.perform_op(&m_fac_V(0, i), w.data());
            op_counter++;

            const Index i1 = i + 1;
            // First i+1 columns of V
            MapConstMat Vs(m_fac_V.data(), m_n, i1);
            // h = m_fac_H(0:i, i)
            MapVec h(&m_fac_H(0, i), i1);
            // h <- V'Bw
            m_op.trans_product(Vs, w, h);

            // f <- w - V * h
            m_fac_f.noalias() = w - Vs * h;
            m_beta = m_op.norm(m_fac_f);

            // Skip the re-orthogonalization below when ||f|| is not much
            // smaller than ||h|| (no significant cancellation occurred)
            if (m_beta > Scalar(0.717) * m_op.norm(h))
                continue;

            // f/||f|| is going to be the next column of V, so we need to test
            // whether V'B(f/||f||) ~= 0
            m_op.trans_product(Vs, m_fac_f, Vf.head(i1));
            Scalar ortho_err = Vf.head(i1).cwiseAbs().maxCoeff();
            // If not, iteratively correct the residual
            int count = 0;
            while (count < 5 && ortho_err > m_eps * m_beta)
            {
                // There is an edge case: when beta=||f|| is close to zero, f mostly consists
                // of noises of rounding errors, so the test [ortho_err < eps * beta] is very
                // likely to fail. In particular, if beta=0, then the test is ensured to fail.
                // Hence when this happens, we force f to be zero, and then restart in the
                // next iteration.
                if (m_beta < beta_thresh)
                {
                    m_fac_f.setZero();
                    m_beta = Scalar(0);
                    break;
                }

                // f <- f - V * Vf
                m_fac_f.noalias() -= Vs * Vf.head(i1);
                // h <- h + Vf
                h.noalias() += Vf.head(i1);
                // beta <- ||f||
                m_beta = m_op.norm(m_fac_f);

                m_op.trans_product(Vs, m_fac_f, Vf.head(i1));
                ortho_err = Vf.head(i1).cwiseAbs().maxCoeff();
                count++;
            }
        }

        // Indicate that this is a step-m factorization
        m_k = to_m;
    }

    // Apply H -> Q'HQ, where Q is from a double shift QR decomposition
    void compress_H(const DoubleShiftQR<Scalar>& decomp)
    {
        decomp.matrix_QtHQ(m_fac_H);
        m_k -= 2;
    }

    // Apply H -> Q'HQ, where Q is from an upper Hessenberg QR decomposition
    void compress_H(const UpperHessenbergQR<Scalar>& decomp)
    {
        decomp.matrix_QtHQ(m_fac_H);
        m_k--;
    }

    // Apply V -> VQ and compute the new f.
    // Should be called after compress_H(), since m_k is updated there.
    // Only need to update the first k+1 columns of V
    // The first (m - k + i) elements of the i-th column of Q are non-zero,
    // and the rest are zero
    void compress_V(const Matrix& Q)
    {
        Matrix Vs(m_n, m_k + 1);
        for (Index i = 0; i < m_k; i++)
        {
            const Index nnz = m_m - m_k + i + 1;
            MapConstVec q(&Q(0, i), nnz);
            Vs.col(i).noalias() = m_fac_V.leftCols(nnz) * q;
        }
        Vs.col(m_k).noalias() = m_fac_V * Q.col(m_k);
        m_fac_V.leftCols(m_k + 1).noalias() = Vs;

        // f <- f * Q(m-1, k-1) + V{k+1} * H(k+1, k)
        Vector fk = m_fac_f * Q(m_m - 1, m_k - 1) + m_fac_V.col(m_k) * m_fac_H(m_k, m_k - 1);
        m_fac_f.swap(fk);
        m_beta = m_op.norm(m_fac_f);
    }
};
} // namespace Spectra
#endif // SPECTRA_ARNOLDI_H
| 10,932
| 33.598101
| 100
|
h
|
null |
LRMI-main/Spectra/LinAlg/DoubleShiftQR.h
|
// Copyright (C) 2016-2021 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_DOUBLE_SHIFT_QR_H
#define SPECTRA_DOUBLE_SHIFT_QR_H
#include "Eigen/Core"
#include <vector> // std::vector
#include <algorithm> // std::min, std::fill, std::copy
#include <utility> // std::swap
#include <cmath> // std::abs, std::sqrt, std::pow
#include <stdexcept> // std::invalid_argument, std::logic_error
#include "../Util/TypeTraits.h"
namespace Spectra {
template <typename Scalar = double>
class DoubleShiftQR
{
private:
using Index = Eigen::Index;
using Matrix = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>;
using Matrix3X = Eigen::Matrix<Scalar, 3, Eigen::Dynamic>;
using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;
using IntArray = Eigen::Array<unsigned char, Eigen::Dynamic, 1>;
using GenericMatrix = Eigen::Ref<Matrix>;
using ConstGenericMatrix = const Eigen::Ref<const Matrix>;
// A very small value, but 1.0 / m_near_0 does not overflow
// ~= 1e-307 for the "double" type
static constexpr Scalar m_near_0 = TypeTraits<Scalar>::min() * Scalar(10);
// The machine precision, ~= 1e-16 for the "double" type
static constexpr Scalar m_eps = TypeTraits<Scalar>::epsilon();
Index m_n; // Dimension of the matrix
Matrix m_mat_H; // A copy of the matrix to be factorized
Scalar m_shift_s; // Shift constant
Scalar m_shift_t; // Shift constant
Matrix3X m_ref_u; // Householder reflectors
IntArray m_ref_nr; // How many rows does each reflector affects
// 3 - A general reflector
// 2 - A Givens rotation
// 1 - An identity transformation
bool m_computed; // Whether matrix has been factorized
// Compute sqrt(x1^2 + x2^2 + x3^2) wit high precision
static Scalar stable_norm3(Scalar x1, Scalar x2, Scalar x3)
{
using std::abs;
using std::sqrt;
x1 = abs(x1);
x2 = abs(x2);
x3 = abs(x3);
// Make x1 >= {x2, x3}
if (x1 < x2)
std::swap(x1, x2);
if (x1 < x3)
std::swap(x1, x3);
// If x1 is too small, return 0
if (x1 < m_near_0)
return Scalar(0);
const Scalar r2 = x2 / x1, r3 = x3 / x1;
// We choose a cutoff such that cutoff^4 < eps
// If max(r2, r3) > cutoff, use the standard way; otherwise use Taylor series expansion
// to avoid an explicit sqrt() call that may lose precision
const Scalar cutoff = Scalar(0.1) * pow(m_eps, Scalar(0.25));
Scalar r = r2 * r2 + r3 * r3;
r = (r2 >= cutoff || r3 >= cutoff) ?
sqrt(Scalar(1) + r) :
(Scalar(1) + r * (Scalar(0.5) - Scalar(0.125) * r)); // sqrt(1 + t) ~= 1 + t/2 - t^2/8
return x1 * r;
}
// x[i] <- x[i] / r, r = sqrt(x1^2 + x2^2 + x3^2)
// Assume |x1| >= {|x2|, |x3|}, x1 != 0
static void stable_scaling(Scalar& x1, Scalar& x2, Scalar& x3)
{
using std::abs;
using std::pow;
using std::sqrt;
const Scalar x1sign = (x1 > Scalar(0)) ? Scalar(1) : Scalar(-1);
x1 = abs(x1);
// Use the same method as in stable_norm3()
const Scalar r2 = x2 / x1, r3 = x3 / x1;
const Scalar cutoff = Scalar(0.1) * pow(m_eps, Scalar(0.25));
Scalar r = r2 * r2 + r3 * r3;
// r = 1/sqrt(1 + r2^2 + r3^2)
r = (abs(r2) >= cutoff || abs(r3) >= cutoff) ?
Scalar(1) / sqrt(Scalar(1) + r) :
(Scalar(1) - r * (Scalar(0.5) - Scalar(0.375) * r)); // 1/sqrt(1 + t) ~= 1 - t * (1/2 - (3/8) * t)
x1 = x1sign * r;
x2 = r2 * r;
x3 = r3 * r;
}
void compute_reflector(const Scalar& x1, const Scalar& x2, const Scalar& x3, Index ind)
{
using std::abs;
Scalar* u = &m_ref_u.coeffRef(0, ind);
unsigned char* nr = m_ref_nr.data();
const Scalar x2m = abs(x2), x3m = abs(x3);
// If both x2 and x3 are zero, nr is 1, and we early exit
if (x2m < m_near_0 && x3m < m_near_0)
{
nr[ind] = 1;
return;
}
// In general case the reflector affects 3 rows
// If x3 is zero, decrease nr by 1
nr[ind] = (x3m < m_near_0) ? 2 : 3;
const Scalar x_norm = (x3m < m_near_0) ? Eigen::numext::hypot(x1, x2) : stable_norm3(x1, x2, x3);
// x1' = x1 - rho * ||x||
// rho = -sign(x1), if x1 == 0, we choose rho = 1
const Scalar rho = (x1 <= Scalar(0)) - (x1 > Scalar(0));
const Scalar x1_new = x1 - rho * x_norm, x1m = abs(x1_new);
// Copy x to u
u[0] = x1_new;
u[1] = x2;
u[2] = x3;
if (x1m >= x2m && x1m >= x3m)
{
stable_scaling(u[0], u[1], u[2]);
}
else if (x2m >= x1m && x2m >= x3m)
{
stable_scaling(u[1], u[0], u[2]);
}
else
{
stable_scaling(u[2], u[0], u[1]);
}
}
void compute_reflector(const Scalar* x, Index ind)
{
compute_reflector(x[0], x[1], x[2], ind);
}
// Update the block X = H(il:iu, il:iu)
void update_block(Index il, Index iu)
{
// Block size
const Index bsize = iu - il + 1;
// If block size == 1, there is no need to apply reflectors
if (bsize == 1)
{
m_ref_nr.coeffRef(il) = 1;
return;
}
const Scalar x00 = m_mat_H.coeff(il, il),
x01 = m_mat_H.coeff(il, il + 1),
x10 = m_mat_H.coeff(il + 1, il),
x11 = m_mat_H.coeff(il + 1, il + 1);
// m00 = x00 * (x00 - s) + x01 * x10 + t
const Scalar m00 = x00 * (x00 - m_shift_s) + x01 * x10 + m_shift_t;
// m10 = x10 * (x00 + x11 - s)
const Scalar m10 = x10 * (x00 + x11 - m_shift_s);
// For block size == 2, do a Givens rotation on M = X * X - s * X + t * I
if (bsize == 2)
{
// This causes nr=2
compute_reflector(m00, m10, 0, il);
// Apply the reflector to X
apply_PX(m_mat_H.block(il, il, 2, m_n - il), m_n, il);
apply_XP(m_mat_H.block(0, il, il + 2, 2), m_n, il);
m_ref_nr.coeffRef(il + 1) = 1;
return;
}
// For block size >=3, use the regular strategy
// m20 = x21 * x10
const Scalar m20 = m_mat_H.coeff(il + 2, il + 1) * m_mat_H.coeff(il + 1, il);
compute_reflector(m00, m10, m20, il);
// Apply the first reflector
apply_PX(m_mat_H.block(il, il, 3, m_n - il), m_n, il);
apply_XP(m_mat_H.block(0, il, il + (std::min)(bsize, Index(4)), 3), m_n, il);
// Calculate the following reflectors
// If entering this loop, block size is at least 4.
for (Index i = 1; i < bsize - 2; i++)
{
compute_reflector(&m_mat_H.coeffRef(il + i, il + i - 1), il + i);
// Apply the reflector to X
apply_PX(m_mat_H.block(il + i, il + i - 1, 3, m_n - il - i + 1), m_n, il + i);
apply_XP(m_mat_H.block(0, il + i, il + (std::min)(bsize, Index(i + 4)), 3), m_n, il + i);
}
// The last reflector
// This causes nr=2
compute_reflector(m_mat_H.coeff(iu - 1, iu - 2), m_mat_H.coeff(iu, iu - 2), 0, iu - 1);
// Apply the reflector to X
apply_PX(m_mat_H.block(iu - 1, iu - 2, 2, m_n - iu + 2), m_n, iu - 1);
apply_XP(m_mat_H.block(0, iu - 1, il + bsize, 2), m_n, iu - 1);
m_ref_nr.coeffRef(iu) = 1;
}
// P = I - 2 * u * u' = P'
// PX = X - 2 * u * (u'X)
void apply_PX(GenericMatrix X, Index stride, Index u_ind) const
{
const Index nr = m_ref_nr.coeff(u_ind);
if (nr == 1)
return;
const Scalar u0 = m_ref_u.coeff(0, u_ind), u1 = m_ref_u.coeff(1, u_ind);
const Scalar u0_2 = Scalar(2) * u0, u1_2 = Scalar(2) * u1;
const Index nrow = X.rows();
const Index ncol = X.cols();
Scalar* xptr = X.data();
if (nr == 2 || nrow == 2)
{
for (Index i = 0; i < ncol; i++, xptr += stride)
{
const Scalar tmp = u0_2 * xptr[0] + u1_2 * xptr[1];
xptr[0] -= tmp * u0;
xptr[1] -= tmp * u1;
}
}
else
{
const Scalar u2 = m_ref_u.coeff(2, u_ind);
const Scalar u2_2 = Scalar(2) * u2;
for (Index i = 0; i < ncol; i++, xptr += stride)
{
const Scalar tmp = u0_2 * xptr[0] + u1_2 * xptr[1] + u2_2 * xptr[2];
xptr[0] -= tmp * u0;
xptr[1] -= tmp * u1;
xptr[2] -= tmp * u2;
}
}
}
// x is a pointer to a vector
// Px = x - 2 * dot(x, u) * u
void apply_PX(Scalar* x, Index u_ind) const
{
const Index nr = m_ref_nr.coeff(u_ind);
if (nr == 1)
return;
const Scalar u0 = m_ref_u.coeff(0, u_ind),
u1 = m_ref_u.coeff(1, u_ind),
u2 = m_ref_u.coeff(2, u_ind);
// When the reflector only contains two elements, u2 has been set to zero
const bool nr_is_2 = (nr == 2);
const Scalar dot2 = Scalar(2) * (x[0] * u0 + x[1] * u1 + (nr_is_2 ? 0 : (x[2] * u2)));
x[0] -= dot2 * u0;
x[1] -= dot2 * u1;
if (!nr_is_2)
x[2] -= dot2 * u2;
}
// XP = X - 2 * (X * u) * u'
void apply_XP(GenericMatrix X, Index stride, Index u_ind) const
{
const Index nr = m_ref_nr.coeff(u_ind);
if (nr == 1)
return;
const Scalar u0 = m_ref_u.coeff(0, u_ind), u1 = m_ref_u.coeff(1, u_ind);
const Scalar u0_2 = Scalar(2) * u0, u1_2 = Scalar(2) * u1;
const int nrow = X.rows();
const int ncol = X.cols();
Scalar *X0 = X.data(), *X1 = X0 + stride; // X0 => X.col(0), X1 => X.col(1)
if (nr == 2 || ncol == 2)
{
// tmp = 2 * u0 * X0 + 2 * u1 * X1
// X0 => X0 - u0 * tmp
// X1 => X1 - u1 * tmp
for (Index i = 0; i < nrow; i++)
{
const Scalar tmp = u0_2 * X0[i] + u1_2 * X1[i];
X0[i] -= tmp * u0;
X1[i] -= tmp * u1;
}
}
else
{
Scalar* X2 = X1 + stride; // X2 => X.col(2)
const Scalar u2 = m_ref_u.coeff(2, u_ind);
const Scalar u2_2 = Scalar(2) * u2;
for (Index i = 0; i < nrow; i++)
{
const Scalar tmp = u0_2 * X0[i] + u1_2 * X1[i] + u2_2 * X2[i];
X0[i] -= tmp * u0;
X1[i] -= tmp * u1;
X2[i] -= tmp * u2;
}
}
}
public:
DoubleShiftQR(Index size) :
m_n(size),
m_computed(false)
{}
DoubleShiftQR(ConstGenericMatrix& mat, const Scalar& s, const Scalar& t) :
m_n(mat.rows()),
m_mat_H(m_n, m_n),
m_shift_s(s),
m_shift_t(t),
m_ref_u(3, m_n),
m_ref_nr(m_n),
m_computed(false)
{
compute(mat, s, t);
}
void compute(ConstGenericMatrix& mat, const Scalar& s, const Scalar& t)
{
using std::abs;
m_n = mat.rows();
if (m_n != mat.cols())
throw std::invalid_argument("DoubleShiftQR: matrix must be square");
m_mat_H.resize(m_n, m_n);
m_shift_s = s;
m_shift_t = t;
m_ref_u.resize(3, m_n);
m_ref_nr.resize(m_n);
// Make a copy of mat
m_mat_H.noalias() = mat;
// Obtain the indices of zero elements in the subdiagonal,
// so that H can be divided into several blocks
const Scalar eps_abs = m_near_0 * (m_n / m_eps);
constexpr Scalar eps_rel = m_eps;
std::vector<int> zero_ind;
zero_ind.reserve(m_n - 1);
zero_ind.push_back(0);
Scalar* Hii = m_mat_H.data();
for (Index i = 0; i < m_n - 1; i++, Hii += (m_n + 1))
{
// Hii[0] => m_mat_H(i, i)
// Hii[1] => m_mat_H(i + 1, i)
// Hii[m_n + 1] => m_mat_H(i + 1, i + 1)
const Scalar h = abs(Hii[1]);
// Deflate small sub-diagonal elements
const Scalar diag = abs(Hii[0]) + abs(Hii[m_n + 1]);
if (h <= eps_abs || h <= eps_rel * diag)
{
Hii[1] = 0;
zero_ind.push_back(i + 1);
}
// Make sure m_mat_H is upper Hessenberg
// Zero the elements below m_mat_H(i + 1, i)
std::fill(Hii + 2, Hii + m_n - i, Scalar(0));
}
zero_ind.push_back(m_n);
const Index len = zero_ind.size() - 1;
for (Index i = 0; i < len; i++)
{
const Index start = zero_ind[i];
const Index end = zero_ind[i + 1] - 1;
// Compute refelctors and update each block
update_block(start, end);
}
// Deflation on the computed result
Hii = m_mat_H.data();
for (Index i = 0; i < m_n - 1; i++, Hii += (m_n + 1))
{
const Scalar h = abs(Hii[1]);
const Scalar diag = abs(Hii[0]) + abs(Hii[m_n + 1]);
if (h <= eps_abs || h <= eps_rel * diag)
Hii[1] = 0;
}
m_computed = true;
}
void matrix_QtHQ(Matrix& dest) const
{
if (!m_computed)
throw std::logic_error("DoubleShiftQR: need to call compute() first");
dest.noalias() = m_mat_H;
}
// Q = P0 * P1 * ...
// Q'y = P_{n-2} * ... * P1 * P0 * y
void apply_QtY(Vector& y) const
{
if (!m_computed)
throw std::logic_error("DoubleShiftQR: need to call compute() first");
Scalar* y_ptr = y.data();
const Index n1 = m_n - 1;
for (Index i = 0; i < n1; i++, y_ptr++)
{
apply_PX(y_ptr, i);
}
}
// Q = P0 * P1 * ...
// YQ = Y * P0 * P1 * ...
void apply_YQ(GenericMatrix Y) const
{
if (!m_computed)
throw std::logic_error("DoubleShiftQR: need to call compute() first");
const Index nrow = Y.rows();
const Index n2 = m_n - 2;
for (Index i = 0; i < n2; i++)
{
apply_XP(Y.block(0, i, nrow, 3), nrow, i);
}
apply_XP(Y.block(0, n2, nrow, 2), nrow, n2);
}
};
} // namespace Spectra
#endif // SPECTRA_DOUBLE_SHIFT_QR_H
| 14,768
| 32.489796
| 111
|
h
|
null |
LRMI-main/Spectra/LinAlg/Lanczos.h
|
// Copyright (C) 2018-2021 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_LANCZOS_H
#define SPECTRA_LANCZOS_H
#include "Eigen/Core"
#include <cmath> // std::sqrt
#include <utility> // std::forward
#include <stdexcept> // std::invalid_argument
#include "Arnoldi.h"
namespace Spectra {
// Lanczos factorization A * V = V * H + f * e'
// A: n x n
// V: n x k
// H: k x k
// f: n x 1
// e: [0, ..., 0, 1]
// V and H are allocated of dimension m, so the maximum value of k is m
template <typename Scalar, typename ArnoldiOpType>
class Lanczos : public Arnoldi<Scalar, ArnoldiOpType>
{
private:
    using Index = Eigen::Index;
    using Matrix = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>;
    using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;
    using MapMat = Eigen::Map<Matrix>;
    using MapVec = Eigen::Map<Vector>;
    using MapConstMat = Eigen::Map<const Matrix>;

    // Members of the dependent base class are not visible to unqualified
    // lookup in a template, so bring the ones we use into scope explicitly
    using Arnoldi<Scalar, ArnoldiOpType>::m_op;
    using Arnoldi<Scalar, ArnoldiOpType>::m_n;
    using Arnoldi<Scalar, ArnoldiOpType>::m_m;
    using Arnoldi<Scalar, ArnoldiOpType>::m_k;
    using Arnoldi<Scalar, ArnoldiOpType>::m_fac_V;
    using Arnoldi<Scalar, ArnoldiOpType>::m_fac_H;
    using Arnoldi<Scalar, ArnoldiOpType>::m_fac_f;
    using Arnoldi<Scalar, ArnoldiOpType>::m_beta;
    using Arnoldi<Scalar, ArnoldiOpType>::m_near_0;
    using Arnoldi<Scalar, ArnoldiOpType>::m_eps;

public:
    // Forward parameter `op` to the constructor of Arnoldi
    template <typename T>
    Lanczos(T&& op, Index m) :
        Arnoldi<Scalar, ArnoldiOpType>(std::forward<T>(op), m)
    {}

    // Lanczos factorization starting from step-k
    // Specialization of the Arnoldi process for symmetric operators: H is
    // symmetric tridiagonal, so each new basis vector only needs explicit
    // orthogonalization against the two most recent columns of V
    // (plus the iterative corrections below)
    void factorize_from(Index from_k, Index to_m, Index& op_counter) override
    {
        using std::sqrt;

        if (to_m <= from_k)
            return;

        if (from_k > m_k)
        {
            std::string msg = "Lanczos: from_k (= " + std::to_string(from_k) +
                ") is larger than the current subspace dimension (= " + std::to_string(m_k) + ")";
            throw std::invalid_argument(msg);
        }

        // Threshold below which beta is considered pure rounding noise
        const Scalar beta_thresh = m_eps * sqrt(Scalar(m_n));

        // Pre-allocate vectors
        Vector Vf(to_m);
        Vector w(m_n);

        // Keep the upperleft k x k submatrix of H and set other elements to 0
        m_fac_H.rightCols(m_m - from_k).setZero();
        m_fac_H.block(from_k, 0, m_m - from_k, from_k).setZero();

        for (Index i = from_k; i <= to_m - 1; i++)
        {
            bool restart = false;
            // If beta = 0, then the next V is not full rank
            // We need to generate a new residual vector that is orthogonal
            // to the current V, which we call a restart
            if (m_beta < m_near_0)
            {
                MapConstMat V(m_fac_V.data(), m_n, i);  // The first i columns
                this->expand_basis(V, 2 * i, m_fac_f, m_beta, op_counter);
                restart = true;
            }

            // v <- f / ||f||
            MapVec v(&m_fac_V(0, i), m_n);  // The (i+1)-th column
            v.noalias() = m_fac_f / m_beta;

            // Note that H[i+1, i] equals to the unrestarted beta
            m_fac_H(i, i - 1) = restart ? Scalar(0) : m_beta;
            m_fac_H(i - 1, i) = m_fac_H(i, i - 1);  // Due to symmetry

            // w <- A * v
            m_op.perform_op(v.data(), w.data());
            op_counter++;

            // f <- w - V * V'Bw = w - H[i+1, i] * V{i} - H[i+1, i+1] * V{i+1}
            // If restarting, we know that H[i+1, i] = 0
            // First do w <- w - H[i+1, i] * V{i}, see the discussions in Section 2.3 of
            // Cullum and Willoughby (2002). Lanczos Algorithms for Large Symmetric Eigenvalue Computations: Vol. 1
            if (!restart)
                w.noalias() -= m_fac_H(i, i - 1) * m_fac_V.col(i - 1);

            // H[i+1, i+1] = <v, w> = v'Bw
            m_fac_H(i, i) = m_op.inner_product(v, w);

            // f <- w - H[i+1, i+1] * V{i+1}
            m_fac_f.noalias() = w - m_fac_H(i, i) * v;
            m_beta = m_op.norm(m_fac_f);

            // f/||f|| is going to be the next column of V, so we need to test
            // whether V'B(f/||f||) ~= 0
            const Index i1 = i + 1;
            MapMat Vs(m_fac_V.data(), m_n, i1);  // The first (i+1) columns
            m_op.trans_product(Vs, m_fac_f, Vf.head(i1));
            Scalar ortho_err = Vf.head(i1).cwiseAbs().maxCoeff();
            // If not, iteratively correct the residual
            int count = 0;
            while (count < 5 && ortho_err > m_eps * m_beta)
            {
                // There is an edge case: when beta=||f|| is close to zero, f mostly consists
                // of noises of rounding errors, so the test [ortho_err < eps * beta] is very
                // likely to fail. In particular, if beta=0, then the test is ensured to fail.
                // Hence when this happens, we force f to be zero, and then restart in the
                // next iteration.
                if (m_beta < beta_thresh)
                {
                    m_fac_f.setZero();
                    m_beta = Scalar(0);
                    break;
                }

                // f <- f - V * Vf
                m_fac_f.noalias() -= Vs * Vf.head(i1);
                // h <- h + Vf
                // Only the tridiagonal entries of H are stored, so fold the
                // correction into H(i-1, i), H(i, i-1) (kept symmetric) and H(i, i)
                m_fac_H(i - 1, i) += Vf[i - 1];
                m_fac_H(i, i - 1) = m_fac_H(i - 1, i);
                m_fac_H(i, i) += Vf[i];
                // beta <- ||f||
                m_beta = m_op.norm(m_fac_f);

                m_op.trans_product(Vs, m_fac_f, Vf.head(i1));
                ortho_err = Vf.head(i1).cwiseAbs().maxCoeff();
                count++;
            }
        }

        // Indicate that this is a step-m factorization
        m_k = to_m;
    }

    // Apply H -> Q'HQ, where Q is from a tridiagonal QR decomposition
    // Function overloading here, not overriding
    void compress_H(const TridiagQR<Scalar>& decomp)
    {
        decomp.matrix_QtHQ(m_fac_H);
        m_k--;
    }
};
} // namespace Spectra
#endif // SPECTRA_LANCZOS_H
| 6,282
| 35.52907
| 115
|
h
|
null |
LRMI-main/Spectra/LinAlg/Orthogonalization.h
|
// Copyright (C) 2020 Netherlands eScience Center <f.zapata@esciencecenter.nl>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_ORTHOGONALIZATION_H
#define SPECTRA_ORTHOGONALIZATION_H
#include <Eigen/Core>
#include <Eigen/QR>
namespace Spectra {
/// Check that the number of columns to skip is
/// non-negative and smaller than the total number
/// of columns of the matrix
/// \param in_output Matrix to be orthogonalized
/// \param left_cols_to_skip Number of left columns to be left untouched
template <typename Matrix>
void assert_left_cols_to_skip(Matrix& in_output, Eigen::Index left_cols_to_skip)
{
    // Debug-only sanity checks: the skipped prefix must be non-negative and a
    // strict subset of the columns. These are no-ops in release (NDEBUG) builds
    assert(in_output.cols() > left_cols_to_skip && "left_cols_to_skip is larger than columns of matrix");
    assert(left_cols_to_skip >= 0 && "left_cols_to_skip is negative");
}
/// If the number of columns to skip is zero,
/// normalize the first column and set left_cols_to_skip=1
/// \param in_output Matrix to be orthogonalized
/// \param left_cols_to_skip Number of left columns to be left untouched
/// \return Actual number of left columns to skip
/// Ensure at least one column is fixed: when no columns are skipped, the
/// leading column is normalized in place and counted as skipped.
/// \param in_output Matrix to be orthogonalized
/// \param left_cols_to_skip Number of left columns to be left untouched
/// \return Actual number of left columns to skip (at least 1)
template <typename Matrix>
Eigen::Index treat_first_col(Matrix& in_output, Eigen::Index left_cols_to_skip)
{
    // Some columns are already fixed: nothing to do
    if (left_cols_to_skip != 0)
        return left_cols_to_skip;
    // Otherwise normalize the first column and mark it as fixed
    in_output.col(0).normalize();
    return 1;
}
/// Orthogonalize the in_output matrix using a QR decomposition
/// \param in_output Matrix to be orthogonalized
template <typename Matrix>
void QR_orthogonalisation(Matrix& in_output)
{
using InternalMatrix = Eigen::Matrix<typename Matrix::Scalar, Eigen::Dynamic, Eigen::Dynamic>;
Eigen::Index nrows = in_output.rows();
Eigen::Index ncols = in_output.cols();
ncols = (std::min)(nrows, ncols);
InternalMatrix I = InternalMatrix::Identity(nrows, ncols);
Eigen::HouseholderQR<Matrix> qr(in_output);
in_output.leftCols(ncols).noalias() = qr.householderQ() * I;
}
/// Orthogonalize the in_output matrix using a modified Gram Schmidt process
/// \param in_output matrix to be orthogonalized
/// \param left_cols_to_skip Number of left columns to be left untouched
template <typename Matrix>
void MGS_orthogonalisation(Matrix& in_output, Eigen::Index left_cols_to_skip = 0)
{
assert_left_cols_to_skip(in_output, left_cols_to_skip);
left_cols_to_skip = treat_first_col(in_output, left_cols_to_skip);
for (Eigen::Index k = left_cols_to_skip; k < in_output.cols(); ++k)
{
for (Eigen::Index j = 0; j < k; j++)
{
in_output.col(k) -= in_output.col(j).dot(in_output.col(k)) * in_output.col(j);
}
in_output.col(k).normalize();
}
}
/// Orthogonalize the in_output matrix using a Gram Schmidt process
/// \param in_output matrix to be orthogonalized
/// \param left_cols_to_skip Number of left columns to be left untouched
template <typename Matrix>
void GS_orthogonalisation(Matrix& in_output, Eigen::Index left_cols_to_skip = 0)
{
assert_left_cols_to_skip(in_output, left_cols_to_skip);
left_cols_to_skip = treat_first_col(in_output, left_cols_to_skip);
for (Eigen::Index j = left_cols_to_skip; j < in_output.cols(); ++j)
{
in_output.col(j) -= in_output.leftCols(j) * (in_output.leftCols(j).transpose() * in_output.col(j));
in_output.col(j).normalize();
}
}
/// Orthogonalize the subspace spanned by right columns of in_output
/// against the subspace spanned by left columns
/// It assumes that the left columns are already orthogonal and normalized,
/// and it does not orthogonalize the left columns against each other
/// \param in_output Matrix to be orthogonalized
/// \param left_cols_to_skip Number of left columns to be left untouched
template <typename Matrix>
void subspace_orthogonalisation(Matrix& in_output, Eigen::Index left_cols_to_skip)
{
assert_left_cols_to_skip(in_output, left_cols_to_skip);
if (left_cols_to_skip == 0)
{
return;
}
Eigen::Index right_cols_to_ortho = in_output.cols() - left_cols_to_skip;
in_output.rightCols(right_cols_to_ortho) -= in_output.leftCols(left_cols_to_skip) *
(in_output.leftCols(left_cols_to_skip).transpose() * in_output.rightCols(right_cols_to_ortho));
}
/// Orthogonalize the in_output matrix using a Jens process
/// The subspace spanned by right columns are first orthogonalized
/// agains the left columns, and then a QR decomposition is applied on the right columns
/// to make them orthogonalized agains each other
/// \param in_output Matrix to be orthogonalized
/// \param left_cols_to_skip Number of left columns to be left untouched
template <typename Matrix>
void JensWehner_orthogonalisation(Matrix& in_output, Eigen::Index left_cols_to_skip = 0)
{
assert_left_cols_to_skip(in_output, left_cols_to_skip);
Eigen::Index right_cols_to_ortho = in_output.cols() - left_cols_to_skip;
subspace_orthogonalisation(in_output, left_cols_to_skip);
Eigen::Ref<Matrix> right_cols = in_output.rightCols(right_cols_to_ortho);
QR_orthogonalisation(right_cols);
}
/// Orthogonalize the in_output matrix with a "twice is enough" strategy:
/// the Jens process is simply applied two times in a row to wash out
/// rounding errors left by the first pass.
/// \param in_output Matrix to be orthogonalized (modified in place)
/// \param left_cols_to_skip Number of left columns to be left untouched
template <typename Matrix>
void twice_is_enough_orthogonalisation(Matrix& in_output, Eigen::Index left_cols_to_skip = 0)
{
    for (int pass = 0; pass < 2; ++pass)
        JensWehner_orthogonalisation(in_output, left_cols_to_skip);
}
} // namespace Spectra
#endif //SPECTRA_ORTHOGONALIZATION_H
| 5,705
| 39.183099
| 107
|
h
|
null |
LRMI-main/Spectra/LinAlg/RitzPairs.h
|
// Copyright (C) 2020 Netherlands eScience Center <n.renauld@esciencecenter.nl>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_RITZ_PAIRS_H
#define SPECTRA_RITZ_PAIRS_H
#include <Eigen/Core>
#include <Eigen/Eigenvalues>
#include "../Util/SelectionRule.h"
namespace Spectra {
template <typename Scalar>
class SearchSpace;
/// This class handles the creation and manipulation of Ritz eigen pairs
/// for iterative eigensolvers such as Davidson, Jacobi-Davidson, etc.
template <typename Scalar>
class RitzPairs
{
private:
using Index = Eigen::Index;
using Matrix = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>;
using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;
using Array = Eigen::Array<Scalar, Eigen::Dynamic, 1>;
using BoolArray = Eigen::Array<bool, Eigen::Dynamic, 1>;
Vector m_values; // eigenvalues
Matrix m_small_vectors; // eigenvectors of the small problem, makes restart cheaper.
Matrix m_vectors; // Ritz (or harmonic Ritz) eigenvectors
Matrix m_residues; // residues of the pairs
BoolArray m_root_converged;
public:
RitzPairs() = default;
/// Compute the eigen values/vectors
///
/// \param search_space Instance of the class handling the search space
/// \return Eigen::ComputationalInfo Whether small eigenvalue problem worked
Eigen::ComputationInfo compute_eigen_pairs(const SearchSpace<Scalar>& search_space);
/// Returns the size of the ritz eigen pairs
///
/// \return Eigen::Index Number of pairs
Index size() const { return m_values.size(); }
/// Sort the eigen pairs according to the selection rule
///
/// \param selection Sorting rule
void sort(SortRule selection)
{
std::vector<Index> ind = argsort(selection, m_values);
RitzPairs<Scalar> temp = *this;
for (Index i = 0; i < size(); i++)
{
m_values[i] = temp.m_values[ind[i]];
m_vectors.col(i) = temp.m_vectors.col(ind[i]);
m_residues.col(i) = temp.m_residues.col(ind[i]);
m_small_vectors.col(i) = temp.m_small_vectors.col(ind[i]);
}
}
/// Checks if the algorithm has converged and updates root_converged
///
/// \param tol Tolerance for convergence
/// \param number_eigenvalue Number of request eigenvalues
/// \return bool true if all eigenvalues are converged
bool check_convergence(Scalar tol, Index number_eigenvalues)
{
const Array norms = m_residues.colwise().norm();
bool converged = true;
m_root_converged = BoolArray::Zero(norms.size());
for (Index j = 0; j < norms.size(); j++)
{
m_root_converged[j] = (norms[j] < tol);
if (j < number_eigenvalues)
{
converged &= (norms[j] < tol);
}
}
return converged;
}
const Matrix& ritz_vectors() const { return m_vectors; }
const Vector& ritz_values() const { return m_values; }
const Matrix& small_ritz_vectors() const { return m_small_vectors; }
const Matrix& residues() const { return m_residues; }
const BoolArray& converged_eigenvalues() const { return m_root_converged; }
};
} // namespace Spectra
#include "SearchSpace.h"
namespace Spectra {
/// Creates the projected (small) matrix and computes its eigen pairs,
/// then assembles the Ritz vectors and the residues.
///
/// \param search_space Instance of the SearchSpace class
template <typename Scalar>
Eigen::ComputationInfo RitzPairs<Scalar>::compute_eigen_pairs(const SearchSpace<Scalar>& search_space)
{
    const Matrix& V = search_space.basis_vectors();
    const Matrix& AV = search_space.operator_basis_product();

    // Projected eigenvalue problem: S = V' * (A * V)
    Matrix projected = V.transpose() * AV;

    // Solve the small, dense, symmetric problem
    Eigen::SelfAdjointEigenSolver<Matrix> solver(projected);
    m_values = solver.eigenvalues();
    m_small_vectors = solver.eigenvectors();

    // Ritz vectors: X = V * y
    m_vectors = V * m_small_vectors;

    // Residues: R = (A * V) * y - X * diag(lambda)
    m_residues = AV * m_small_vectors - m_vectors * m_values.asDiagonal();

    return solver.info();
}
} // namespace Spectra
#endif // SPECTRA_RITZ_PAIRS_H
| 4,472
| 33.145038
| 102
|
h
|
null |
LRMI-main/Spectra/LinAlg/SearchSpace.h
|
// Copyright (C) 2020 Netherlands eScience Center <n.renauld@esciencecenter.nl>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_SEARCH_SPACE_H
#define SPECTRA_SEARCH_SPACE_H
#include <Eigen/Core>
#include "RitzPairs.h"
#include "Orthogonalization.h"
namespace Spectra {
/// This class handles the creation and manipulation of the search space
/// for iterative eigensolvers such as Davidson, Jacobi-Davidson, etc.
template <typename Scalar>
class SearchSpace
{
private:
using Index = Eigen::Index;
using Matrix = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>;
Matrix m_basis_vectors;
Matrix m_op_basis_product;
/// Append new vector to the basis
///
/// \param new_vect Matrix of new correction vectors
void append_new_vectors_to_basis(const Matrix& new_vect)
{
Index num_update = new_vect.cols();
m_basis_vectors.conservativeResize(Eigen::NoChange, m_basis_vectors.cols() + num_update);
m_basis_vectors.rightCols(num_update).noalias() = new_vect;
}
public:
SearchSpace() = default;
/// Returns the current size of the search space
Index size() const { return m_basis_vectors.cols(); }
void initialize_search_space(const Eigen::Ref<const Matrix>& initial_vectors)
{
m_basis_vectors = initial_vectors;
m_op_basis_product = Matrix(initial_vectors.rows(), 0);
}
/// Updates the matrix formed by the operator applied to the search space
/// after the addition of new vectors in the search space. Only the product
/// of the operator with the new vectors is computed and the result is appended
/// to the op_basis_product member variable
///
/// \param OpType Operator representing the matrix
template <typename OpType>
void update_operator_basis_product(OpType& op)
{
Index nvec = m_basis_vectors.cols() - m_op_basis_product.cols();
m_op_basis_product.conservativeResize(Eigen::NoChange, m_basis_vectors.cols());
m_op_basis_product.rightCols(nvec).noalias() = op * m_basis_vectors.rightCols(nvec);
}
/// Restart the search space by reducing the basis vector to the last
/// Ritz eigenvector
///
/// \param ritz_pair Instance of a RitzPair class
/// \param size Size of the restart
void restart(const RitzPairs<Scalar>& ritz_pairs, Index size)
{
m_basis_vectors = ritz_pairs.ritz_vectors().leftCols(size);
m_op_basis_product = m_op_basis_product * ritz_pairs.small_ritz_vectors().leftCols(size);
}
/// Append new vectors to the search space and
/// orthogonalize the resulting matrix
///
/// \param new_vect Matrix of new correction vectors
void extend_basis(const Matrix& new_vect)
{
Index left_cols_to_skip = size();
append_new_vectors_to_basis(new_vect);
twice_is_enough_orthogonalisation(m_basis_vectors, left_cols_to_skip);
}
/// Returns the basis vectors
const Matrix& basis_vectors() const { return m_basis_vectors; }
/// Returns the operator applied to basis vector
const Matrix& operator_basis_product() const { return m_op_basis_product; }
};
} // namespace Spectra
#endif // SPECTRA_SEARCH_SPACE_H
| 3,388
| 33.938144
| 97
|
h
|
null |
LRMI-main/Spectra/LinAlg/TridiagEigen.h
|
// The code was adapted from Eigen/src/Eigenvaleus/SelfAdjointEigenSolver.h
//
// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2010 Jitse Niesen <jitse@maths.leeds.ac.uk>
// Copyright (C) 2016-2021 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_TRIDIAG_EIGEN_H
#define SPECTRA_TRIDIAG_EIGEN_H
#include "Eigen/Core"
#include "Eigen/Jacobi"
#include <stdexcept>
#include "../Util/TypeTraits.h"
namespace Spectra {
/// Eigen solver for a symmetric tridiagonal matrix, adapted from Eigen's
/// SelfAdjointEigenSolver. Computes all eigenvalues and eigenvectors via
/// the Francis implicit QR iteration with a Wilkinson shift.
///
/// NOTE(review): compute() calls mat.diagonal(-1).cwiseAbs().maxCoeff(),
/// which operates on an empty vector when the input is smaller than 2x2 —
/// presumably callers never pass n < 2; confirm.
template <typename Scalar = double>
class TridiagEigen
{
private:
    using Index = Eigen::Index;
    // For convenience in adapting the tridiagonal_qr_step() function
    using RealScalar = Scalar;

    using Matrix = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>;
    using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;
    using GenericMatrix = Eigen::Ref<Matrix>;
    using ConstGenericMatrix = const Eigen::Ref<const Matrix>;

    Index m_n;           // dimension of the (square) matrix
    Vector m_main_diag;  // Main diagonal elements of the matrix
    Vector m_sub_diag;   // Sub-diagonal elements of the matrix
    Matrix m_evecs;      // To store eigenvectors
    bool m_computed;     // whether compute() has finished successfully

    // Adapted from Eigen/src/Eigenvaleus/SelfAdjointEigenSolver.h
    // Francis implicit QR step.
    // Performs one implicit-shift QR sweep on the tridiagonal block
    // diag[start..end] / subdiag[start..end-1]; the applied Givens
    // rotations are accumulated into the n-by-n matrix pointed to by
    // matrixQ (skipped when matrixQ is null).
    static void tridiagonal_qr_step(RealScalar* diag,
                                    RealScalar* subdiag, Index start,
                                    Index end, Scalar* matrixQ,
                                    Index n)
    {
        using std::abs;

        // Wilkinson Shift.
        RealScalar td = (diag[end - 1] - diag[end]) * RealScalar(0.5);
        RealScalar e = subdiag[end - 1];
        // Note that thanks to scaling, e^2 or td^2 cannot overflow, however they can still
        // underflow thus leading to inf/NaN values when using the following commented code:
        // RealScalar e2 = numext::abs2(subdiag[end-1]);
        // RealScalar mu = diag[end] - e2 / (td + (td>0 ? 1 : -1) * sqrt(td*td + e2));
        // This explain the following, somewhat more complicated, version:
        RealScalar mu = diag[end];
        if (td == RealScalar(0))
            mu -= abs(e);
        else if (e != RealScalar(0))
        {
            const RealScalar e2 = Eigen::numext::abs2(e);
            const RealScalar h = Eigen::numext::hypot(td, e);
            if (e2 == RealScalar(0))
                mu -= e / ((td + (td > RealScalar(0) ? h : -h)) / e);
            else
                mu -= e2 / (td + (td > RealScalar(0) ? h : -h));
        }

        RealScalar x = diag[start] - mu;
        RealScalar z = subdiag[start];
        Eigen::Map<Matrix> q(matrixQ, n, n);
        // If z ever becomes zero, the Givens rotation will be the identity and
        // z will stay zero for all future iterations.
        for (Index k = start; k < end && z != RealScalar(0); ++k)
        {
            Eigen::JacobiRotation<RealScalar> rot;
            rot.makeGivens(x, z);

            const RealScalar s = rot.s();
            const RealScalar c = rot.c();

            // do T = G' T G
            RealScalar sdk = s * diag[k] + c * subdiag[k];
            RealScalar dkp1 = s * subdiag[k] + c * diag[k + 1];

            diag[k] = c * (c * diag[k] - s * subdiag[k]) - s * (c * subdiag[k] - s * diag[k + 1]);
            diag[k + 1] = s * sdk + c * dkp1;
            subdiag[k] = c * sdk - s * dkp1;

            if (k > start)
                subdiag[k - 1] = c * subdiag[k - 1] - s * z;

            // "Chasing the bulge" to return to triangular form.
            x = subdiag[k];

            if (k < end - 1)
            {
                z = -s * subdiag[k + 1];
                subdiag[k + 1] = c * subdiag[k + 1];
            }

            // apply the givens rotation to the unit matrix Q = Q * G
            if (matrixQ)
                q.applyOnTheRight(k, k + 1, rot);
        }
    }

public:
    /// Default constructor; call compute() before querying results.
    TridiagEigen() :
        m_n(0), m_computed(false)
    {}

    /// Constructor that immediately runs the decomposition on mat.
    TridiagEigen(ConstGenericMatrix& mat) :
        m_n(mat.rows()), m_computed(false)
    {
        compute(mat);
    }

    /// Compute eigenvalues and eigenvectors of the tridiagonal matrix.
    /// Only the main diagonal and the first sub-diagonal of mat are read.
    /// Throws std::invalid_argument if mat is not square, and
    /// std::runtime_error if the QR iteration fails to converge.
    void compute(ConstGenericMatrix& mat)
    {
        using std::abs;

        // A very small value, but 1.0 / near_0 does not overflow
        // ~= 1e-307 for the "double" type
        constexpr Scalar near_0 = TypeTraits<Scalar>::min() * Scalar(10);

        m_n = mat.rows();
        if (m_n != mat.cols())
            throw std::invalid_argument("TridiagEigen: matrix must be square");

        m_main_diag.resize(m_n);
        m_sub_diag.resize(m_n - 1);
        m_evecs.resize(m_n, m_n);
        m_evecs.setIdentity();

        // Scale matrix to improve stability
        const Scalar scale = (std::max)(mat.diagonal().cwiseAbs().maxCoeff(),
                                        mat.diagonal(-1).cwiseAbs().maxCoeff());
        // If scale=0, mat is a zero matrix, so we can early stop
        if (scale < near_0)
        {
            // m_main_diag contains eigenvalues
            m_main_diag.setZero();
            // m_evecs has been set identity
            // m_evecs.setIdentity();
            m_computed = true;
            return;
        }
        m_main_diag.noalias() = mat.diagonal() / scale;
        m_sub_diag.noalias() = mat.diagonal(-1) / scale;

        Scalar* diag = m_main_diag.data();
        Scalar* subdiag = m_sub_diag.data();

        Index end = m_n - 1;
        Index start = 0;
        Index iter = 0;  // total number of iterations
        int info = 0;    // 0 for success, 1 for failure

        const Scalar considerAsZero = TypeTraits<Scalar>::min();
        const Scalar precision_inv = Scalar(1) / Eigen::NumTraits<Scalar>::epsilon();

        while (end > 0)
        {
            // Deflation: set negligible sub-diagonal entries to exact zero
            for (Index i = start; i < end; i++)
            {
                if (abs(subdiag[i]) <= considerAsZero)
                    subdiag[i] = Scalar(0);
                else
                {
                    // abs(subdiag[i]) <= epsilon * sqrt(abs(diag[i]) + abs(diag[i+1]))
                    // Scaled to prevent underflows.
                    const Scalar scaled_subdiag = precision_inv * subdiag[i];
                    if (scaled_subdiag * scaled_subdiag <= (abs(diag[i]) + abs(diag[i + 1])))
                        subdiag[i] = Scalar(0);
                }
            }

            // find the largest unreduced block at the end of the matrix.
            while (end > 0 && subdiag[end - 1] == Scalar(0))
                end--;

            if (end <= 0)
                break;

            // if we spent too many iterations, we give up
            iter++;
            if (iter > 30 * m_n)
            {
                info = 1;
                break;
            }

            // Find the start of the unreduced block ending at `end`
            start = end - 1;
            while (start > 0 && subdiag[start - 1] != Scalar(0))
                start--;

            tridiagonal_qr_step(diag, subdiag, start, end, m_evecs.data(), m_n);
        }

        if (info > 0)
            throw std::runtime_error("TridiagEigen: eigen decomposition failed");

        // Scale eigenvalues back
        m_main_diag *= scale;

        m_computed = true;
    }

    /// Returns the computed eigenvalues; throws if compute() was not called.
    const Vector& eigenvalues() const
    {
        if (!m_computed)
            throw std::logic_error("TridiagEigen: need to call compute() first");

        // After calling compute(), main_diag will contain the eigenvalues.
        return m_main_diag;
    }

    /// Returns the computed eigenvectors; throws if compute() was not called.
    const Matrix& eigenvectors() const
    {
        if (!m_computed)
            throw std::logic_error("TridiagEigen: need to call compute() first");

        return m_evecs;
    }
};
} // namespace Spectra
#endif // SPECTRA_TRIDIAG_EIGEN_H
| 7,776
| 32.666667
| 98
|
h
|
null |
LRMI-main/Spectra/LinAlg/UpperHessenbergSchur.h
|
// The code was adapted from Eigen/src/Eigenvaleus/RealSchur.h
//
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2010,2012 Jitse Niesen <jitse@maths.leeds.ac.uk>
// Copyright (C) 2021 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_UPPER_HESSENBERG_SCHUR_H
#define SPECTRA_UPPER_HESSENBERG_SCHUR_H
#include <Eigen/Core>
#include <Eigen/Jacobi>
#include <Eigen/Householder>
#include <stdexcept>
#include "../Util/TypeTraits.h"
namespace Spectra {
/// Real Schur decomposition A = U T U' of an upper Hessenberg matrix A,
/// adapted from Eigen's RealSchur. T is quasi-upper-triangular and U
/// accumulates the orthogonal transformations applied during the
/// Francis double-shift QR iteration.
template <typename Scalar = double>
class UpperHessenbergSchur
{
private:
    using Index = Eigen::Index;
    using Matrix = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>;
    using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;
    using Vector2s = Eigen::Matrix<Scalar, 2, 1>;
    using Vector3s = Eigen::Matrix<Scalar, 3, 1>;
    using GenericMatrix = Eigen::Ref<Matrix>;
    using ConstGenericMatrix = const Eigen::Ref<const Matrix>;

    Index m_n;        // Size of the matrix
    Matrix m_T;       // T matrix, A = UTU'
    Matrix m_U;       // U matrix, A = UTU'
    bool m_computed;  // whether compute() has finished successfully

    // L1 norm of an upper Hessenberg matrix
    // Only rows 0..j+1 of column j are summed, i.e. the part of the
    // matrix that can be nonzero in Hessenberg form.
    static Scalar upper_hessenberg_l1_norm(ConstGenericMatrix& x)
    {
        const Index n = x.cols();
        Scalar norm(0);
        for (Index j = 0; j < n; j++)
            norm += x.col(j).segment(0, (std::min)(n, j + 2)).cwiseAbs().sum();
        return norm;
    }

    // Look for single small sub-diagonal element and returns its index
    // (scans upwards from row iu; entries below near_0 relative to their
    // diagonal neighbors are treated as negligible)
    Index find_small_subdiag(Index iu, const Scalar& near_0) const
    {
        using std::abs;

        const Scalar eps = Eigen::NumTraits<Scalar>::epsilon();
        Index res = iu;
        while (res > 0)
        {
            Scalar s = abs(m_T.coeff(res - 1, res - 1)) + abs(m_T.coeff(res, res));
            s = Eigen::numext::maxi<Scalar>(s * eps, near_0);
            if (abs(m_T.coeff(res, res - 1)) <= s)
                break;

            res--;
        }
        return res;
    }

    // Update T given that rows iu-1 and iu decouple from the rest
    void split_off_two_rows(Index iu, const Scalar& ex_shift)
    {
        using std::sqrt;
        using std::abs;

        // The eigenvalues of the 2x2 matrix [a b; c d] are
        // trace +/- sqrt(discr/4) where discr = tr^2 - 4*det, tr = a + d, det = ad - bc
        Scalar p = Scalar(0.5) * (m_T.coeff(iu - 1, iu - 1) - m_T.coeff(iu, iu));
        Scalar q = p * p + m_T.coeff(iu, iu - 1) * m_T.coeff(iu - 1, iu);  // q = tr^2 / 4 - det = discr/4
        m_T.coeffRef(iu, iu) += ex_shift;
        m_T.coeffRef(iu - 1, iu - 1) += ex_shift;

        if (q >= Scalar(0))  // Two real eigenvalues
        {
            Scalar z = sqrt(abs(q));
            Eigen::JacobiRotation<Scalar> rot;
            // Rotation chosen to annihilate the sub-diagonal entry T(iu, iu-1)
            rot.makeGivens((p >= Scalar(0)) ? (p + z) : (p - z), m_T.coeff(iu, iu - 1));
            m_T.rightCols(m_n - iu + 1).applyOnTheLeft(iu - 1, iu, rot.adjoint());
            m_T.topRows(iu + 1).applyOnTheRight(iu - 1, iu, rot);
            m_T.coeffRef(iu, iu - 1) = Scalar(0);
            m_U.applyOnTheRight(iu - 1, iu, rot);
        }

        // Decouple the 2x2 block from the rows above it
        if (iu > 1)
            m_T.coeffRef(iu - 1, iu - 2) = Scalar(0);
    }

    // Form shift in shift_info, and update ex_shift if an exceptional shift is performed
    void compute_shift(Index iu, Index iter, Scalar& ex_shift, Vector3s& shift_info)
    {
        using std::sqrt;
        using std::abs;

        shift_info.coeffRef(0) = m_T.coeff(iu, iu);
        shift_info.coeffRef(1) = m_T.coeff(iu - 1, iu - 1);
        shift_info.coeffRef(2) = m_T.coeff(iu, iu - 1) * m_T.coeff(iu - 1, iu);

        // Wilkinson's original ad hoc shift
        if (iter == 10)
        {
            ex_shift += shift_info.coeff(0);
            for (Index i = 0; i <= iu; ++i)
                m_T.coeffRef(i, i) -= shift_info.coeff(0);
            Scalar s = abs(m_T.coeff(iu, iu - 1)) + abs(m_T.coeff(iu - 1, iu - 2));
            shift_info.coeffRef(0) = Scalar(0.75) * s;
            shift_info.coeffRef(1) = Scalar(0.75) * s;
            shift_info.coeffRef(2) = Scalar(-0.4375) * s * s;
        }

        // MATLAB's new ad hoc shift
        if (iter == 30)
        {
            Scalar s = (shift_info.coeff(1) - shift_info.coeff(0)) / Scalar(2);
            s = s * s + shift_info.coeff(2);
            if (s > Scalar(0))
            {
                s = sqrt(s);
                if (shift_info.coeff(1) < shift_info.coeff(0))
                    s = -s;
                s = s + (shift_info.coeff(1) - shift_info.coeff(0)) / Scalar(2);
                s = shift_info.coeff(0) - shift_info.coeff(2) / s;
                ex_shift += s;
                for (Index i = 0; i <= iu; ++i)
                    m_T.coeffRef(i, i) -= s;
                shift_info.setConstant(Scalar(0.964));
            }
        }
    }

    // Compute index im at which Francis QR step starts and the first Householder vector
    void init_francis_qr_step(Index il, Index iu, const Vector3s& shift_info, Index& im, Vector3s& first_householder_vec) const
    {
        using std::abs;

        const Scalar eps = Eigen::NumTraits<Scalar>::epsilon();
        Vector3s& v = first_householder_vec;  // alias to save typing
        for (im = iu - 2; im >= il; --im)
        {
            const Scalar Tmm = m_T.coeff(im, im);
            const Scalar r = shift_info.coeff(0) - Tmm;
            const Scalar s = shift_info.coeff(1) - Tmm;
            v.coeffRef(0) = (r * s - shift_info.coeff(2)) / m_T.coeff(im + 1, im) + m_T.coeff(im, im + 1);
            v.coeffRef(1) = m_T.coeff(im + 1, im + 1) - Tmm - r - s;
            v.coeffRef(2) = m_T.coeff(im + 2, im + 1);
            if (im == il)
                break;
            // Stop earlier when the coupling to the rows above is negligible
            const Scalar lhs = m_T.coeff(im, im - 1) * (abs(v.coeff(1)) + abs(v.coeff(2)));
            const Scalar rhs = v.coeff(0) * (abs(m_T.coeff(im - 1, im - 1)) + abs(Tmm) + abs(m_T.coeff(im + 1, im + 1)));
            if (abs(lhs) < eps * rhs)
                break;
        }
    }

    // P = I - tau * v * v' = P'
    // PX = X - tau * v * (v'X), X [3 x c]
    // Applies the 3x3 Householder reflector (with essential part `ess`) to
    // `ncol` columns starting at x; `stride` is the column stride.
    static void apply_householder_left(const Vector2s& ess, const Scalar& tau, Scalar* x, Index ncol, Index stride)
    {
        const Scalar v1 = ess.coeff(0), v2 = ess.coeff(1);
        const Scalar* const x_end = x + ncol * stride;
        for (; x < x_end; x += stride)
        {
            const Scalar tvx = tau * (x[0] + v1 * x[1] + v2 * x[2]);
            x[0] -= tvx;
            x[1] -= tvx * v1;
            x[2] -= tvx * v2;
        }
    }

    // P = I - tau * v * v' = P'
    // XP = X - tau * (X * v) * v', X [r x 3]
    // Applies the reflector from the right to `nrow` rows of the three
    // consecutive columns starting at x.
    static void apply_householder_right(const Vector2s& ess, const Scalar& tau, Scalar* x, Index nrow, Index stride)
    {
        const Scalar v1 = ess.coeff(0), v2 = ess.coeff(1);
        Scalar* x0 = x;
        Scalar* x1 = x + stride;
        Scalar* x2 = x1 + stride;
        for (Index i = 0; i < nrow; i++)
        {
            const Scalar txv = tau * (x0[i] + v1 * x1[i] + v2 * x2[i]);
            x0[i] -= txv;
            x1[i] -= txv * v1;
            x2[i] -= txv * v2;
        }
    }

    // Perform a Francis QR step involving rows il:iu and columns im:iu
    void perform_francis_qr_step(Index il, Index im, Index iu, const Vector3s& first_householder_vec, const Scalar& near_0)
    {
        using std::abs;

        // Chase the bulge down the sub-diagonal with 3x3 Householder reflectors
        for (Index k = im; k <= iu - 2; ++k)
        {
            const bool first_iter = (k == im);
            Vector3s v;
            if (first_iter)
                v = first_householder_vec;
            else
                v = m_T.template block<3, 1>(k, k - 1);

            Scalar tau, beta;
            Vector2s ess;
            v.makeHouseholder(ess, tau, beta);

            if (abs(beta) > near_0)  // if v is not zero
            {
                if (first_iter && k > il)
                    m_T.coeffRef(k, k - 1) = -m_T.coeff(k, k - 1);
                else if (!first_iter)
                    m_T.coeffRef(k, k - 1) = beta;

                // These Householder transformations form the O(n^3) part of the algorithm
                // m_T.block(k, k, 3, m_n - k).applyHouseholderOnTheLeft(ess, tau, workspace);
                // m_T.block(0, k, (std::min)(iu, k + 3) + 1, 3).applyHouseholderOnTheRight(ess, tau, workspace);
                // m_U.block(0, k, m_n, 3).applyHouseholderOnTheRight(ess, tau, workspace);
                apply_householder_left(ess, tau, &m_T.coeffRef(k, k), m_n - k, m_n);
                apply_householder_right(ess, tau, &m_T.coeffRef(0, k), (std::min)(iu, k + 3) + 1, m_n);
                apply_householder_right(ess, tau, &m_U.coeffRef(0, k), m_n, m_n);
            }
        }

        // The last 2-row block
        Eigen::JacobiRotation<Scalar> rot;
        Scalar beta;
        rot.makeGivens(m_T.coeff(iu - 1, iu - 2), m_T.coeff(iu, iu - 2), &beta);
        if (abs(beta) > near_0)  // if v is not zero
        {
            m_T.coeffRef(iu - 1, iu - 2) = beta;
            m_T.rightCols(m_n - iu + 1).applyOnTheLeft(iu - 1, iu, rot.adjoint());
            m_T.topRows(iu + 1).applyOnTheRight(iu - 1, iu, rot);
            m_U.applyOnTheRight(iu - 1, iu, rot);
        }

        // clean up pollution due to round-off errors
        for (Index i = im + 2; i <= iu; ++i)
        {
            m_T.coeffRef(i, i - 2) = Scalar(0);
            if (i > im + 2)
                m_T.coeffRef(i, i - 3) = Scalar(0);
        }
    }

public:
    /// Default constructor; call compute() before querying results.
    UpperHessenbergSchur() :
        m_n(0), m_computed(false)
    {}

    /// Constructor that immediately runs the decomposition on mat.
    UpperHessenbergSchur(ConstGenericMatrix& mat) :
        m_n(mat.rows()), m_computed(false)
    {
        compute(mat);
    }

    /// Compute the Schur decomposition of the upper Hessenberg matrix mat.
    /// Throws std::invalid_argument if mat is not square, and
    /// std::runtime_error if the QR iteration fails to converge.
    void compute(ConstGenericMatrix& mat)
    {
        using std::abs;
        using std::sqrt;

        if (mat.rows() != mat.cols())
            throw std::invalid_argument("UpperHessenbergSchur: matrix must be square");

        m_n = mat.rows();
        m_T.resize(m_n, m_n);
        m_U.resize(m_n, m_n);
        constexpr Index max_iter_per_row = 40;
        const Index max_iter = m_n * max_iter_per_row;

        m_T.noalias() = mat;
        m_U.setIdentity();

        // The matrix m_T is divided in three parts.
        // Rows 0,...,il-1 are decoupled from the rest because m_T(il,il-1) is zero.
        // Rows il,...,iu is the part we are working on (the active window).
        // Rows iu+1,...,end are already brought in triangular form.
        Index iu = m_n - 1;
        Index iter = 0;        // iteration count for current eigenvalue
        Index total_iter = 0;  // iteration count for whole matrix
        Scalar ex_shift(0);    // sum of exceptional shifts
        const Scalar norm = upper_hessenberg_l1_norm(m_T);

        // sub-diagonal entries smaller than near_0 will be treated as zero.
        // We use eps^2 to enable more precision in small eigenvalues.
        const Scalar eps = Eigen::NumTraits<Scalar>::epsilon();
        const Scalar near_0 = Eigen::numext::maxi<Scalar>(norm * eps * eps, TypeTraits<Scalar>::min());

        if (norm != Scalar(0))
        {
            while (iu >= 0)
            {
                Index il = find_small_subdiag(iu, near_0);

                // Check for convergence
                if (il == iu)  // One root found
                {
                    m_T.coeffRef(iu, iu) += ex_shift;
                    if (iu > 0)
                        m_T.coeffRef(iu, iu - 1) = Scalar(0);
                    iu--;
                    iter = 0;
                }
                else if (il == iu - 1)  // Two roots found
                {
                    split_off_two_rows(iu, ex_shift);
                    iu -= 2;
                    iter = 0;
                }
                else  // No convergence yet
                {
                    Vector3s first_householder_vec = Vector3s::Zero(), shift_info;
                    compute_shift(iu, iter, ex_shift, shift_info);
                    iter++;
                    total_iter++;
                    if (total_iter > max_iter)
                        break;
                    Index im;
                    init_francis_qr_step(il, iu, shift_info, im, first_householder_vec);
                    perform_francis_qr_step(il, im, iu, first_householder_vec, near_0);
                }
            }
        }

        if (total_iter > max_iter)
            throw std::runtime_error("UpperHessenbergSchur: Schur decomposition failed");

        m_computed = true;
    }

    /// Returns the quasi-triangular factor T; throws if compute() was not called.
    const Matrix& matrix_T() const
    {
        if (!m_computed)
            throw std::logic_error("UpperHessenbergSchur: need to call compute() first");

        return m_T;
    }

    /// Returns the orthogonal factor U; throws if compute() was not called.
    const Matrix& matrix_U() const
    {
        if (!m_computed)
            throw std::logic_error("UpperHessenbergSchur: need to call compute() first");

        return m_U;
    }

    /// Swap the internal T matrix with `other` (cheap, no copy).
    void swap_T(Matrix& other)
    {
        m_T.swap(other);
    }

    /// Swap the internal U matrix with `other` (cheap, no copy).
    void swap_U(Matrix& other)
    {
        m_U.swap(other);
    }
};
} // namespace Spectra
#endif // SPECTRA_UPPER_HESSENBERG_SCHUR_H
| 13,184
| 35.123288
| 127
|
h
|
null |
LRMI-main/Spectra/MatOp/DenseCholesky.h
|
// Copyright (C) 2016-2021 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_DENSE_CHOLESKY_H
#define SPECTRA_DENSE_CHOLESKY_H
#include <Eigen/Core>
#include <Eigen/Cholesky>
#include <stdexcept>
#include "../Util/CompInfo.h"
namespace Spectra {
///
/// \ingroup MatOp
///
/// This class defines the operations related to Cholesky decomposition on a
/// positive definite matrix, \f$B=LL'\f$, where \f$L\f$ is a lower triangular
/// matrix. It is mainly used in the SymGEigsSolver generalized eigen solver
/// in the Cholesky decomposition mode.
///
/// \tparam Scalar_ The element type of the matrix, for example,
/// `float`, `double`, and `long double`.
/// \tparam Uplo Either `Eigen::Lower` or `Eigen::Upper`, indicating which
/// triangular part of the matrix is used.
/// \tparam Flags Either `Eigen::ColMajor` or `Eigen::RowMajor`, indicating
/// the storage format of the input matrix.
///
template <typename Scalar_, int Uplo = Eigen::Lower, int Flags = Eigen::ColMajor>
class DenseCholesky
{
public:
///
/// Element type of the matrix.
///
using Scalar = Scalar_;
private:
using Index = Eigen::Index;
using Matrix = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic, Flags>;
using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;
using MapConstVec = Eigen::Map<const Vector>;
using MapVec = Eigen::Map<Vector>;
const Index m_n;
Eigen::LLT<Matrix, Uplo> m_decomp;
CompInfo m_info; // status of the decomposition
public:
///
/// Constructor to create the matrix operation object.
///
/// \param mat An **Eigen** matrix object, whose type can be
/// `Eigen::Matrix<Scalar, ...>` (e.g. `Eigen::MatrixXd` and
/// `Eigen::MatrixXf`), or its mapped version
/// (e.g. `Eigen::Map<Eigen::MatrixXd>`).
///
template <typename Derived>
DenseCholesky(const Eigen::MatrixBase<Derived>& mat) :
m_n(mat.rows()), m_info(CompInfo::NotComputed)
{
static_assert(
static_cast<int>(Derived::PlainObject::IsRowMajor) == static_cast<int>(Matrix::IsRowMajor),
"DenseCholesky: the \"Flags\" template parameter does not match the input matrix (Eigen::ColMajor/Eigen::RowMajor)");
if (m_n != mat.cols())
throw std::invalid_argument("DenseCholesky: matrix must be square");
m_decomp.compute(mat);
m_info = (m_decomp.info() == Eigen::Success) ?
CompInfo::Successful :
CompInfo::NumericalIssue;
}
///
/// Returns the number of rows of the underlying matrix.
///
Index rows() const { return m_n; }
///
/// Returns the number of columns of the underlying matrix.
///
Index cols() const { return m_n; }
///
/// Returns the status of the computation.
/// The full list of enumeration values can be found in \ref Enumerations.
///
CompInfo info() const { return m_info; }
///
/// Performs the lower triangular solving operation \f$y=L^{-1}x\f$.
///
/// \param x_in Pointer to the \f$x\f$ vector.
/// \param y_out Pointer to the \f$y\f$ vector.
///
// y_out = inv(L) * x_in
void lower_triangular_solve(const Scalar* x_in, Scalar* y_out) const
{
MapConstVec x(x_in, m_n);
MapVec y(y_out, m_n);
y.noalias() = m_decomp.matrixL().solve(x);
}
///
/// Performs the upper triangular solving operation \f$y=(L')^{-1}x\f$.
///
/// \param x_in Pointer to the \f$x\f$ vector.
/// \param y_out Pointer to the \f$y\f$ vector.
///
// y_out = inv(L') * x_in
void upper_triangular_solve(const Scalar* x_in, Scalar* y_out) const
{
MapConstVec x(x_in, m_n);
MapVec y(y_out, m_n);
y.noalias() = m_decomp.matrixU().solve(x);
}
};
} // namespace Spectra
#endif // SPECTRA_DENSE_CHOLESKY_H
| 4,101
| 31.555556
| 129
|
h
|
null |
LRMI-main/Spectra/MatOp/DenseGenMatProd.h
|
// Copyright (C) 2016-2021 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_DENSE_GEN_MAT_PROD_H
#define SPECTRA_DENSE_GEN_MAT_PROD_H
#include <Eigen/Core>
namespace Spectra {
///
/// \defgroup MatOp Matrix Operations
///
/// Define matrix operations on existing matrix objects
///
///
/// \ingroup MatOp
///
/// This class defines the matrix-vector multiplication operation on a
/// general real matrix \f$A\f$, i.e., calculating \f$y=Ax\f$ for any vector
/// \f$x\f$. It is mainly used in the GenEigsSolver and
/// SymEigsSolver eigen solvers.
///
/// \tparam Scalar_ The element type of the matrix, for example,
/// `float`, `double`, and `long double`.
/// \tparam Flags Either `Eigen::ColMajor` or `Eigen::RowMajor`, indicating
/// the storage format of the input matrix.
///
template <typename Scalar_, int Flags = Eigen::ColMajor>
class DenseGenMatProd
{
public:
///
/// Element type of the matrix.
///
using Scalar = Scalar_;
private:
using Index = Eigen::Index;
using Matrix = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic, Flags>;
using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;
using MapConstVec = Eigen::Map<const Vector>;
using MapVec = Eigen::Map<Vector>;
using ConstGenericMatrix = const Eigen::Ref<const Matrix>;
ConstGenericMatrix m_mat;
public:
///
/// Constructor to create the matrix operation object.
///
/// \param mat An **Eigen** matrix object, whose type can be
/// `Eigen::Matrix<Scalar, ...>` (e.g. `Eigen::MatrixXd` and
/// `Eigen::MatrixXf`), or its mapped version
/// (e.g. `Eigen::Map<Eigen::MatrixXd>`).
///
template <typename Derived>
DenseGenMatProd(const Eigen::MatrixBase<Derived>& mat) :
m_mat(mat)
{
static_assert(
static_cast<int>(Derived::PlainObject::IsRowMajor) == static_cast<int>(Matrix::IsRowMajor),
"DenseGenMatProd: the \"Flags\" template parameter does not match the input matrix (Eigen::ColMajor/Eigen::RowMajor)");
}
///
/// Return the number of rows of the underlying matrix.
///
Index rows() const { return m_mat.rows(); }
///
/// Return the number of columns of the underlying matrix.
///
Index cols() const { return m_mat.cols(); }
///
/// Perform the matrix-vector multiplication operation \f$y=Ax\f$.
///
/// \param x_in Pointer to the \f$x\f$ vector.
/// \param y_out Pointer to the \f$y\f$ vector.
///
// y_out = A * x_in
void perform_op(const Scalar* x_in, Scalar* y_out) const
{
MapConstVec x(x_in, m_mat.cols());
MapVec y(y_out, m_mat.rows());
y.noalias() = m_mat * x;
}
///
/// Perform the matrix-matrix multiplication operation \f$y=Ax\f$.
///
Matrix operator*(const Eigen::Ref<const Matrix>& mat_in) const
{
return m_mat * mat_in;
}
///
/// Extract (i,j) element of the underlying matrix.
///
Scalar operator()(Index i, Index j) const
{
return m_mat(i, j);
}
};
} // namespace Spectra
#endif // SPECTRA_DENSE_GEN_MAT_PROD_H
| 3,352
| 28.672566
| 131
|
h
|
null |
LRMI-main/Spectra/MatOp/DenseSymMatProd.h
|
// Copyright (C) 2016-2021 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_DENSE_SYM_MAT_PROD_H
#define SPECTRA_DENSE_SYM_MAT_PROD_H
#include "Eigen/Core"
namespace Spectra {
///
/// \ingroup MatOp
///
/// This class defines the matrix-vector multiplication operation on a
/// symmetric real matrix \f$A\f$, i.e., calculating \f$y=Ax\f$ for any vector
/// \f$x\f$. It is mainly used in the SymEigsSolver eigen solver.
///
/// \tparam Scalar_ The element type of the matrix, for example,
/// `float`, `double`, and `long double`.
/// \tparam Uplo Either `Eigen::Lower` or `Eigen::Upper`, indicating which
/// triangular part of the matrix is used.
/// \tparam Flags Either `Eigen::ColMajor` or `Eigen::RowMajor`, indicating
/// the storage format of the input matrix.
///
template <typename Scalar_, int Uplo = Eigen::Lower, int Flags = Eigen::ColMajor>
class DenseSymMatProd
{
public:
///
/// Element type of the matrix.
///
using Scalar = Scalar_;
private:
using Index = Eigen::Index;
using Matrix = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic, Flags>;
using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;
using MapConstVec = Eigen::Map<const Vector>;
using MapVec = Eigen::Map<Vector>;
using ConstGenericMatrix = const Eigen::Ref<const Matrix>;
ConstGenericMatrix m_mat;
public:
///
/// Constructor to create the matrix operation object.
///
/// \param mat An **Eigen** matrix object, whose type can be
/// `Eigen::Matrix<Scalar, ...>` (e.g. `Eigen::MatrixXd` and
/// `Eigen::MatrixXf`), or its mapped version
/// (e.g. `Eigen::Map<Eigen::MatrixXd>`).
///
template <typename Derived>
DenseSymMatProd(const Eigen::MatrixBase<Derived>& mat) :
m_mat(mat)
{
static_assert(
static_cast<int>(Derived::PlainObject::IsRowMajor) == static_cast<int>(Matrix::IsRowMajor),
"DenseSymMatProd: the \"Flags\" template parameter does not match the input matrix (Eigen::ColMajor/Eigen::RowMajor)");
}
///
/// Return the number of rows of the underlying matrix.
///
Index rows() const { return m_mat.rows(); }
///
/// Return the number of columns of the underlying matrix.
///
Index cols() const { return m_mat.cols(); }
///
/// Perform the matrix-vector multiplication operation \f$y=Ax\f$.
///
/// \param x_in Pointer to the \f$x\f$ vector.
/// \param y_out Pointer to the \f$y\f$ vector.
///
// y_out = A * x_in
void perform_op(const Scalar* x_in, Scalar* y_out) const
{
MapConstVec x(x_in, m_mat.cols());
MapVec y(y_out, m_mat.rows());
y.noalias() = m_mat.template selfadjointView<Uplo>() * x;
}
///
/// Perform the matrix-matrix multiplication operation \f$y=Ax\f$.
///
Matrix operator*(const Eigen::Ref<const Matrix>& mat_in) const
{
return m_mat.template selfadjointView<Uplo>() * mat_in;
}
///
/// Extract (i,j) element of the underlying matrix.
///
Scalar operator()(Index i, Index j) const
{
return m_mat(i, j);
}
};
} // namespace Spectra
#endif // SPECTRA_DENSE_SYM_MAT_PROD_H
| 3,452
| 30.972222
| 131
|
h
|
null |
LRMI-main/Spectra/MatOp/DenseSymShiftSolve.h
|
// Copyright (C) 2016-2021 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_DENSE_SYM_SHIFT_SOLVE_H
#define SPECTRA_DENSE_SYM_SHIFT_SOLVE_H
#include <Eigen/Core>
#include <stdexcept>
#include "../LinAlg/BKLDLT.h"
#include "../Util/CompInfo.h"
namespace Spectra {
///
/// \ingroup MatOp
///
/// This class defines the shift-solve operation on a real symmetric matrix \f$A\f$,
/// i.e., calculating \f$y=(A-\sigma I)^{-1}x\f$ for any real \f$\sigma\f$ and
/// vector \f$x\f$. It is mainly used in the SymEigsShiftSolver eigen solver.
///
/// \tparam Scalar_ The element type of the matrix, for example,
/// `float`, `double`, and `long double`.
/// \tparam Uplo Either `Eigen::Lower` or `Eigen::Upper`, indicating which
/// triangular part of the matrix is used.
/// \tparam Flags Either `Eigen::ColMajor` or `Eigen::RowMajor`, indicating
/// the storage format of the input matrix.
///
template <typename Scalar_, int Uplo = Eigen::Lower, int Flags = Eigen::ColMajor>
class DenseSymShiftSolve
{
public:
///
/// Element type of the matrix.
///
using Scalar = Scalar_;
private:
using Index = Eigen::Index;
using Matrix = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic, Flags>;
using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;
using MapConstVec = Eigen::Map<const Vector>;
using MapVec = Eigen::Map<Vector>;
using ConstGenericMatrix = const Eigen::Ref<const Matrix>;
ConstGenericMatrix m_mat;
const Index m_n;
BKLDLT<Scalar> m_solver;
public:
///
/// Constructor to create the matrix operation object.
///
/// \param mat An **Eigen** matrix object, whose type can be
/// `Eigen::Matrix<Scalar, ...>` (e.g. `Eigen::MatrixXd` and
/// `Eigen::MatrixXf`), or its mapped version
/// (e.g. `Eigen::Map<Eigen::MatrixXd>`).
///
template <typename Derived>
DenseSymShiftSolve(const Eigen::MatrixBase<Derived>& mat) :
m_mat(mat), m_n(mat.rows())
{
static_assert(
static_cast<int>(Derived::PlainObject::IsRowMajor) == static_cast<int>(Matrix::IsRowMajor),
"DenseSymShiftSolve: the \"Flags\" template parameter does not match the input matrix (Eigen::ColMajor/Eigen::RowMajor)");
if (m_n != mat.cols())
throw std::invalid_argument("DenseSymShiftSolve: matrix must be square");
}
///
/// Return the number of rows of the underlying matrix.
///
Index rows() const { return m_n; }
///
/// Return the number of columns of the underlying matrix.
///
Index cols() const { return m_n; }
///
/// Set the real shift \f$\sigma\f$.
///
void set_shift(const Scalar& sigma)
{
m_solver.compute(m_mat, Uplo, sigma);
if (m_solver.info() != CompInfo::Successful)
throw std::invalid_argument("DenseSymShiftSolve: factorization failed with the given shift");
}
///
/// Perform the shift-solve operation \f$y=(A-\sigma I)^{-1}x\f$.
///
/// \param x_in Pointer to the \f$x\f$ vector.
/// \param y_out Pointer to the \f$y\f$ vector.
///
// y_out = inv(A - sigma * I) * x_in
void perform_op(const Scalar* x_in, Scalar* y_out) const
{
MapConstVec x(x_in, m_n);
MapVec y(y_out, m_n);
y.noalias() = m_solver.solve(x);
}
};
} // namespace Spectra
#endif // SPECTRA_DENSE_SYM_SHIFT_SOLVE_H
| 3,643
| 31.828829
| 134
|
h
|
null |
LRMI-main/Spectra/MatOp/SparseCholesky.h
|
// Copyright (C) 2016-2021 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_SPARSE_CHOLESKY_H
#define SPECTRA_SPARSE_CHOLESKY_H
#include <Eigen/Core>
#include <Eigen/SparseCore>
#include <Eigen/SparseCholesky>
#include <stdexcept>
#include "../Util/CompInfo.h"
namespace Spectra {
///
/// \ingroup MatOp
///
/// This class defines the operations related to Cholesky decomposition on a
/// sparse positive definite matrix, \f$B=LL'\f$, where \f$L\f$ is a lower triangular
/// matrix. It is mainly used in the SymGEigsSolver generalized eigen solver
/// in the Cholesky decomposition mode.
///
/// \tparam Scalar_ The element type of the matrix, for example,
/// `float`, `double`, and `long double`.
/// \tparam Uplo Either `Eigen::Lower` or `Eigen::Upper`, indicating which
/// triangular part of the matrix is used.
/// \tparam Flags Either `Eigen::ColMajor` or `Eigen::RowMajor`, indicating
/// the storage format of the input matrix.
/// \tparam StorageIndex The type of the indices for the sparse matrix.
///
template <typename Scalar_, int Uplo = Eigen::Lower, int Flags = Eigen::ColMajor, typename StorageIndex = int>
class SparseCholesky
{
public:
    ///
    /// Element type of the matrix.
    ///
    using Scalar = Scalar_;

private:
    using Index = Eigen::Index;
    using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;
    using MapConstVec = Eigen::Map<const Vector>;
    using MapVec = Eigen::Map<Vector>;
    using SparseMatrix = Eigen::SparseMatrix<Scalar, Flags, StorageIndex>;

    const Index m_n;  // dimension of the (square) matrix
    // Eigen's SimplicialLLT works on a permuted version of the matrix
    // (fill-in reducing ordering), hence the permutation steps applied in
    // the two solve routines below
    Eigen::SimplicialLLT<SparseMatrix, Uplo> m_decomp;
    CompInfo m_info;  // status of the decomposition

public:
    ///
    /// Constructor to create the matrix operation object.
    ///
    /// \param mat An **Eigen** sparse matrix object, whose type can be
    /// `Eigen::SparseMatrix<Scalar, ...>` or its mapped version
    /// `Eigen::Map<Eigen::SparseMatrix<Scalar, ...> >`.
    ///
    /// \exception std::invalid_argument Thrown when the matrix is not square.
    ///
    template <typename Derived>
    SparseCholesky(const Eigen::SparseMatrixBase<Derived>& mat) :
        m_n(mat.rows())
    {
        // The storage order of the supplied matrix must agree with the
        // "Flags" template parameter
        static_assert(
            static_cast<int>(Derived::PlainObject::IsRowMajor) == static_cast<int>(SparseMatrix::IsRowMajor),
            "SparseCholesky: the \"Flags\" template parameter does not match the input matrix (Eigen::ColMajor/Eigen::RowMajor)");

        if (mat.rows() != mat.cols())
            throw std::invalid_argument("SparseCholesky: matrix must be square");

        m_decomp.compute(mat);
        // Record whether the factorization succeeded; clients query info()
        m_info = (m_decomp.info() == Eigen::Success) ?
            CompInfo::Successful :
            CompInfo::NumericalIssue;
    }

    ///
    /// Returns the number of rows of the underlying matrix.
    ///
    Index rows() const { return m_n; }
    ///
    /// Returns the number of columns of the underlying matrix.
    ///
    Index cols() const { return m_n; }

    ///
    /// Returns the status of the computation.
    /// The full list of enumeration values can be found in \ref Enumerations.
    ///
    CompInfo info() const { return m_info; }

    ///
    /// Performs the lower triangular solving operation \f$y=L^{-1}x\f$.
    ///
    /// \param x_in Pointer to the \f$x\f$ vector.
    /// \param y_out Pointer to the \f$y\f$ vector.
    ///
    // y_out = inv(L) * x_in
    void lower_triangular_solve(const Scalar* x_in, Scalar* y_out) const
    {
        MapConstVec x(x_in, m_n);
        MapVec y(y_out, m_n);
        // Apply the fill-in reducing permutation first, then solve L*y = P*x
        y.noalias() = m_decomp.permutationP() * x;
        m_decomp.matrixL().solveInPlace(y);
    }

    ///
    /// Performs the upper triangular solving operation \f$y=(L')^{-1}x\f$.
    ///
    /// \param x_in Pointer to the \f$x\f$ vector.
    /// \param y_out Pointer to the \f$y\f$ vector.
    ///
    // y_out = inv(L') * x_in
    void upper_triangular_solve(const Scalar* x_in, Scalar* y_out) const
    {
        MapConstVec x(x_in, m_n);
        MapVec y(y_out, m_n);
        // Solve L'*y = x first, then undo the permutation
        y.noalias() = m_decomp.matrixU().solve(x);
        y = m_decomp.permutationPinv() * y;
    }
};
} // namespace Spectra
#endif // SPECTRA_SPARSE_CHOLESKY_H
| 4,334
| 32.604651
| 130
|
h
|
null |
LRMI-main/Spectra/MatOp/SparseGenMatProd.h
|
// Copyright (C) 2016-2021 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_SPARSE_GEN_MAT_PROD_H
#define SPECTRA_SPARSE_GEN_MAT_PROD_H
#include <Eigen/Core>
#include <Eigen/SparseCore>
namespace Spectra {
///
/// \ingroup MatOp
///
/// This class defines the matrix-vector multiplication operation on a
/// sparse real matrix \f$A\f$, i.e., calculating \f$y=Ax\f$ for any vector
/// \f$x\f$. It is mainly used in the GenEigsSolver and SymEigsSolver
/// eigen solvers.
///
/// \tparam Scalar_ The element type of the matrix, for example,
/// `float`, `double`, and `long double`.
/// \tparam Flags Either `Eigen::ColMajor` or `Eigen::RowMajor`, indicating
/// the storage format of the input matrix.
/// \tparam StorageIndex The type of the indices for the sparse matrix.
///
template <typename Scalar_, int Flags = Eigen::ColMajor, typename StorageIndex = int>
class SparseGenMatProd
{
public:
///
/// Element type of the matrix.
///
using Scalar = Scalar_;
private:
using Index = Eigen::Index;
using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;
using MapConstVec = Eigen::Map<const Vector>;
using MapVec = Eigen::Map<Vector>;
using Matrix = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>;
using SparseMatrix = Eigen::SparseMatrix<Scalar, Flags, StorageIndex>;
using ConstGenericSparseMatrix = const Eigen::Ref<const SparseMatrix>;
ConstGenericSparseMatrix m_mat;
public:
///
/// Constructor to create the matrix operation object.
///
/// \param mat An **Eigen** sparse matrix object, whose type can be
/// `Eigen::SparseMatrix<Scalar, ...>` or its mapped version
/// `Eigen::Map<Eigen::SparseMatrix<Scalar, ...> >`.
///
template <typename Derived>
SparseGenMatProd(const Eigen::SparseMatrixBase<Derived>& mat) :
m_mat(mat)
{
static_assert(
static_cast<int>(Derived::PlainObject::IsRowMajor) == static_cast<int>(SparseMatrix::IsRowMajor),
"SparseGenMatProd: the \"Flags\" template parameter does not match the input matrix (Eigen::ColMajor/Eigen::RowMajor)");
}
///
/// Return the number of rows of the underlying matrix.
///
Index rows() const { return m_mat.rows(); }
///
/// Return the number of columns of the underlying matrix.
///
Index cols() const { return m_mat.cols(); }
///
/// Perform the matrix-vector multiplication operation \f$y=Ax\f$.
///
/// \param x_in Pointer to the \f$x\f$ vector.
/// \param y_out Pointer to the \f$y\f$ vector.
///
// y_out = A * x_in
void perform_op(const Scalar* x_in, Scalar* y_out) const
{
MapConstVec x(x_in, m_mat.cols());
MapVec y(y_out, m_mat.rows());
y.noalias() = m_mat * x;
}
///
/// Perform the matrix-matrix multiplication operation \f$y=Ax\f$.
///
Matrix operator*(const Eigen::Ref<const Matrix>& mat_in) const
{
return m_mat * mat_in;
}
///
/// Extract (i,j) element of the underlying matrix.
///
Scalar operator()(Index i, Index j) const
{
return m_mat.coeff(i, j);
}
};
} // namespace Spectra
#endif // SPECTRA_SPARSE_GEN_MAT_PROD_H
| 3,470
| 31.138889
| 132
|
h
|
null |
LRMI-main/Spectra/MatOp/SparseSymMatProd.h
|
// Copyright (C) 2016-2021 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_SPARSE_SYM_MAT_PROD_H
#define SPECTRA_SPARSE_SYM_MAT_PROD_H
#include <Eigen/Core>
#include <Eigen/SparseCore>
namespace Spectra {
///
/// \ingroup MatOp
///
/// This class defines the matrix-vector multiplication operation on a
/// sparse real symmetric matrix \f$A\f$, i.e., calculating \f$y=Ax\f$ for any vector
/// \f$x\f$. It is mainly used in the SymEigsSolver eigen solver.
///
/// \tparam Scalar_ The element type of the matrix, for example,
/// `float`, `double`, and `long double`.
/// \tparam Uplo Either `Eigen::Lower` or `Eigen::Upper`, indicating which
/// triangular part of the matrix is used.
/// \tparam Flags Either `Eigen::ColMajor` or `Eigen::RowMajor`, indicating
/// the storage format of the input matrix.
/// \tparam StorageIndex The type of the indices for the sparse matrix.
///
template <typename Scalar_, int Uplo = Eigen::Lower, int Flags = Eigen::ColMajor, typename StorageIndex = int>
class SparseSymMatProd
{
public:
///
/// Element type of the matrix.
///
using Scalar = Scalar_;
private:
using Index = Eigen::Index;
using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;
using MapConstVec = Eigen::Map<const Vector>;
using MapVec = Eigen::Map<Vector>;
using Matrix = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>;
using SparseMatrix = Eigen::SparseMatrix<Scalar, Flags, StorageIndex>;
using ConstGenericSparseMatrix = const Eigen::Ref<const SparseMatrix>;
ConstGenericSparseMatrix m_mat;
public:
///
/// Constructor to create the matrix operation object.
///
/// \param mat An **Eigen** sparse matrix object, whose type can be
/// `Eigen::SparseMatrix<Scalar, ...>` or its mapped version
/// `Eigen::Map<Eigen::SparseMatrix<Scalar, ...> >`.
///
template <typename Derived>
SparseSymMatProd(const Eigen::SparseMatrixBase<Derived>& mat) :
m_mat(mat)
{
static_assert(
static_cast<int>(Derived::PlainObject::IsRowMajor) == static_cast<int>(SparseMatrix::IsRowMajor),
"SparseSymMatProd: the \"Flags\" template parameter does not match the input matrix (Eigen::ColMajor/Eigen::RowMajor)");
}
///
/// Return the number of rows of the underlying matrix.
///
Index rows() const { return m_mat.rows(); }
///
/// Return the number of columns of the underlying matrix.
///
Index cols() const { return m_mat.cols(); }
///
/// Perform the matrix-vector multiplication operation \f$y=Ax\f$.
///
/// \param x_in Pointer to the \f$x\f$ vector.
/// \param y_out Pointer to the \f$y\f$ vector.
///
// y_out = A * x_in
void perform_op(const Scalar* x_in, Scalar* y_out) const
{
MapConstVec x(x_in, m_mat.cols());
MapVec y(y_out, m_mat.rows());
y.noalias() = m_mat.template selfadjointView<Uplo>() * x;
}
///
/// Perform the matrix-matrix multiplication operation \f$y=Ax\f$.
///
Matrix operator*(const Eigen::Ref<const Matrix>& mat_in) const
{
return m_mat.template selfadjointView<Uplo>() * mat_in;
}
///
/// Extract (i,j) element of the underlying matrix.
///
Scalar operator()(Index i, Index j) const
{
return m_mat.coeff(i, j);
}
};
} // namespace Spectra
#endif // SPECTRA_SPARSE_SYM_MAT_PROD_H
| 3,695
| 32.908257
| 132
|
h
|
null |
LRMI-main/Spectra/MatOp/SymShiftInvert.h
|
// Copyright (C) 2020-2021 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_SYM_SHIFT_INVERT_H
#define SPECTRA_SYM_SHIFT_INVERT_H
#include <Eigen/Core>
#include <Eigen/SparseCore>
#include <Eigen/SparseLU>
#include <stdexcept>
#include <type_traits> // std::conditional, std::is_same
#include "../LinAlg/BKLDLT.h"
#include "../Util/CompInfo.h"
namespace Spectra {
/// \cond
// Computes and factorizes A - sigma*B while avoiding unnecessary copies.
// Primary template, used when both A and B are sparse: the shifted matrix
// A - sigma*B is then sparse as well and is factorized with Eigen::SparseLU.
template <bool AIsSparse, bool BIsSparse, int UploA, int UploB>
class SymShiftInvertHelper
{
public:
    // Factorize A - sigma*B into `fac`; returns true on success
    template <typename Scalar, typename Fac, typename ArgA, typename ArgB>
    static bool factorize(Fac& fac, const ArgA& A, const ArgB& B, const Scalar& sigma)
    {
        using SpMat = typename ArgA::PlainObject;
        // Expand the stored triangular parts into full symmetric matrices
        SpMat sym_A = A.template selfadjointView<UploA>();
        SpMat sym_B = B.template selfadjointView<UploB>();
        SpMat shifted = sym_A - sigma * sym_B;
        // Tell the SparseLU solver that the matrix is symmetric, then factorize
        fac.isSymmetric(true);
        fac.compute(shifted);
        return fac.info() == Eigen::Success;
    }
};
// A is dense, B is dense or sparse.
// Here A - sigma*B is dense and is factorized with BKLDLT, which reads only
// one triangular part of its input, so only the <UploA> triangle of the
// shifted matrix is assembled.
template <bool BIsSparse, int UploA, int UploB>
class SymShiftInvertHelper<false, BIsSparse, UploA, UploB>
{
public:
    // Factorize A - sigma*B into `fac`; returns true on success
    template <typename Scalar, typename Fac, typename ArgA, typename ArgB>
    static bool factorize(Fac& fac, const ArgA& A, const ArgB& B, const Scalar& sigma)
    {
        using Matrix = typename ArgA::PlainObject;
        // Make a copy of the <UploA> triangular part of A
        Matrix mat(A.rows(), A.cols());
        mat.template triangularView<UploA>() = A;
        // Update <UploA> triangular part of mat by subtracting sigma*B
        if (UploA == UploB)
            mat -= (B * sigma).template triangularView<UploA>();
        else
            // A and B store opposite triangles: since B is symmetric,
            // transposing its stored triangle maps it onto the <UploA>
            // triangle of mat
            mat -= (B * sigma).template triangularView<UploB>().transpose();
        // BKLDLT solver, reading the <UploA> triangle of mat
        fac.compute(mat, UploA);
        // Return true if successful
        return fac.info() == CompInfo::Successful;
    }
};
// A is sparse, B is dense.
// Here A - sigma*B is dense and is factorized with BKLDLT, which reads only
// one triangular part of its input, so only the <UploB> triangle of the
// shifted matrix is assembled.
template <int UploA, int UploB>
class SymShiftInvertHelper<true, false, UploA, UploB>
{
public:
    // Factorize A - sigma*B into `fac`; returns true on success
    template <typename Scalar, typename Fac, typename ArgA, typename ArgB>
    static bool factorize(Fac& fac, const ArgA& A, const ArgB& B, const Scalar& sigma)
    {
        using Matrix = typename ArgB::PlainObject;
        // Construct the <UploB> triangular part of -sigma*B
        Matrix mat(B.rows(), B.cols());
        mat.template triangularView<UploB>() = -sigma * B;
        // Update <UploB> triangular part of mat by adding A
        if (UploA == UploB)
            mat += A.template triangularView<UploB>();
        else
            // A stores the opposite triangle: since A is symmetric,
            // transposing its stored triangle maps it onto the <UploB>
            // triangle of mat
            mat += A.template triangularView<UploA>().transpose();
        // BKLDLT solver, reading the <UploB> triangle of mat
        fac.compute(mat, UploB);
        // Return true if successful
        return fac.info() == CompInfo::Successful;
    }
};
/// \endcond
///
/// \ingroup MatOp
///
/// This class defines matrix operations required by the generalized eigen solver
/// in the shift-and-invert mode. Given two symmetric matrices \f$A\f$ and \f$B\f$,
/// it solves the linear equation \f$y=(A-\sigma B)^{-1}x\f$, where \f$\sigma\f$ is a real shift.
/// Each of \f$A\f$ and \f$B\f$ can be a dense or sparse matrix.
///
/// This class is intended to be used with the SymGEigsShiftSolver generalized eigen solver.
///
/// \tparam Scalar_ The element type of the matrices.
/// Currently supported types are `float`, `double`, and `long double`.
/// \tparam TypeA The type of the \f$A\f$ matrix, indicating whether \f$A\f$ is
/// dense or sparse. Possible values are `Eigen::Dense` and `Eigen::Sparse`.
/// \tparam TypeB The type of the \f$B\f$ matrix, indicating whether \f$B\f$ is
/// dense or sparse. Possible values are `Eigen::Dense` and `Eigen::Sparse`.
/// \tparam UploA Whether the lower or upper triangular part of \f$A\f$ should be used.
/// Possible values are `Eigen::Lower` and `Eigen::Upper`.
/// \tparam UploB Whether the lower or upper triangular part of \f$B\f$ should be used.
/// Possible values are `Eigen::Lower` and `Eigen::Upper`.
/// \tparam FlagsA Additional flags for the matrix class of \f$A\f$.
/// Possible values are `Eigen::ColMajor` and `Eigen::RowMajor`.
/// \tparam FlagsB Additional flags for the matrix class of \f$B\f$.
/// Possible values are `Eigen::ColMajor` and `Eigen::RowMajor`.
/// \tparam StorageIndexA The storage index type of the \f$A\f$ matrix, only used when \f$A\f$
/// is a sparse matrix.
/// \tparam StorageIndexB The storage index type of the \f$B\f$ matrix, only used when \f$B\f$
/// is a sparse matrix.
///
template <typename Scalar_, typename TypeA = Eigen::Sparse, typename TypeB = Eigen::Sparse,
          int UploA = Eigen::Lower, int UploB = Eigen::Lower,
          int FlagsA = Eigen::ColMajor, int FlagsB = Eigen::ColMajor,
          typename StorageIndexA = int, typename StorageIndexB = int>
class SymShiftInvert
{
public:
    ///
    /// Element type of the matrix.
    ///
    using Scalar = Scalar_;

private:
    using Index = Eigen::Index;

    // Hypothetical type of the A matrix, either dense or sparse
    using DenseTypeA = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic, FlagsA>;
    using SparseTypeA = Eigen::SparseMatrix<Scalar, FlagsA, StorageIndexA>;
    // Whether A is sparse
    using ASparse = std::is_same<TypeA, Eigen::Sparse>;
    // Actual type of the A matrix
    using MatrixA = typename std::conditional<ASparse::value, SparseTypeA, DenseTypeA>::type;

    // Hypothetical type of the B matrix, either dense or sparse
    using DenseTypeB = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic, FlagsB>;
    using SparseTypeB = Eigen::SparseMatrix<Scalar, FlagsB, StorageIndexB>;
    // Whether B is sparse
    using BSparse = std::is_same<TypeB, Eigen::Sparse>;
    // Actual type of the B matrix
    using MatrixB = typename std::conditional<BSparse::value, SparseTypeB, DenseTypeB>::type;

    using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;
    using MapConstVec = Eigen::Map<const Vector>;
    using MapVec = Eigen::Map<Vector>;

    // The type of A-sigma*B if one of A and B is dense
    // DenseType = if (A is dense) MatrixA else MatrixB
    using DenseType = typename std::conditional<ASparse::value, MatrixB, MatrixA>::type;
    // The type of A-sigma*B
    // If both A and B are sparse, the result is MatrixA; otherwise the result is DenseType
    using ResType = typename std::conditional<ASparse::value && BSparse::value, MatrixA, DenseType>::type;

    // If both A and B are sparse, then the result A-sigma*B is sparse, so we use
    // sparseLU for factorization; otherwise A-sigma*B is dense, and we use BKLDLT
    using FacType = typename std::conditional<
        ASparse::value && BSparse::value,
        Eigen::SparseLU<ResType>,
        BKLDLT<Scalar>>::type;

    using ConstGenericMatrixA = const Eigen::Ref<const MatrixA>;
    using ConstGenericMatrixB = const Eigen::Ref<const MatrixB>;

    ConstGenericMatrixA m_matA;  // reference to the A matrix (no copy is made)
    ConstGenericMatrixB m_matB;  // reference to the B matrix (no copy is made)
    const Index m_n;             // dimension of the (square) matrices
    FacType m_solver;            // factorization of A - sigma*B, built in set_shift()

public:
    ///
    /// Constructor to create the matrix operation object.
    ///
    /// \param A A dense or sparse matrix object, whose type can be `Eigen::Matrix<...>`,
    ///          `Eigen::SparseMatrix<...>`, `Eigen::Map<Eigen::Matrix<...>>`,
    ///          `Eigen::Map<Eigen::SparseMatrix<...>>`, `Eigen::Ref<Eigen::Matrix<...>>`,
    ///          `Eigen::Ref<Eigen::SparseMatrix<...>>`, etc.
    /// \param B A dense or sparse matrix object.
    ///
    /// \exception std::invalid_argument Thrown when A and B are not square
    ///            matrices of the same size.
    ///
    template <typename DerivedA, typename DerivedB>
    SymShiftInvert(const Eigen::EigenBase<DerivedA>& A, const Eigen::EigenBase<DerivedB>& B) :
        m_matA(A.derived()), m_matB(B.derived()), m_n(A.rows())
    {
        // The storage order of each input has to agree with the
        // corresponding FlagsA/FlagsB template parameter
        static_assert(
            static_cast<int>(DerivedA::PlainObject::IsRowMajor) == static_cast<int>(MatrixA::IsRowMajor),
            "SymShiftInvert: the \"FlagsA\" template parameter does not match the input matrix (Eigen::ColMajor/Eigen::RowMajor)");

        static_assert(
            static_cast<int>(DerivedB::PlainObject::IsRowMajor) == static_cast<int>(MatrixB::IsRowMajor),
            "SymShiftInvert: the \"FlagsB\" template parameter does not match the input matrix (Eigen::ColMajor/Eigen::RowMajor)");

        if (m_n != A.cols() || m_n != B.rows() || m_n != B.cols())
            throw std::invalid_argument("SymShiftInvert: A and B must be square matrices of the same size");
    }

    ///
    /// Return the number of rows of the underlying matrix.
    ///
    Index rows() const { return m_n; }
    ///
    /// Return the number of columns of the underlying matrix.
    ///
    Index cols() const { return m_n; }

    ///
    /// Set the real shift \f$\sigma\f$.
    ///
    /// This factorizes \f$A-\sigma B\f$; it must be called before perform_op(),
    /// which relies on the factorization.
    ///
    /// \exception std::invalid_argument Thrown when the factorization fails.
    ///
    void set_shift(const Scalar& sigma)
    {
        constexpr bool AIsSparse = ASparse::value;
        constexpr bool BIsSparse = BSparse::value;
        // Dispatch to the helper specialization that avoids unnecessary
        // copies for this combination of dense/sparse inputs
        using Helper = SymShiftInvertHelper<AIsSparse, BIsSparse, UploA, UploB>;
        const bool success = Helper::factorize(m_solver, m_matA, m_matB, sigma);
        if (!success)
            throw std::invalid_argument("SymShiftInvert: factorization failed with the given shift");
    }

    ///
    /// Perform the shift-invert operation \f$y=(A-\sigma B)^{-1}x\f$.
    ///
    /// \param x_in Pointer to the \f$x\f$ vector.
    /// \param y_out Pointer to the \f$y\f$ vector.
    ///
    // y_out = inv(A - sigma * B) * x_in
    void perform_op(const Scalar* x_in, Scalar* y_out) const
    {
        MapConstVec x(x_in, m_n);
        MapVec y(y_out, m_n);
        y.noalias() = m_solver.solve(x);
    }
};
} // namespace Spectra
#endif // SPECTRA_SYM_SHIFT_INVERT_H
| 10,177
| 40.373984
| 131
|
h
|
null |
LRMI-main/Spectra/MatOp/internal/ArnoldiOp.h
|
// Copyright (C) 2018-2021 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_ARNOLDI_OP_H
#define SPECTRA_ARNOLDI_OP_H
#include "Eigen/Core"
#include <cmath> // std::sqrt
namespace Spectra {
///
/// \ingroup Internals
/// @{
///
///
/// \defgroup Operators Operators
///
/// Different types of operators.
///
///
/// \ingroup Operators
///
/// Operators used in the Arnoldi factorization.
///
template <typename Scalar, typename OpType, typename BOpType>
class ArnoldiOp
{
private:
using Index = Eigen::Index;
using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;
const OpType& m_op;
const BOpType& m_Bop;
mutable Vector m_cache;
public:
ArnoldiOp(const OpType& op, const BOpType& Bop) :
m_op(op), m_Bop(Bop), m_cache(op.rows())
{}
// Move constructor
ArnoldiOp(ArnoldiOp&& other) :
m_op(other.m_op), m_Bop(other.m_Bop)
{
// We emulate the move constructor for Vector using Vector::swap()
m_cache.swap(other.m_cache);
}
inline Index rows() const { return m_op.rows(); }
// In generalized eigenvalue problem Ax=lambda*Bx, define the inner product to be <x, y> = x'By.
// For regular eigenvalue problems, it is the usual inner product <x, y> = x'y
// Compute <x, y> = x'By
// x and y are two vectors
template <typename Arg1, typename Arg2>
Scalar inner_product(const Arg1& x, const Arg2& y) const
{
m_Bop.perform_op(y.data(), m_cache.data());
return x.dot(m_cache);
}
// Compute res = <X, y> = X'By
// X is a matrix, y is a vector, res is a vector
template <typename Arg1, typename Arg2>
void trans_product(const Arg1& x, const Arg2& y, Eigen::Ref<Vector> res) const
{
m_Bop.perform_op(y.data(), m_cache.data());
res.noalias() = x.transpose() * m_cache;
}
// B-norm of a vector, ||x||_B = sqrt(x'Bx)
template <typename Arg>
Scalar norm(const Arg& x) const
{
using std::sqrt;
return sqrt(inner_product<Arg, Arg>(x, x));
}
// The "A" operator to generate the Krylov subspace
inline void perform_op(const Scalar* x_in, Scalar* y_out) const
{
m_op.perform_op(x_in, y_out);
}
};
///
/// \ingroup Operators
///
/// Placeholder for the B-operator when \f$B = I\f$. It carries no data and
/// only serves to select the ArnoldiOp partial specialization in which the
/// B-inner product reduces to the ordinary dot product.
///
class IdentityBOp
{};
///
/// \ingroup Operators
///
/// Partial specialization for the case \f$B = I\f$: every inner product
/// reduces to the standard dot product, so no scratch vector is required.
///
template <typename Scalar, typename OpType>
class ArnoldiOp<Scalar, OpType, IdentityBOp>
{
private:
    using Index = Eigen::Index;
    using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;

    const OpType& m_op;  // the "A" operator

public:
    ArnoldiOp(const OpType& op, const IdentityBOp& /*Bop*/) :
        m_op(op)
    {}

    inline Index rows() const { return m_op.rows(); }

    // Compute <x, y> = x'y for two vectors x and y
    template <typename Arg1, typename Arg2>
    Scalar inner_product(const Arg1& x, const Arg2& y) const
    {
        return x.dot(y);
    }

    // Compute res = <X, y> = X'y, where X is a matrix, y and res are vectors
    template <typename Arg1, typename Arg2>
    void trans_product(const Arg1& x, const Arg2& y, Eigen::Ref<Vector> res) const
    {
        res.noalias() = x.transpose() * y;
    }

    // B-norm of a vector; simply the L2 norm for regular eigenvalue problems
    template <typename Arg>
    Scalar norm(const Arg& x) const
    {
        return x.norm();
    }

    // Apply the "A" operator to generate the Krylov subspace
    inline void perform_op(const Scalar* x_in, Scalar* y_out) const
    {
        m_op.perform_op(x_in, y_out);
    }
};
///
/// @}
///
} // namespace Spectra
#endif // SPECTRA_ARNOLDI_OP_H
| 3,901
| 23.540881
| 100
|
h
|
null |
LRMI-main/Spectra/MatOp/internal/SymGEigsBucklingOp.h
|
// Copyright (C) 2020-2021 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_SYM_GEIGS_BUCKLING_OP_H
#define SPECTRA_SYM_GEIGS_BUCKLING_OP_H
#include <Eigen/Core>
#include "../SymShiftInvert.h"
#include "../SparseSymMatProd.h"
namespace Spectra {
///
/// \ingroup Operators
///
/// This class defines the matrix operation for generalized eigen solver in the
/// buckling mode. It computes \f$y=(K-\sigma K_G)^{-1}Kx\f$ for any
/// vector \f$x\f$, where \f$K\f$ is positive definite, \f$K_G\f$ is symmetric,
/// and \f$\sigma\f$ is a real shift.
/// This class is intended for internal use.
///
template <typename OpType = SymShiftInvert<double>,
typename BOpType = SparseSymMatProd<double>>
class SymGEigsBucklingOp
{
public:
using Scalar = typename OpType::Scalar;
private:
using Index = Eigen::Index;
using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;
OpType& m_op;
const BOpType& m_Bop;
mutable Vector m_cache; // temporary working space
public:
///
/// Constructor to create the matrix operation object.
///
/// \param op The \f$(K-\sigma K_G)^{-1}\f$ matrix operation object.
/// \param Bop The \f$K\f$ matrix operation object.
///
SymGEigsBucklingOp(OpType& op, const BOpType& Bop) :
m_op(op), m_Bop(Bop), m_cache(op.rows())
{}
///
/// Move constructor.
///
SymGEigsBucklingOp(SymGEigsBucklingOp&& other) :
m_op(other.m_op), m_Bop(other.m_Bop)
{
// We emulate the move constructor for Vector using Vector::swap()
m_cache.swap(other.m_cache);
}
///
/// Return the number of rows of the underlying matrix.
///
Index rows() const { return m_op.rows(); }
///
/// Return the number of columns of the underlying matrix.
///
Index cols() const { return m_op.rows(); }
///
/// Set the real shift \f$\sigma\f$.
///
void set_shift(const Scalar& sigma)
{
m_op.set_shift(sigma);
}
///
/// Perform the matrix operation \f$y=(K-\sigma K_G)^{-1}Kx\f$.
///
/// \param x_in Pointer to the \f$x\f$ vector.
/// \param y_out Pointer to the \f$y\f$ vector.
///
// y_out = inv(K - sigma * K_G) * K * x_in
void perform_op(const Scalar* x_in, Scalar* y_out) const
{
m_Bop.perform_op(x_in, m_cache.data());
m_op.perform_op(m_cache.data(), y_out);
}
};
} // namespace Spectra
#endif // SPECTRA_SYM_GEIGS_BUCKLING_OP_H
| 2,671
| 26.833333
| 79
|
h
|
null |
LRMI-main/Spectra/MatOp/internal/SymGEigsCayleyOp.h
|
// Copyright (C) 2020-2021 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_SYM_GEIGS_CAYLEY_OP_H
#define SPECTRA_SYM_GEIGS_CAYLEY_OP_H
#include <Eigen/Core>
#include "../SymShiftInvert.h"
#include "../SparseSymMatProd.h"
namespace Spectra {
///
/// \ingroup Operators
///
/// This class defines the matrix operation for generalized eigen solver in the
/// Cayley mode. It computes \f$y=(A-\sigma B)^{-1}(A+\sigma B)x\f$ for any
/// vector \f$x\f$, where \f$A\f$ is a symmetric matrix, \f$B\f$ is positive definite,
/// and \f$\sigma\f$ is a real shift.
/// This class is intended for internal use.
///
template <typename OpType = SymShiftInvert<double>,
          typename BOpType = SparseSymMatProd<double>>
class SymGEigsCayleyOp
{
public:
    using Scalar = typename OpType::Scalar;

private:
    using Index = Eigen::Index;
    using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;
    using MapConstVec = Eigen::Map<const Vector>;
    using MapVec = Eigen::Map<Vector>;

    OpType& m_op;            // applies (A - sigma * B)^{-1} to a vector
    const BOpType& m_Bop;    // applies B to a vector
    mutable Vector m_cache;  // temporary working space
    Scalar m_sigma;          // the current shift, set via set_shift()

public:
    ///
    /// Constructor to create the matrix operation object.
    ///
    /// \param op  The \f$(A-\sigma B)^{-1}\f$ matrix operation object.
    /// \param Bop The \f$B\f$ matrix operation object.
    ///
    // Fix: value-initialize m_sigma. Previously it was left uninitialized
    // until set_shift() was called, so calling perform_op() before
    // set_shift() read an indeterminate value (undefined behavior).
    SymGEigsCayleyOp(OpType& op, const BOpType& Bop) :
        m_op(op), m_Bop(Bop), m_cache(op.rows()), m_sigma(0)
    {}

    ///
    /// Move constructor.
    ///
    SymGEigsCayleyOp(SymGEigsCayleyOp&& other) :
        m_op(other.m_op), m_Bop(other.m_Bop), m_sigma(other.m_sigma)
    {
        // We emulate the move constructor for Vector using Vector::swap()
        m_cache.swap(other.m_cache);
    }

    ///
    /// Return the number of rows of the underlying matrix.
    ///
    Index rows() const { return m_op.rows(); }

    ///
    /// Return the number of columns of the underlying matrix.
    ///
    Index cols() const { return m_op.rows(); }

    ///
    /// Set the real shift \f$\sigma\f$.
    ///
    void set_shift(const Scalar& sigma)
    {
        m_op.set_shift(sigma);
        m_sigma = sigma;
    }

    ///
    /// Perform the matrix operation \f$y=(A-\sigma B)^{-1}(A+\sigma B)x\f$.
    ///
    /// \param x_in  Pointer to the \f$x\f$ vector.
    /// \param y_out Pointer to the \f$y\f$ vector.
    ///
    // y_out = inv(A - sigma * B) * (A + sigma * B) * x_in
    void perform_op(const Scalar* x_in, Scalar* y_out) const
    {
        // inv(A - sigma * B) * (A + sigma * B) * x
        //     = inv(A - sigma * B) * (A - sigma * B + 2 * sigma * B) * x
        //     = x + 2 * sigma * inv(A - sigma * B) * B * x
        m_Bop.perform_op(x_in, m_cache.data());
        m_op.perform_op(m_cache.data(), y_out);
        MapConstVec x(x_in, this->rows());
        MapVec y(y_out, this->rows());
        // noalias() is safe here even though y appears on the right-hand
        // side: the expression is purely coefficient-wise, so each y[i] is
        // read and written at the same index.
        y.noalias() = x + (Scalar(2) * m_sigma) * y;
    }
};
} // namespace Spectra
#endif // SPECTRA_SYM_GEIGS_CAYLEY_OP_H
| 3,163
| 28.849057
| 86
|
h
|
null |
LRMI-main/Spectra/MatOp/internal/SymGEigsCholeskyOp.h
|
// Copyright (C) 2016-2021 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_SYM_GEIGS_CHOLESKY_OP_H
#define SPECTRA_SYM_GEIGS_CHOLESKY_OP_H
#include <Eigen/Core>
#include "../DenseSymMatProd.h"
#include "../DenseCholesky.h"
namespace Spectra {
///
/// \ingroup Operators
///
/// This class defines the matrix operation for generalized eigen solver in the
/// Cholesky decomposition mode. It calculates \f$y=L^{-1}A(L')^{-1}x\f$ for any
/// vector \f$x\f$, where \f$L\f$ is the Cholesky decomposition of \f$B\f$.
/// This class is intended for internal use.
///
template <typename OpType = DenseSymMatProd<double>,
typename BOpType = DenseCholesky<double>>
class SymGEigsCholeskyOp
{
public:
using Scalar = typename OpType::Scalar;
private:
using Index = Eigen::Index;
using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;
const OpType& m_op;
const BOpType& m_Bop;
mutable Vector m_cache; // temporary working space
public:
///
/// Constructor to create the matrix operation object.
///
/// \param op The \f$A\f$ matrix operation object.
/// \param Bop The \f$B\f$ matrix operation object.
///
SymGEigsCholeskyOp(const OpType& op, const BOpType& Bop) :
m_op(op), m_Bop(Bop), m_cache(op.rows())
{}
///
/// Move constructor.
///
SymGEigsCholeskyOp(SymGEigsCholeskyOp&& other) :
m_op(other.m_op), m_Bop(other.m_Bop)
{
// We emulate the move constructor for Vector using Vector::swap()
m_cache.swap(other.m_cache);
}
///
/// Return the number of rows of the underlying matrix.
///
Index rows() const { return m_Bop.rows(); }
///
/// Return the number of columns of the underlying matrix.
///
Index cols() const { return m_Bop.rows(); }
///
/// Perform the matrix operation \f$y=L^{-1}A(L')^{-1}x\f$.
///
/// \param x_in Pointer to the \f$x\f$ vector.
/// \param y_out Pointer to the \f$y\f$ vector.
///
// y_out = inv(L) * A * inv(L') * x_in
void perform_op(const Scalar* x_in, Scalar* y_out) const
{
m_Bop.upper_triangular_solve(x_in, y_out);
m_op.perform_op(y_out, m_cache.data());
m_Bop.lower_triangular_solve(m_cache.data(), y_out);
}
};
} // namespace Spectra
#endif // SPECTRA_SYM_GEIGS_CHOLESKY_OP_H
| 2,548
| 27.965909
| 80
|
h
|
null |
LRMI-main/Spectra/MatOp/internal/SymGEigsRegInvOp.h
|
// Copyright (C) 2017-2021 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_SYM_GEIGS_REG_INV_OP_H
#define SPECTRA_SYM_GEIGS_REG_INV_OP_H
#include <Eigen/Core>
#include "../SparseSymMatProd.h"
#include "../SparseRegularInverse.h"
namespace Spectra {
///
/// \ingroup Operators
///
/// This class defines the matrix operation for generalized eigen solver in the
/// regular inverse mode. This class is intended for internal use.
///
template <typename OpType = SparseSymMatProd<double>,
typename BOpType = SparseRegularInverse<double>>
class SymGEigsRegInvOp
{
public:
using Scalar = typename OpType::Scalar;
private:
using Index = Eigen::Index;
using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;
const OpType& m_op;
const BOpType& m_Bop;
mutable Vector m_cache; // temporary working space
public:
///
/// Constructor to create the matrix operation object.
///
/// \param op The \f$A\f$ matrix operation object.
/// \param Bop The \f$B\f$ matrix operation object.
///
SymGEigsRegInvOp(const OpType& op, const BOpType& Bop) :
m_op(op), m_Bop(Bop), m_cache(op.rows())
{}
///
/// Move constructor.
///
SymGEigsRegInvOp(SymGEigsRegInvOp&& other) :
m_op(other.m_op), m_Bop(other.m_Bop)
{
// We emulate the move constructor for Vector using Vector::swap()
m_cache.swap(other.m_cache);
}
///
/// Return the number of rows of the underlying matrix.
///
Index rows() const { return m_Bop.rows(); }
///
/// Return the number of columns of the underlying matrix.
///
Index cols() const { return m_Bop.rows(); }
///
/// Perform the matrix operation \f$y=B^{-1}Ax\f$.
///
/// \param x_in Pointer to the \f$x\f$ vector.
/// \param y_out Pointer to the \f$y\f$ vector.
///
// y_out = inv(B) * A * x_in
void perform_op(const Scalar* x_in, Scalar* y_out) const
{
m_op.perform_op(x_in, m_cache.data());
m_Bop.solve(m_cache.data(), y_out);
}
};
} // namespace Spectra
#endif // SPECTRA_SYM_GEIGS_REG_INV_OP_H
| 2,330
| 26.423529
| 79
|
h
|
null |
LRMI-main/Spectra/MatOp/internal/SymGEigsShiftInvertOp.h
|
// Copyright (C) 2020-2021 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_SYM_GEIGS_SHIFT_INVERT_OP_H
#define SPECTRA_SYM_GEIGS_SHIFT_INVERT_OP_H
#include <Eigen/Core>
#include "../SymShiftInvert.h"
#include "../SparseSymMatProd.h"
namespace Spectra {
///
/// \ingroup Operators
///
/// This class defines the matrix operation for generalized eigen solver in the
/// shift-and-invert mode. It computes \f$y=(A-\sigma B)^{-1}Bx\f$ for any
/// vector \f$x\f$, where \f$A\f$ is a symmetric matrix, \f$B\f$ is positive definite,
/// and \f$\sigma\f$ is a real shift.
/// This class is intended for internal use.
///
template <typename OpType = SymShiftInvert<double>,
typename BOpType = SparseSymMatProd<double>>
class SymGEigsShiftInvertOp
{
public:
using Scalar = typename OpType::Scalar;
private:
using Index = Eigen::Index;
using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;
OpType& m_op;
const BOpType& m_Bop;
mutable Vector m_cache; // temporary working space
public:
///
/// Constructor to create the matrix operation object.
///
/// \param op The \f$(A-\sigma B)^{-1}\f$ matrix operation object.
/// \param Bop The \f$B\f$ matrix operation object.
///
SymGEigsShiftInvertOp(OpType& op, const BOpType& Bop) :
m_op(op), m_Bop(Bop), m_cache(op.rows())
{}
///
/// Move constructor.
///
SymGEigsShiftInvertOp(SymGEigsShiftInvertOp&& other) :
m_op(other.m_op), m_Bop(other.m_Bop)
{
// We emulate the move constructor for Vector using Vector::swap()
m_cache.swap(other.m_cache);
}
///
/// Return the number of rows of the underlying matrix.
///
Index rows() const { return m_op.rows(); }
///
/// Return the number of columns of the underlying matrix.
///
Index cols() const { return m_op.rows(); }
///
/// Set the real shift \f$\sigma\f$.
///
void set_shift(const Scalar& sigma)
{
m_op.set_shift(sigma);
}
///
/// Perform the matrix operation \f$y=(A-\sigma B)^{-1}Bx\f$.
///
/// \param x_in Pointer to the \f$x\f$ vector.
/// \param y_out Pointer to the \f$y\f$ vector.
///
// y_out = inv(A - sigma * B) * B * x_in
void perform_op(const Scalar* x_in, Scalar* y_out) const
{
m_Bop.perform_op(x_in, m_cache.data());
m_op.perform_op(m_cache.data(), y_out);
}
};
} // namespace Spectra
#endif // SPECTRA_SYM_GEIGS_SHIFT_INVERT_OP_H
| 2,702
| 27.15625
| 86
|
h
|
null |
LRMI-main/Spectra/Util/CompInfo.h
|
// Copyright (C) 2016-2021 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_COMP_INFO_H
#define SPECTRA_COMP_INFO_H
namespace Spectra {
///
/// \ingroup Enumerations
///
/// The enumeration to report the status of computation.
///
enum class CompInfo
{
    Successful,     ///< Computation was successful.

    NotComputed,    ///< Used in eigen solvers, indicating that the computation
                    ///< has not been conducted yet. Users should call
                    ///< the `compute()` member function of solvers first.

    NotConverging,  ///< Used in eigen solvers, indicating that some eigenvalues
                    ///< did not converge. The `compute()` function returns the
                    ///< number of eigenvalues that did converge.

    NumericalIssue  ///< Used in various matrix factorization classes; for example,
                    ///< in Cholesky decomposition it indicates that the
                    ///< matrix is not positive definite.
};
} // namespace Spectra
#endif // SPECTRA_COMP_INFO_H
| 1,213
| 31.810811
| 85
|
h
|
null |
LRMI-main/Spectra/Util/GEigsMode.h
|
// Copyright (C) 2016-2021 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_GEIGS_MODE_H
#define SPECTRA_GEIGS_MODE_H
namespace Spectra {
///
/// \ingroup Enumerations
///
/// The enumeration to specify the mode of generalized eigenvalue solver.
///
enum class GEigsMode
{
    Cholesky,        ///< Use the Cholesky decomposition of the B matrix to solve
                     ///< the generalized eigenvalue problem.
    RegularInverse,  ///< Regular inverse mode for generalized eigenvalue solver.
    ShiftInvert,     ///< Shift-and-invert mode for generalized eigenvalue solver.
    Buckling,        ///< Buckling mode for generalized eigenvalue solver.
    Cayley           ///< Cayley transformation mode for generalized eigenvalue solver.
};
} // namespace Spectra
#endif // SPECTRA_GEIGS_MODE_H
| 959
| 32.103448
| 88
|
h
|
null |
LRMI-main/Spectra/Util/SelectionRule.h
|
// Copyright (C) 2016-2021 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_SELECTION_RULE_H
#define SPECTRA_SELECTION_RULE_H
#include <vector> // std::vector
#include <cmath> // std::abs
#include <algorithm> // std::sort
#include <complex> // std::complex
#include <utility> // std::pair
#include <stdexcept> // std::invalid_argument
#include "Eigen/Core"
#include "TypeTraits.h"
namespace Spectra {
///
/// \defgroup Enumerations Enumerations
///
/// Enumeration types for the selection rule of eigenvalues.
///
///
/// \ingroup Enumerations
///
/// The enumeration of selection rules of desired eigenvalues.
///
enum class SortRule
{
    LargestMagn,   ///< Select eigenvalues with largest magnitude. Magnitude
                   ///< means the absolute value for real numbers and norm for
                   ///< complex numbers. Applies to both symmetric and general
                   ///< eigen solvers.
    LargestReal,   ///< Select eigenvalues with largest real part. Only for general eigen solvers.
    LargestImag,   ///< Select eigenvalues with largest imaginary part (in magnitude). Only for general eigen solvers.
    LargestAlge,   ///< Select eigenvalues with largest algebraic value, considering
                   ///< any negative sign. Only for symmetric eigen solvers.
    SmallestMagn,  ///< Select eigenvalues with smallest magnitude. Applies to both symmetric and general
                   ///< eigen solvers.
    SmallestReal,  ///< Select eigenvalues with smallest real part. Only for general eigen solvers.
    SmallestImag,  ///< Select eigenvalues with smallest imaginary part (in magnitude). Only for general eigen solvers.
    SmallestAlge,  ///< Select eigenvalues with smallest algebraic value. Only for symmetric eigen solvers.
    BothEnds       ///< Select eigenvalues half from each end of the spectrum. When
                   ///< `nev` is odd, compute more from the high end. Only for symmetric eigen solvers.
};
/// \cond
// When comparing eigenvalues, we first calculate the "target" to sort.
// For example, if we want to choose the eigenvalues with
// largest magnitude, the target will be -abs(x).
// The minus sign is due to the fact that std::sort() sorts in ascending order.
// Default target: throw an exception
// Fallback for (Scalar, Rule) combinations with no valid specialization:
// always rejects the request at run time.
template <typename Scalar, SortRule Rule>
class SortingTarget
{
public:
    static ElemType<Scalar> get(const Scalar& val)
    {
        using std::abs;
        throw std::invalid_argument("incompatible selection rule");
        // Unreachable; present only so every code path has a return value.
        return -abs(val);
    }
};
// Specialization for SortRule::LargestMagn
// This covers [float, double, complex] x [SortRule::LargestMagn]
template <typename Scalar>
class SortingTarget<Scalar, SortRule::LargestMagn>
{
public:
    static ElemType<Scalar> get(const Scalar& val)
    {
        using std::abs;
        // Negated so that std::sort's ascending order yields largest-magnitude first
        return -abs(val);
    }
};
// Specialization for SortRule::LargestReal
// This covers [complex] x [SortRule::LargestReal]
template <typename RealType>
class SortingTarget<std::complex<RealType>, SortRule::LargestReal>
{
public:
    static RealType get(const std::complex<RealType>& val)
    {
        // Negated so that ascending sort order yields largest real part first
        return -val.real();
    }
};
// Specialization for SortRule::LargestImag
// This covers [complex] x [SortRule::LargestImag]
template <typename RealType>
class SortingTarget<std::complex<RealType>, SortRule::LargestImag>
{
public:
    static RealType get(const std::complex<RealType>& val)
    {
        using std::abs;
        // Negated magnitude of the imaginary part: ascending sort => largest first
        return -abs(val.imag());
    }
};
// Specialization for SortRule::LargestAlge
// This covers [float, double] x [SortRule::LargestAlge]
template <typename Scalar>
class SortingTarget<Scalar, SortRule::LargestAlge>
{
public:
    static Scalar get(const Scalar& val)
    {
        // Negated so that ascending sort order yields largest algebraic value first
        return -val;
    }
};
// Here SortRule::BothEnds is the same as SortRule::LargestAlge, but
// we need some additional steps, which are done in
// SymEigsSolver.h => retrieve_ritzpair().
// There we move the smallest values to the proper locations.
template <typename Scalar>
class SortingTarget<Scalar, SortRule::BothEnds>
{
public:
    static Scalar get(const Scalar& val)
    {
        // Same target as LargestAlge; the interleaving of the two ends is
        // done later in SymEigsSolver.h => retrieve_ritzpair().
        return -val;
    }
};
// Specialization for SortRule::SmallestMagn
// This covers [float, double, complex] x [SortRule::SmallestMagn]
template <typename Scalar>
class SortingTarget<Scalar, SortRule::SmallestMagn>
{
public:
    static ElemType<Scalar> get(const Scalar& val)
    {
        using std::abs;
        // Plain magnitude: ascending sort yields smallest-magnitude first
        return abs(val);
    }
};
// Specialization for SortRule::SmallestReal
// This covers [complex] x [SortRule::SmallestReal]
template <typename RealType>
class SortingTarget<std::complex<RealType>, SortRule::SmallestReal>
{
public:
    static RealType get(const std::complex<RealType>& val)
    {
        // Plain real part: ascending sort yields smallest real part first
        return val.real();
    }
};
// Specialization for SortRule::SmallestImag
// This covers [complex] x [SortRule::SmallestImag]
template <typename RealType>
class SortingTarget<std::complex<RealType>, SortRule::SmallestImag>
{
public:
    static RealType get(const std::complex<RealType>& val)
    {
        using std::abs;
        // Magnitude of the imaginary part: ascending sort yields smallest first
        return abs(val.imag());
    }
};
// Specialization for SortRule::SmallestAlge
// This covers [float, double] x [SortRule::SmallestAlge]
template <typename Scalar>
class SortingTarget<Scalar, SortRule::SmallestAlge>
{
public:
    static Scalar get(const Scalar& val)
    {
        // Identity: ascending sort yields smallest algebraic value first
        return val;
    }
};
// Sort eigenvalues
// Computes the permutation that sorts a range of eigenvalues according to
// the given selection rule. The eigenvalue array is not modified.
template <typename T, SortRule Rule>
class SortEigenvalue
{
private:
    using Index = Eigen::Index;
    using IndexArray = std::vector<Index>;

    const T* m_evals;    // non-owning pointer to the eigenvalue array
    IndexArray m_index;  // the sorting permutation

public:
    // Comparator kept public for backward compatibility: compares the
    // eigenvalues that indices i and j point to, via the rule's sort target.
    // (Marked const so copies are not required to invoke it.)
    inline bool operator()(Index i, Index j) const
    {
        return SortingTarget<T, Rule>::get(m_evals[i]) < SortingTarget<T, Rule>::get(m_evals[j]);
    }

    // \param start Pointer to the first eigenvalue.
    // \param size  Number of eigenvalues to sort.
    SortEigenvalue(const T* start, Index size) :
        m_evals(start), m_index(size)
    {
        for (Index i = 0; i < size; i++)
        {
            m_index[i] = i;
        }
        // Fix: sort with a lightweight lambda instead of passing *this.
        // std::sort takes its comparator by value, so passing *this copied
        // the whole object -- including the m_index vector being sorted.
        const T* evals = m_evals;
        std::sort(m_index.begin(), m_index.end(),
                  [evals](Index i, Index j) {
                      return SortingTarget<T, Rule>::get(evals[i]) < SortingTarget<T, Rule>::get(evals[j]);
                  });
    }

    // Return a copy of the sorting permutation
    inline IndexArray index() const { return m_index; }
    // Hand the permutation over to `other` without copying
    inline void swap(IndexArray& other) { m_index.swap(other); }
};
// Sort values[:len] according to the selection rule, and return the indices
// Sort values[:len] according to the selection rule, and return the indices
template <typename Scalar>
std::vector<Eigen::Index> argsort(SortRule selection, const Eigen::Matrix<Scalar, Eigen::Dynamic, 1>& values, Eigen::Index len)
{
    using Index = Eigen::Index;

    // Compute the permutation that puts the wanted Ritz values first
    std::vector<Index> ind;
    if (selection == SortRule::LargestMagn)
    {
        SortEigenvalue<Scalar, SortRule::LargestMagn> sorting(values.data(), len);
        sorting.swap(ind);
    }
    else if (selection == SortRule::BothEnds || selection == SortRule::LargestAlge)
    {
        // BothEnds first sorts by largest algebraic value; the interleaving
        // of the two ends happens below
        SortEigenvalue<Scalar, SortRule::LargestAlge> sorting(values.data(), len);
        sorting.swap(ind);
    }
    else if (selection == SortRule::SmallestMagn)
    {
        SortEigenvalue<Scalar, SortRule::SmallestMagn> sorting(values.data(), len);
        sorting.swap(ind);
    }
    else if (selection == SortRule::SmallestAlge)
    {
        SortEigenvalue<Scalar, SortRule::SmallestAlge> sorting(values.data(), len);
        sorting.swap(ind);
    }
    else
    {
        throw std::invalid_argument("unsupported selection rule");
    }

    // For SortRule::BothEnds, interleave the two ends of the algebraically
    // sorted list:
    //     Largest => Smallest => 2nd largest => 2nd smallest => ...
    // With this order the first k entries always form the wanted collection,
    // whether k is nev_updated (used in SymEigsBase::restart()) or nev
    // (used in SymEigsBase::sort_ritzpair()).
    if (selection == SortRule::BothEnds)
    {
        std::vector<Index> interleaved(len);
        Index front = 0, back = len - 1;
        for (Index i = 0; i < len; i++)
        {
            // Even positions take from the large end, odd from the small end
            interleaved[i] = (i % 2 == 0) ? ind[front++] : ind[back--];
        }
        ind.swap(interleaved);
    }
    return ind;
}
// Default vector length
// Convenience overload: sort the whole vector
template <typename Scalar>
std::vector<Eigen::Index> argsort(SortRule selection, const Eigen::Matrix<Scalar, Eigen::Dynamic, 1>& values)
{
    const Eigen::Index len = values.size();
    return argsort<Scalar>(selection, values, len);
}
/// \endcond
} // namespace Spectra
#endif // SPECTRA_SELECTION_RULE_H
| 8,908
| 28.598007
| 127
|
h
|
null |
LRMI-main/Spectra/Util/SimpleRandom.h
|
// Copyright (C) 2016-2021 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_SIMPLE_RANDOM_H
#define SPECTRA_SIMPLE_RANDOM_H
#include "Eigen/Core"
/// \cond
namespace Spectra {
// We need a simple pseudo random number generator here:
// 1. It is used to generate initial and restarted residual vector.
// 2. It is not necessary to be so "random" and advanced. All we hope
// is that the residual vector is not in the space spanned by the
// current Krylov space. This should be met almost surely.
// 3. We don't want to call RNG in C++, since we actually want the
// algorithm to be deterministic. Also, calling RNG in C/C++ is not
// allowed in R packages submitted to CRAN.
// 4. The method should be as simple as possible, so an LCG is enough.
// 5. Based on public domain code by Ray Gardner
// http://stjarnhimlen.se/snippets/rg_rand.c
template <typename Scalar = double>
class SimpleRandom
{
private:
    using Index = Eigen::Index;
    using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;

    static constexpr unsigned int m_a = 16807;           // LCG multiplier
    static constexpr unsigned long m_max = 2147483647L;  // modulus, 2^31 - 1
    long m_rand;                                         // current generator state

    // Advance the LCG state without overflowing: the multiplication is split
    // into 16-bit halves, and each carry beyond 2^31 - 1 is folded back in.
    inline long next_long_rand(long seed) const
    {
        unsigned long low, high;
        low = m_a * (long) (seed & 0xFFFF);
        high = m_a * (long) ((unsigned long) seed >> 16);
        low += (high & 0x7FFF) << 16;
        if (low > m_max)
        {
            low &= m_max;
            ++low;
        }
        low += high >> 15;
        if (low > m_max)
        {
            low &= m_max;
            ++low;
        }
        return (long) low;
    }

public:
    // Seed the generator; a zero seed is replaced by 1 so the state is never stuck at 0
    SimpleRandom(unsigned long init_seed) :
        m_rand(init_seed ? (init_seed & m_max) : 1)
    {}

    // Return a single random number, ranging from -0.5 to 0.5
    Scalar random()
    {
        m_rand = next_long_rand(m_rand);
        return Scalar(m_rand) / Scalar(m_max) - Scalar(0.5);
    }

    // Fill the given vector with random numbers ranging from -0.5 to 0.5
    void random_vec(Vector& vec)
    {
        const Index len = vec.size();
        for (Index i = 0; i < len; i++)
        {
            m_rand = next_long_rand(m_rand);
            vec[i] = Scalar(m_rand);
        }
        // Rescale the raw states into [-0.5, 0.5] in a single array pass
        vec.array() = vec.array() / Scalar(m_max) - Scalar(0.5);
    }

    // Return a freshly generated random vector of length `len`,
    // with entries ranging from -0.5 to 0.5
    Vector random_vec(const Index len)
    {
        Vector res(len);
        random_vec(res);
        return res;
    }
};
} // namespace Spectra
/// \endcond
#endif // SPECTRA_SIMPLE_RANDOM_H
| 2,841
| 27.42
| 70
|
h
|
null |
LRMI-main/Spectra/Util/TypeTraits.h
|
// Copyright (C) 2018-2021 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_TYPE_TRAITS_H
#define SPECTRA_TYPE_TRAITS_H
#include "Eigen/Core"
#include <limits>
/// \cond
// Clang-Format will have unintended effects:
// static constexpr Scalar(min)()
// So we turn it off here
//
// clang-format off
namespace Spectra {
// For a real value type "Scalar", we want to know its smallest
// positive value, i.e., std::numeric_limits<Scalar>::min().
// However, we must take non-standard value types into account,
// so we rely on Eigen::NumTraits.
//
// Eigen::NumTraits has defined epsilon() and lowest(), but
// lowest() means negative highest(), which is a very small
// negative value.
//
// Therefore, we manually define this limit, and use eplison()^3
// to mimic it for non-standard types.
// Generic definition
// Generic definition: relies on Eigen::numext so that non-standard scalar
// types (without std::numeric_limits support) still work.
template <typename Scalar>
struct TypeTraits
{
    // Machine epsilon of the scalar type
    static constexpr Scalar epsilon()
    {
        return Eigen::numext::numeric_limits<Scalar>::epsilon();
    }
    // Smallest positive value; epsilon()^3 mimics numeric_limits::min()
    // for types that do not provide it
    static constexpr Scalar (min)()
    {
        return epsilon() * epsilon() * epsilon();
    }
};
// Full specialization
// Full specialization for float: use the exact std::numeric_limits values
template <>
struct TypeTraits<float>
{
    static constexpr float epsilon()
    {
        return std::numeric_limits<float>::epsilon();
    }
    static constexpr float (min)()
    {
        return (std::numeric_limits<float>::min)();
    }
};
// Full specialization for double: use the exact std::numeric_limits values
template <>
struct TypeTraits<double>
{
    static constexpr double epsilon()
    {
        return std::numeric_limits<double>::epsilon();
    }
    static constexpr double (min)()
    {
        return (std::numeric_limits<double>::min)();
    }
};
// Full specialization for long double: use the exact std::numeric_limits values
template <>
struct TypeTraits<long double>
{
    static constexpr long double epsilon()
    {
        return std::numeric_limits<long double>::epsilon();
    }
    static constexpr long double (min)()
    {
        return (std::numeric_limits<long double>::min)();
    }
};
// Get the element type of a "scalar"
// ElemType<double> => double
// ElemType<std::complex<double>> => double
// Element (real) type of a "scalar":
//   ElemType<double>               => double
//   ElemType<std::complex<double>> => double
template <typename T>
using ElemType = typename Eigen::NumTraits<T>::Real;
} // namespace Spectra
/// \endcond
#endif // SPECTRA_TYPE_TRAITS_H
| 2,365
| 22.66
| 70
|
h
|
null |
LRMI-main/Spectra/Util/Version.h
|
// Copyright (C) 2020-2021 Yixuan Qiu <yixuan.qiu@cos.name>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
#ifndef SPECTRA_VERSION_H
#define SPECTRA_VERSION_H
#define SPECTRA_MAJOR_VERSION 1
#define SPECTRA_MINOR_VERSION 0
#define SPECTRA_PATCH_VERSION 0
#define SPECTRA_VERSION (SPECTRA_MAJOR_VERSION * 10000 + SPECTRA_MINOR_VERSION * 100 + SPECTRA_PATCH_VERSION)
#endif // SPECTRA_VERSION_H
| 556
| 31.764706
| 109
|
h
|
null |
LRMI-main/unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_TENSOR_TENSOR_ASSIGN_H
#define EIGEN_CXX11_TENSOR_TENSOR_ASSIGN_H
namespace Eigen {
/** \class TensorAssign
* \ingroup CXX11_Tensor_Module
*
* \brief The tensor assignment class.
*
* This class is represents the assignment of the values resulting from the evaluation of
* the rhs expression to the memory locations denoted by the lhs expression.
*/
namespace internal {
// Expression traits for TensorAssignOp. The scalar type, number of
// dimensions, layout and pointer type all come from the left-hand
// (destination) expression; the index type is the promotion of both sides'.
template<typename LhsXprType, typename RhsXprType>
struct traits<TensorAssignOp<LhsXprType, RhsXprType> >
{
  typedef typename LhsXprType::Scalar Scalar;
  typedef typename traits<LhsXprType>::StorageKind StorageKind;
  typedef typename promote_index_type<typename traits<LhsXprType>::Index,
                                      typename traits<RhsXprType>::Index>::type Index;
  typedef typename LhsXprType::Nested LhsNested;
  typedef typename RhsXprType::Nested RhsNested;
  typedef typename remove_reference<LhsNested>::type _LhsNested;
  typedef typename remove_reference<RhsNested>::type _RhsNested;
  static const std::size_t NumDimensions = internal::traits<LhsXprType>::NumDimensions;
  static const int Layout = internal::traits<LhsXprType>::Layout;
  typedef typename traits<LhsXprType>::PointerType PointerType;
  enum {
    Flags = 0
  };
};
// Evaluate TensorAssignOp by const reference (no temporary materialization)
template<typename LhsXprType, typename RhsXprType>
struct eval<TensorAssignOp<LhsXprType, RhsXprType>, Eigen::Dense>
{
  typedef const TensorAssignOp<LhsXprType, RhsXprType>& type;
};
// Nest TensorAssignOp by value inside enclosing expressions
template<typename LhsXprType, typename RhsXprType>
struct nested<TensorAssignOp<LhsXprType, RhsXprType>, 1, typename eval<TensorAssignOp<LhsXprType, RhsXprType> >::type>
{
  typedef TensorAssignOp<LhsXprType, RhsXprType> type;
};
} // end namespace internal
// Expression node representing "evaluate rhs into the storage of lhs".
// Holds a mutable reference to the destination and a (possibly nested-by-
// value) reference to the source expression.
template<typename LhsXprType, typename RhsXprType>
class TensorAssignOp : public TensorBase<TensorAssignOp<LhsXprType, RhsXprType> >
{
  public:
  typedef typename Eigen::internal::traits<TensorAssignOp>::Scalar Scalar;
  typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
  typedef typename LhsXprType::CoeffReturnType CoeffReturnType;
  typedef typename Eigen::internal::nested<TensorAssignOp>::type Nested;
  typedef typename Eigen::internal::traits<TensorAssignOp>::StorageKind StorageKind;
  typedef typename Eigen::internal::traits<TensorAssignOp>::Index Index;

  static const int NumDims = Eigen::internal::traits<TensorAssignOp>::NumDimensions;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorAssignOp(LhsXprType& lhs, const RhsXprType& rhs)
      : m_lhs_xpr(lhs), m_rhs_xpr(rhs) {}

  /** \returns the nested expressions */
  // NOTE(review): the C-style pointer cast strips the remove_all-applied
  // nesting so a mutable reference to the destination can be returned from
  // a const member function; m_lhs_xpr is itself a non-const reference.
  EIGEN_DEVICE_FUNC
  typename internal::remove_all<typename LhsXprType::Nested>::type&
  lhsExpression() const { return *((typename internal::remove_all<typename LhsXprType::Nested>::type*)&m_lhs_xpr); }

  EIGEN_DEVICE_FUNC
  const typename internal::remove_all<typename RhsXprType::Nested>::type&
  rhsExpression() const { return m_rhs_xpr; }

  protected:
  typename internal::remove_all<typename LhsXprType::Nested>::type& m_lhs_xpr;
  const typename internal::remove_all<typename RhsXprType::Nested>::type& m_rhs_xpr;
};
template<typename LeftArgType, typename RightArgType, typename Device>
struct TensorEvaluator<const TensorAssignOp<LeftArgType, RightArgType>, Device>
{
typedef TensorAssignOp<LeftArgType, RightArgType> XprType;
typedef typename XprType::Index Index;
typedef typename XprType::Scalar Scalar;
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
typedef typename TensorEvaluator<RightArgType, Device>::Dimensions Dimensions;
typedef StorageMemory<CoeffReturnType, Device> Storage;
typedef typename Storage::Type EvaluatorPointerType;
static const int PacketSize = PacketType<CoeffReturnType, Device>::size;
static const int NumDims = XprType::NumDims;
enum {
IsAligned = int(TensorEvaluator<LeftArgType, Device>::IsAligned) &
int(TensorEvaluator<RightArgType, Device>::IsAligned),
PacketAccess = int(TensorEvaluator<LeftArgType, Device>::PacketAccess) &
int(TensorEvaluator<RightArgType, Device>::PacketAccess),
BlockAccess = int(TensorEvaluator<LeftArgType, Device>::BlockAccess) &
int(TensorEvaluator<RightArgType, Device>::BlockAccess),
PreferBlockAccess = int(TensorEvaluator<LeftArgType, Device>::PreferBlockAccess) |
int(TensorEvaluator<RightArgType, Device>::PreferBlockAccess),
Layout = TensorEvaluator<LeftArgType, Device>::Layout,
RawAccess = TensorEvaluator<LeftArgType, Device>::RawAccess
};
//===- Tensor block evaluation strategy (see TensorBlock.h) -------------===//
typedef internal::TensorBlockDescriptor<NumDims, Index> TensorBlockDesc;
typedef internal::TensorBlockScratchAllocator<Device> TensorBlockScratch;
typedef typename TensorEvaluator<const RightArgType, Device>::TensorBlock
RightTensorBlock;
//===--------------------------------------------------------------------===//
TensorEvaluator(const XprType& op, const Device& device) :
m_leftImpl(op.lhsExpression(), device),
m_rightImpl(op.rhsExpression(), device)
{
EIGEN_STATIC_ASSERT(
(static_cast<int>(TensorEvaluator<LeftArgType, Device>::Layout) ==
static_cast<int>(TensorEvaluator<RightArgType, Device>::Layout)),
YOU_MADE_A_PROGRAMMING_MISTAKE);
}
EIGEN_DEVICE_FUNC const Dimensions& dimensions() const
{
// The dimensions of the lhs and the rhs tensors should be equal to prevent
// overflows and ensure the result is fully initialized.
// TODO: use left impl instead if right impl dimensions are known at compile time.
return m_rightImpl.dimensions();
}
EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType) {
eigen_assert(dimensions_match(m_leftImpl.dimensions(), m_rightImpl.dimensions()));
m_leftImpl.evalSubExprsIfNeeded(NULL);
// If the lhs provides raw access to its storage area (i.e. if m_leftImpl.data() returns a non
// null value), attempt to evaluate the rhs expression in place. Returns true iff in place
// evaluation isn't supported and the caller still needs to manually assign the values generated
// by the rhs to the lhs.
return m_rightImpl.evalSubExprsIfNeeded(m_leftImpl.data());
}
#ifdef EIGEN_USE_THREADS
// Asynchronous variant: evaluates the lhs first, then the rhs (possibly in
// place into the lhs buffer), and finally invokes `done` with the same
// "needs manual assignment" flag as the synchronous version.
template <typename EvalSubExprsCallback>
EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync(
    EvaluatorPointerType, EvalSubExprsCallback done) {
  m_leftImpl.evalSubExprsIfNeededAsync(nullptr, [this, done](bool) {
    m_rightImpl.evalSubExprsIfNeededAsync(
        m_leftImpl.data(), [done](bool need_assign) { done(need_assign); });
  });
}
#endif // EIGEN_USE_THREADS
// Releases any temporary buffers owned by either side.
EIGEN_STRONG_INLINE void cleanup() {
  m_leftImpl.cleanup();
  m_rightImpl.cleanup();
}
// Assigns a single coefficient from the rhs to the lhs.
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalScalar(Index i) {
  m_leftImpl.coeffRef(i) = m_rightImpl.coeff(i);
}
// Assigns one packet of coefficients, using aligned loads/stores only when
// the corresponding evaluator guarantees alignment.
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalPacket(Index i) {
  const int LhsStoreMode = TensorEvaluator<LeftArgType, Device>::IsAligned ? Aligned : Unaligned;
  const int RhsLoadMode = TensorEvaluator<RightArgType, Device>::IsAligned ? Aligned : Unaligned;
  m_leftImpl.template writePacket<LhsStoreMode>(i, m_rightImpl.template packet<RhsLoadMode>(i));
}
// Reads back an (already assigned) coefficient from the lhs.
EIGEN_DEVICE_FUNC CoeffReturnType coeff(Index index) const
{
  return m_leftImpl.coeff(index);
}
// Reads back an (already assigned) packet from the lhs.
template<int LoadMode>
EIGEN_DEVICE_FUNC PacketReturnType packet(Index index) const
{
  return m_leftImpl.template packet<LoadMode>(index);
}
// Estimated per-coefficient cost of performing the assignment.
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
costPerCoeff(bool vectorized) const {
  // We assume that evalPacket or evalScalar is called to perform the
  // assignment and account for the cost of the write here, but reduce left
  // cost by one load because we are using m_leftImpl.coeffRef.
  TensorOpCost left = m_leftImpl.costPerCoeff(vectorized);
  return m_rightImpl.costPerCoeff(vectorized) +
         TensorOpCost(
             numext::maxi(0.0, left.bytes_loaded() - sizeof(CoeffReturnType)),
             left.bytes_stored(), left.compute_cycles()) +
         TensorOpCost(0, sizeof(CoeffReturnType), 0, vectorized, PacketSize);
}
// Combined block-evaluation resource requirements of both sides.
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
internal::TensorBlockResourceRequirements getResourceRequirements() const {
  return internal::TensorBlockResourceRequirements::merge(
      m_leftImpl.getResourceRequirements(),
      m_rightImpl.getResourceRequirements());
}
// Block-based assignment: evaluates a block of the rhs, when possible
// directly into the lhs buffer; otherwise materializes it and writes it out.
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalBlock(
    TensorBlockDesc& desc, TensorBlockScratch& scratch) {
  if (TensorEvaluator<LeftArgType, Device>::RawAccess &&
      m_leftImpl.data() != NULL) {
    // If destination has raw data access, we pass it as a potential
    // destination for a block descriptor evaluation.
    desc.template AddDestinationBuffer<Layout>(
        /*dst_base=*/m_leftImpl.data() + desc.offset(),
        /*dst_strides=*/internal::strides<Layout>(m_leftImpl.dimensions()));
  }

  RightTensorBlock block = m_rightImpl.block(desc, scratch, /*root_of_expr_ast=*/true);
  // If block was evaluated into a destination, there is no need to do assignment.
  if (block.kind() != internal::TensorBlockKind::kMaterializedInOutput) {
    m_leftImpl.writeBlock(desc, block);
  }
  block.cleanup();
}
#ifdef EIGEN_USE_SYCL
// binding placeholder accessors to a command group handler for SYCL
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const {
  m_leftImpl.bind(cgh);
  m_rightImpl.bind(cgh);
}
#endif
// Raw pointer to the destination buffer, if the lhs exposes one.
EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return m_leftImpl.data(); }

private:
TensorEvaluator<LeftArgType, Device> m_leftImpl;    // destination evaluator
TensorEvaluator<RightArgType, Device> m_rightImpl;  // source evaluator
};
}
#endif // EIGEN_CXX11_TENSOR_TENSOR_ASSIGN_H
| 10,323
| 40.629032
| 118
|
h
|
null |
LRMI-main/unsupported/Eigen/CXX11/src/Tensor/TensorChipping.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_TENSOR_TENSOR_CHIPPING_H
#define EIGEN_CXX11_TENSOR_TENSOR_CHIPPING_H
namespace Eigen {
/** \class TensorKChippingReshaping
* \ingroup CXX11_Tensor_Module
*
* \brief A chip is a thin slice, corresponding to a column or a row in a 2-d tensor.
*
*
*/
namespace internal {
template<DenseIndex DimId, typename XprType>
struct traits<TensorChippingOp<DimId, XprType> > : public traits<XprType>
{
typedef typename XprType::Scalar Scalar;
typedef traits<XprType> XprTraits;
typedef typename XprTraits::StorageKind StorageKind;
typedef typename XprTraits::Index Index;
typedef typename XprType::Nested Nested;
typedef typename remove_reference<Nested>::type _Nested;
static const int NumDimensions = XprTraits::NumDimensions - 1;
static const int Layout = XprTraits::Layout;
typedef typename XprTraits::PointerType PointerType;
};
// Chipping expressions are evaluated by (device-aware) reference.
template<DenseIndex DimId, typename XprType>
struct eval<TensorChippingOp<DimId, XprType>, Eigen::Dense>
{
  typedef const TensorChippingOp<DimId, XprType> EIGEN_DEVICE_REF type;
};
// Nested chipping expressions are stored by value.
template<DenseIndex DimId, typename XprType>
struct nested<TensorChippingOp<DimId, XprType>, 1, typename eval<TensorChippingOp<DimId, XprType> >::type>
{
  typedef TensorChippingOp<DimId, XprType> type;
};
// Compile-time dimension id: the chipped dimension is a template constant,
// so no storage is needed and actualDim() folds to a constant.
template <DenseIndex DimId>
struct DimensionId
{
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DimensionId(DenseIndex dim) {
    EIGEN_UNUSED_VARIABLE(dim);
    // The runtime value must agree with the compile-time one.
    eigen_assert(dim == DimId);
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DenseIndex actualDim() const {
    return DimId;
  }
};
// Runtime dimension id: used when the chipped dimension is only known at
// run time (DimId == Dynamic), so it has to be stored.
template <>
struct DimensionId<Dynamic>
{
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DimensionId(DenseIndex dim) : actual_dim(dim) {
    eigen_assert(dim >= 0);
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DenseIndex actualDim() const {
    return actual_dim;
  }
 private:
  const DenseIndex actual_dim;
};
} // end namespace internal
// Expression class for chipping: selects the slice at `offset` along
// dimension `dim` of the nested expression, yielding a tensor with one
// dimension fewer.
template<DenseIndex DimId, typename XprType>
class TensorChippingOp : public TensorBase<TensorChippingOp<DimId, XprType> >
{
  public:
    typedef TensorBase<TensorChippingOp<DimId, XprType> > Base;
    typedef typename Eigen::internal::traits<TensorChippingOp>::Scalar Scalar;
    typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
    typedef typename XprType::CoeffReturnType CoeffReturnType;
    typedef typename Eigen::internal::nested<TensorChippingOp>::type Nested;
    typedef typename Eigen::internal::traits<TensorChippingOp>::StorageKind StorageKind;
    typedef typename Eigen::internal::traits<TensorChippingOp>::Index Index;

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorChippingOp(const XprType& expr, const Index offset, const Index dim)
        : m_xpr(expr), m_offset(offset), m_dim(dim) {
    }

    // Position of the selected slice along the chipped dimension.
    EIGEN_DEVICE_FUNC
    const Index offset() const { return m_offset; }
    // Index of the chipped dimension.
    EIGEN_DEVICE_FUNC
    const Index dim() const { return m_dim.actualDim(); }

    EIGEN_DEVICE_FUNC
    const typename internal::remove_all<typename XprType::Nested>::type&
    expression() const { return m_xpr; }

    EIGEN_TENSOR_INHERIT_ASSIGNMENT_OPERATORS(TensorChippingOp)

  protected:
    typename XprType::Nested m_xpr;
    const Index m_offset;                      // offset along the chipped dimension
    const internal::DimensionId<DimId> m_dim;  // chipped dimension (static or dynamic)
};
// Eval as rvalue
// Evaluator for chipping used as an rvalue: read-only access to the slice of
// the input at a fixed offset along the chipped dimension.
template<DenseIndex DimId, typename ArgType, typename Device>
struct TensorEvaluator<const TensorChippingOp<DimId, ArgType>, Device>
{
  typedef TensorChippingOp<DimId, ArgType> XprType;
  static const int NumInputDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value;
  static const int NumDims = NumInputDims-1;
  typedef typename XprType::Index Index;
  typedef DSizes<Index, NumDims> Dimensions;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  static const int PacketSize = PacketType<CoeffReturnType, Device>::size;
  typedef StorageMemory<CoeffReturnType, Device> Storage;
  typedef typename Storage::Type EvaluatorPointerType;

  enum {
    // Alignment can't be guaranteed at compile time since it depends on the
    // slice offsets.
    IsAligned = false,
    Layout = TensorEvaluator<ArgType, Device>::Layout,
    PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
    BlockAccess = TensorEvaluator<ArgType, Device>::BlockAccess,
    // Chipping of outer-most dimension is a trivial operation, because we can
    // read and write directly from the underlying tensor using single offset.
    IsOuterChipping = (static_cast<int>(Layout) == ColMajor && DimId == NumInputDims - 1) ||
                      (static_cast<int>(Layout) == RowMajor && DimId == 0),
    // Chipping inner-most dimension.
    IsInnerChipping = (static_cast<int>(Layout) == ColMajor && DimId == 0) ||
                      (static_cast<int>(Layout) == RowMajor && DimId == NumInputDims - 1),
    // Prefer block access if the underlying expression prefers it, otherwise
    // only if chipping is not trivial.
    PreferBlockAccess = TensorEvaluator<ArgType, Device>::PreferBlockAccess ||
                        !IsOuterChipping,
    CoordAccess = false, // to be implemented
    RawAccess = false
  };

  typedef typename internal::remove_const<Scalar>::type ScalarNoConst;

  //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===//
  typedef internal::TensorBlockDescriptor<NumDims, Index> TensorBlockDesc;
  typedef internal::TensorBlockScratchAllocator<Device> TensorBlockScratch;

  typedef internal::TensorBlockDescriptor<NumInputDims, Index>
      ArgTensorBlockDesc;
  typedef typename TensorEvaluator<const ArgType, Device>::TensorBlock
      ArgTensorBlock;

  typedef typename internal::TensorMaterializedBlock<ScalarNoConst, NumDims,
                                                     Layout, Index>
      TensorBlock;
  //===--------------------------------------------------------------------===//

  EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : m_impl(op.expression(), device), m_dim(op.dim()), m_device(device)
  {
    EIGEN_STATIC_ASSERT((NumInputDims >= 1), YOU_MADE_A_PROGRAMMING_MISTAKE);
    eigen_assert(NumInputDims > m_dim.actualDim());

    const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
    eigen_assert(op.offset() < input_dims[m_dim.actualDim()]);

    // Output dimensions: the input dimensions with the chipped one removed.
    int j = 0;
    for (int i = 0; i < NumInputDims; ++i) {
      if (i != m_dim.actualDim()) {
        m_dimensions[j] = input_dims[i];
        ++j;
      }
    }

    // m_stride: product of the input dimensions that are inner relative to
    // the chipped one (its stride in the input tensor).
    // m_inputStride: m_stride times the size of the chipped dimension.
    m_stride = 1;
    m_inputStride = 1;
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      for (int i = 0; i < m_dim.actualDim(); ++i) {
        m_stride *= input_dims[i];
        m_inputStride *= input_dims[i];
      }
    } else {
      for (int i = NumInputDims-1; i > m_dim.actualDim(); --i) {
        m_stride *= input_dims[i];
        m_inputStride *= input_dims[i];
      }
    }
    m_inputStride *= input_dims[m_dim.actualDim()];
    m_inputOffset = m_stride * op.offset();
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }

  EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType) {
    m_impl.evalSubExprsIfNeeded(NULL);
    return true;
  }

  EIGEN_STRONG_INLINE void cleanup() {
    m_impl.cleanup();
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
  {
    return m_impl.coeff(srcCoeff(index));
  }

  // Packet read; the addressing strategy depends on where the chipped
  // dimension lies (inner-most, outer-most, or in between).
  template<int LoadMode>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
  {
    EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
    eigen_assert(index+PacketSize-1 < dimensions().TotalSize());

    if (isInnerChipping()) {
      // m_stride is equal to 1, so let's avoid the integer division.
      eigen_assert(m_stride == 1);
      // Coefficients are strided in the input: gather them one by one.
      Index inputIndex = index * m_inputStride + m_inputOffset;
      EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
      EIGEN_UNROLL_LOOP
      for (int i = 0; i < PacketSize; ++i) {
        values[i] = m_impl.coeff(inputIndex);
        inputIndex += m_inputStride;
      }
      PacketReturnType rslt = internal::pload<PacketReturnType>(values);
      return rslt;
    } else if (isOuterChipping()) {
      // m_stride is always greater than index, so let's avoid the integer division.
      eigen_assert(m_stride > index);
      return m_impl.template packet<LoadMode>(index + m_inputOffset);
    } else {
      const Index idx = index / m_stride;
      const Index rem = index - idx * m_stride;
      if (rem + PacketSize <= m_stride) {
        Index inputIndex = idx * m_inputStride + m_inputOffset + rem;
        return m_impl.template packet<LoadMode>(inputIndex);
      } else {
        // Cross the stride boundary. Fallback to slow path.
        EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
        EIGEN_UNROLL_LOOP
        for (int i = 0; i < PacketSize; ++i) {
          values[i] = coeff(index);
          ++index;
        }
        PacketReturnType rslt = internal::pload<PacketReturnType>(values);
        return rslt;
      }
    }
  }

  // Per-coefficient cost: depends on which index-mapping path srcCoeff takes.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
  costPerCoeff(bool vectorized) const {
    double cost = 0;
    if ((static_cast<int>(Layout) == static_cast<int>(ColMajor) &&
         m_dim.actualDim() == 0) ||
        (static_cast<int>(Layout) == static_cast<int>(RowMajor) &&
         m_dim.actualDim() == NumInputDims - 1)) {
      // Inner chipping: one multiply plus one add per coefficient.
      cost += TensorOpCost::MulCost<Index>() + TensorOpCost::AddCost<Index>();
    } else if ((static_cast<int>(Layout) == static_cast<int>(ColMajor) &&
                m_dim.actualDim() == NumInputDims - 1) ||
               (static_cast<int>(Layout) == static_cast<int>(RowMajor) &&
                m_dim.actualDim() == 0)) {
      // Outer chipping: a single add per coefficient.
      cost += TensorOpCost::AddCost<Index>();
    } else {
      // General case: division plus several multiplies/adds.
      cost += 3 * TensorOpCost::MulCost<Index>() + TensorOpCost::DivCost<Index>() +
              3 * TensorOpCost::AddCost<Index>();
    }

    return m_impl.costPerCoeff(vectorized) +
           TensorOpCost(0, 0, cost, vectorized, PacketSize);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  internal::TensorBlockResourceRequirements getResourceRequirements() const {
    const size_t target_size = m_device.lastLevelCacheSize();
    return internal::TensorBlockResourceRequirements::merge(
        internal::TensorBlockResourceRequirements::skewed<Scalar>(target_size),
        m_impl.getResourceRequirements());
  }

  // Materializes a block of the chipped tensor by evaluating the matching
  // input block (same extents, plus a size-1 extent at the chipped dimension).
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBlock
  block(TensorBlockDesc& desc, TensorBlockScratch& scratch,
        bool root_of_expr_ast = false) const {
    const Index chip_dim = m_dim.actualDim();

    DSizes<Index, NumInputDims> input_block_dims;
    for (int i = 0; i < NumInputDims; ++i) {
      input_block_dims[i]
          = i < chip_dim ? desc.dimension(i)
          : i > chip_dim ? desc.dimension(i - 1)
          : 1;
    }

    ArgTensorBlockDesc arg_desc(srcCoeff(desc.offset()), input_block_dims);

    // Try to reuse destination buffer for materializing argument block.
    if (desc.HasDestinationBuffer()) {
      DSizes<Index, NumInputDims> arg_destination_strides;
      for (int i = 0; i < NumInputDims; ++i) {
        arg_destination_strides[i]
            = i < chip_dim ? desc.destination().strides()[i]
            : i > chip_dim ? desc.destination().strides()[i - 1]
            : 0; // for dimensions of size `1` stride should never be used.
      }

      arg_desc.template AddDestinationBuffer<Layout>(
          desc.destination().template data<ScalarNoConst>(),
          arg_destination_strides);
    }

    ArgTensorBlock arg_block = m_impl.block(arg_desc, scratch, root_of_expr_ast);
    if (!arg_desc.HasDestinationBuffer()) desc.DropDestinationBuffer();

    if (arg_block.data() != NULL) {
      // Forward argument block buffer if possible.
      return TensorBlock(arg_block.kind(), arg_block.data(),
                         desc.dimensions());
    } else {
      // Assign argument block expression to a buffer.

      // Prepare storage for the materialized chipping result.
      const typename TensorBlock::Storage block_storage =
          TensorBlock::prepareStorage(desc, scratch);

      typedef internal::TensorBlockAssignment<
          ScalarNoConst, NumInputDims, typename ArgTensorBlock::XprType, Index>
          TensorBlockAssignment;

      TensorBlockAssignment::Run(
          TensorBlockAssignment::target(
              arg_desc.dimensions(),
              internal::strides<Layout>(arg_desc.dimensions()),
              block_storage.data()),
          arg_block.expr());

      return block_storage.AsTensorMaterializedBlock();
    }
  }

  // Raw pointer to the slice when it is contiguous in the input (outer
  // chipping over an evaluator that exposes its data); NULL otherwise.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename Storage::Type data() const {
    typename Storage::Type result = constCast(m_impl.data());
    if (isOuterChipping() && result) {
      return result + m_inputOffset;
    } else {
      return NULL;
    }
  }

#ifdef EIGEN_USE_SYCL
  // binding placeholder accessors to a command group handler for SYCL
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const {
    m_impl.bind(cgh);
  }
#endif

 protected:
  // Maps an output (chipped) coefficient index to the corresponding input index.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index srcCoeff(Index index) const
  {
    Index inputIndex;
    if (isInnerChipping()) {
      // m_stride is equal to 1, so let's avoid the integer division.
      eigen_assert(m_stride == 1);
      inputIndex = index * m_inputStride + m_inputOffset;
    } else if (isOuterChipping()) {
      // m_stride is always greater than index, so let's avoid the integer
      // division.
      eigen_assert(m_stride > index);
      inputIndex = index + m_inputOffset;
    } else {
      const Index idx = index / m_stride;
      inputIndex = idx * m_inputStride + m_inputOffset;
      index -= idx * m_stride;
      inputIndex += index;
    }
    return inputIndex;
  }

  // Runtime counterparts of the IsInner/IsOuterChipping flags, needed when
  // the chipped dimension is dynamic.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool isInnerChipping() const {
    return IsInnerChipping ||
           (static_cast<int>(Layout) == ColMajor && m_dim.actualDim() == 0) ||
           (static_cast<int>(Layout) == RowMajor && m_dim.actualDim() == NumInputDims - 1);
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool isOuterChipping() const {
    return IsOuterChipping ||
           (static_cast<int>(Layout) == ColMajor && m_dim.actualDim() == NumInputDims-1) ||
           (static_cast<int>(Layout) == RowMajor && m_dim.actualDim() == 0);
  }

  Dimensions m_dimensions;  // output (chipped) dimensions
  Index m_stride;           // stride of the chipped dimension in the input
  Index m_inputOffset;      // m_stride * op.offset()
  Index m_inputStride;      // m_stride * size of the chipped dimension
  TensorEvaluator<ArgType, Device> m_impl;
  const internal::DimensionId<DimId> m_dim;
  const Device EIGEN_DEVICE_REF m_device;
};
// Eval as lvalue
// Evaluator for chipping used as an lvalue (writable slice). Inherits the
// read path from the const evaluator and adds coefficient, packet and block
// stores.
template<DenseIndex DimId, typename ArgType, typename Device>
struct TensorEvaluator<TensorChippingOp<DimId, ArgType>, Device>
  : public TensorEvaluator<const TensorChippingOp<DimId, ArgType>, Device>
{
  typedef TensorEvaluator<const TensorChippingOp<DimId, ArgType>, Device> Base;
  typedef TensorChippingOp<DimId, ArgType> XprType;
  static const int NumInputDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value;
  static const int NumDims = NumInputDims-1;
  typedef typename XprType::Index Index;
  typedef DSizes<Index, NumDims> Dimensions;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  static const int PacketSize = PacketType<CoeffReturnType, Device>::size;

  enum {
    IsAligned = false,
    PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
    // writeBlock() stores straight into the input's raw storage, so block
    // access is only available when the input evaluator provides raw access.
    BlockAccess = TensorEvaluator<ArgType, Device>::RawAccess,
    Layout = TensorEvaluator<ArgType, Device>::Layout,
    RawAccess = false
  };

  //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===//
  typedef internal::TensorBlockDescriptor<NumDims, Index> TensorBlockDesc;
  //===--------------------------------------------------------------------===//

  EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
    : Base(op, device)
    { }

  // Writable access to a single coefficient of the slice.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType& coeffRef(Index index)
  {
    return this->m_impl.coeffRef(this->srcCoeff(index));
  }

  // Writes a packet of coefficients, mirroring the addressing logic of the
  // read-side packet().
  template <int StoreMode> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  void writePacket(Index index, const PacketReturnType& x)
  {
    EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)

    if (this->isInnerChipping()) {
      // m_stride is equal to 1, so let's avoid the integer division.
      eigen_assert(this->m_stride == 1);
      // Coefficients are strided in the input: scatter them one by one.
      EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
      internal::pstore<CoeffReturnType, PacketReturnType>(values, x);
      Index inputIndex = index * this->m_inputStride + this->m_inputOffset;
      EIGEN_UNROLL_LOOP
      for (int i = 0; i < PacketSize; ++i) {
        this->m_impl.coeffRef(inputIndex) = values[i];
        inputIndex += this->m_inputStride;
      }
    } else if (this->isOuterChipping()) {
      // m_stride is always greater than index, so let's avoid the integer division.
      eigen_assert(this->m_stride > index);
      this->m_impl.template writePacket<StoreMode>(index + this->m_inputOffset, x);
    } else {
      const Index idx = index / this->m_stride;
      const Index rem = index - idx * this->m_stride;
      if (rem + PacketSize <= this->m_stride) {
        const Index inputIndex = idx * this->m_inputStride + this->m_inputOffset + rem;
        this->m_impl.template writePacket<StoreMode>(inputIndex, x);
      } else {
        // Cross stride boundary. Fallback to slow path.
        EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
        internal::pstore<CoeffReturnType, PacketReturnType>(values, x);
        EIGEN_UNROLL_LOOP
        for (int i = 0; i < PacketSize; ++i) {
          this->coeffRef(index) = values[i];
          ++index;
        }
      }
    }
  }

  // Writes a materialized block into the chipped slice by reshaping it to the
  // input's rank and assigning directly into the input's raw storage.
  template <typename TensorBlock>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void writeBlock(
      const TensorBlockDesc& desc, const TensorBlock& block) {
    // Consistency fix: use eigen_assert (like every other assertion in this
    // file) instead of a raw assert, so the check is disabled together with
    // the rest of Eigen's assertions when EIGEN_NO_DEBUG is defined.
    eigen_assert(this->m_impl.data() != NULL);

    const Index chip_dim = this->m_dim.actualDim();

    // Input block dimensions: the block's extents with a size-1 extent
    // re-inserted at the chipped dimension.
    DSizes<Index, NumInputDims> input_block_dims;
    for (int i = 0; i < NumInputDims; ++i) {
      input_block_dims[i] = i < chip_dim ? desc.dimension(i)
                          : i > chip_dim ? desc.dimension(i - 1)
                          : 1;
    }

    typedef TensorReshapingOp<const DSizes<Index, NumInputDims>,
                              const typename TensorBlock::XprType>
        TensorBlockExpr;

    typedef internal::TensorBlockAssignment<Scalar, NumInputDims,
                                            TensorBlockExpr, Index>
        TensorBlockAssign;

    TensorBlockAssign::Run(
        TensorBlockAssign::target(
            input_block_dims,
            internal::strides<Layout>(this->m_impl.dimensions()),
            this->m_impl.data(), this->srcCoeff(desc.offset())),
        block.expr().reshape(input_block_dims));
  }
};
} // end namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_CHIPPING_H
| 19,707
| 36.973025
| 117
|
h
|
null |
LRMI-main/unsupported/Eigen/CXX11/src/Tensor/TensorContractionBlocking.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_BLOCKING_H
#define EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_BLOCKING_H
namespace Eigen {
namespace internal {
// Which non-contracting dimension the contraction work is sharded by.
enum {
  ShardByRow = 0,
  ShardByCol = 1
};
// Default Blocking Strategy
// Default blocking strategy for tensor contractions: given the requested
// (k, m, n) sizes, derives cache-aware panel sizes via Eigen's generic GEMM
// blocking heuristic, sharding the work by rows or by columns.
template<typename ResScalar, typename LhsScalar, typename RhsScalar, typename StorageIndex, int ShardingType = ShardByCol>
class TensorContractionBlocking {
 public:
  // NOTE: EIGEN_DEVICE_FUNC cannot be applied unconditionally here. Doing so
  // would require annotating `computeProductBlockingSizes`, then
  // `evaluateProductBlockingSizesHeuristic`, then `manage_caching_sizes` in
  // GeneralBlockPanelKernel.h as well (HIPCC errors out otherwise), and that
  // last annotation makes NVCC reject the code with:
  //   ../Eigen/src/Core/products/GeneralBlockPanelKernel.h(57): error #2901:
  //   dynamic initialization is not supported for function-scope static
  //   variables within a __device__/__global__ function
#if !defined(EIGEN_HIPCC)
  EIGEN_DEVICE_FUNC
#endif
  TensorContractionBlocking(StorageIndex k, StorageIndex m, StorageIndex n, StorageIndex num_threads = 1) :
      kc_(k), mc_(m), nc_(n)
  {
    // The heuristic shrinks the sizes in place; the dimension we shard by is
    // passed last, hence the swapped mc_/nc_ arguments in the two branches.
    if (ShardingType == ShardByCol) {
      computeProductBlockingSizes<LhsScalar, RhsScalar, 1>(kc_, mc_, nc_, num_threads);
    } else {
      computeProductBlockingSizes<LhsScalar, RhsScalar, 1>(kc_, nc_, mc_, num_threads);
    }

    // For wide rhs packets, round the depth down to a whole number of packets
    // (but never below one full packet).
    const int rhs_packet_size = internal::packet_traits<RhsScalar>::size;
    if (rhs_packet_size > 8 && kc_ > rhs_packet_size) {
      kc_ = (kc_ / rhs_packet_size) * rhs_packet_size;
    }
  }

  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE StorageIndex kc() const { return kc_; }
  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE StorageIndex mc() const { return mc_; }
  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE StorageIndex nc() const { return nc_; }

 private:
  StorageIndex kc_;  // panel depth (contraction dimension)
  StorageIndex mc_;  // lhs panel height
  StorageIndex nc_;  // rhs panel width
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_BLOCKING_H
| 2,675
| 35.162162
| 127
|
h
|
null |
LRMI-main/unsupported/Eigen/CXX11/src/Tensor/TensorConversion.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2015 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_TENSOR_TENSOR_CONVERSION_H
#define EIGEN_CXX11_TENSOR_TENSOR_CONVERSION_H
namespace Eigen {
/** \class TensorConversionOp
* \ingroup CXX11_Tensor_Module
*
* \brief Tensor conversion class. This class makes it possible to vectorize
* type casting operations when the number of scalars per packet in the source
* and the destination type differ
*/
namespace internal {
// Traits of the conversion expression: the scalar type becomes TargetType,
// while rank, layout and index type are taken from the input expression.
template<typename TargetType, typename XprType>
struct traits<TensorConversionOp<TargetType, XprType> >
{
  // Type promotion to handle the case where the types of the lhs and the rhs are different.
  typedef TargetType Scalar;
  typedef typename traits<XprType>::StorageKind StorageKind;
  typedef typename traits<XprType>::Index Index;
  typedef typename XprType::Nested Nested;
  typedef typename remove_reference<Nested>::type _Nested;
  static const int NumDimensions = traits<XprType>::NumDimensions;
  static const int Layout = traits<XprType>::Layout;
  enum { Flags = 0 };
  typedef typename TypeConversion<Scalar, typename traits<XprType>::PointerType>::type PointerType;
};
// Conversion expressions are evaluated by reference.
template<typename TargetType, typename XprType>
struct eval<TensorConversionOp<TargetType, XprType>, Eigen::Dense>
{
  typedef const TensorConversionOp<TargetType, XprType>& type;
};
// Nested conversion expressions are stored by value.
template<typename TargetType, typename XprType>
struct nested<TensorConversionOp<TargetType, XprType>, 1, typename eval<TensorConversionOp<TargetType, XprType> >::type>
{
  typedef TensorConversionOp<TargetType, XprType> type;
};
} // end namespace internal
// Converts SrcCoeffRatio source packets into TgtCoeffRatio target packets;
// specialized below for the supported ratios.
template <typename TensorEvaluator, typename SrcPacket, typename TgtPacket, int SrcCoeffRatio, int TgtCoeffRatio>
struct PacketConverter;
// 1:1 ratio — a single source packet casts to a single target packet.
template <typename TensorEvaluator, typename SrcPacket, typename TgtPacket>
struct PacketConverter<TensorEvaluator, SrcPacket, TgtPacket, 1, 1> {
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  PacketConverter(const TensorEvaluator& impl)
      : m_impl(impl) {}

  template<int LoadMode, typename Index>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TgtPacket packet(Index index) const {
    return internal::pcast<SrcPacket, TgtPacket>(m_impl.template packet<LoadMode>(index));
  }

 private:
  const TensorEvaluator& m_impl;
};
// 2:1 ratio — one target packet is assembled from two consecutive source
// packets.
template <typename TensorEvaluator, typename SrcPacket, typename TgtPacket>
struct PacketConverter<TensorEvaluator, SrcPacket, TgtPacket, 2, 1> {
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  PacketConverter(const TensorEvaluator& impl)
      : m_impl(impl) {}

  // Loads two consecutive source packets and casts them into one target packet.
  template<int LoadMode, typename Index>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TgtPacket packet(Index index) const {
    const int kSrcSize = internal::unpacket_traits<SrcPacket>::size;
    const SrcPacket p0 = m_impl.template packet<LoadMode>(index);
    const SrcPacket p1 = m_impl.template packet<LoadMode>(index + kSrcSize);
    return internal::pcast<SrcPacket, TgtPacket>(p0, p1);
  }

 private:
  const TensorEvaluator& m_impl;
};
// 4:1 ratio — one target packet is assembled from four consecutive source
// packets.
template <typename TensorEvaluator, typename SrcPacket, typename TgtPacket>
struct PacketConverter<TensorEvaluator, SrcPacket, TgtPacket, 4, 1> {
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  PacketConverter(const TensorEvaluator& impl)
      : m_impl(impl) {}

  // Loads four consecutive source packets and casts them into one target packet.
  template<int LoadMode, typename Index>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TgtPacket packet(Index index) const {
    const int kSrcSize = internal::unpacket_traits<SrcPacket>::size;
    const SrcPacket p0 = m_impl.template packet<LoadMode>(index);
    const SrcPacket p1 = m_impl.template packet<LoadMode>(index + kSrcSize);
    const SrcPacket p2 = m_impl.template packet<LoadMode>(index + 2 * kSrcSize);
    const SrcPacket p3 = m_impl.template packet<LoadMode>(index + 3 * kSrcSize);
    return internal::pcast<SrcPacket, TgtPacket>(p0, p1, p2, p3);
  }

 private:
  const TensorEvaluator& m_impl;
};
// 8:1 ratio — one target packet is assembled from eight consecutive source
// packets.
template <typename TensorEvaluator, typename SrcPacket, typename TgtPacket>
struct PacketConverter<TensorEvaluator, SrcPacket, TgtPacket, 8, 1> {
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  PacketConverter(const TensorEvaluator& impl)
      : m_impl(impl) {}

  // Loads eight consecutive source packets and casts them into one target packet.
  template<int LoadMode, typename Index>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TgtPacket packet(Index index) const {
    const int kSrcSize = internal::unpacket_traits<SrcPacket>::size;
    const SrcPacket p0 = m_impl.template packet<LoadMode>(index);
    const SrcPacket p1 = m_impl.template packet<LoadMode>(index + 1 * kSrcSize);
    const SrcPacket p2 = m_impl.template packet<LoadMode>(index + 2 * kSrcSize);
    const SrcPacket p3 = m_impl.template packet<LoadMode>(index + 3 * kSrcSize);
    const SrcPacket p4 = m_impl.template packet<LoadMode>(index + 4 * kSrcSize);
    const SrcPacket p5 = m_impl.template packet<LoadMode>(index + 5 * kSrcSize);
    const SrcPacket p6 = m_impl.template packet<LoadMode>(index + 6 * kSrcSize);
    const SrcPacket p7 = m_impl.template packet<LoadMode>(index + 7 * kSrcSize);
    return internal::pcast<SrcPacket, TgtPacket>(p0, p1, p2, p3, p4, p5, p6, p7);
  }

 private:
  const TensorEvaluator& m_impl;
};
// Source packets wider than target packets (TgtCoeffRatio > 1): uses a packet
// load only when the underlying data is directly accessible and the read
// stays in bounds; otherwise falls back to per-coefficient conversion.
template <typename TensorEvaluator, typename SrcPacket, typename TgtPacket, int TgtCoeffRatio>
struct PacketConverter<TensorEvaluator, SrcPacket, TgtPacket, 1, TgtCoeffRatio> {
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  PacketConverter(const TensorEvaluator& impl)
      : m_impl(impl), m_maxIndex(impl.dimensions().TotalSize()) {}

  template<int LoadMode, typename Index>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TgtPacket packet(Index index) const {
    const int SrcPacketSize = internal::unpacket_traits<SrcPacket>::size;
    // Only call m_impl.packet() when we have direct access to the underlying data. This
    // ensures that we don't compute the subexpression twice. We may however load some
    // coefficients twice, but in practice this doesn't negatively impact performance.
    if (m_impl.data() && (index + SrcPacketSize < m_maxIndex)) {
      // Force unaligned memory loads since we can't ensure alignment anymore
      return internal::pcast<SrcPacket, TgtPacket>(m_impl.template packet<Unaligned>(index));
    } else {
      const int TgtPacketSize = internal::unpacket_traits<TgtPacket>::size;
      typedef typename internal::unpacket_traits<SrcPacket>::type SrcType;
      typedef typename internal::unpacket_traits<TgtPacket>::type TgtType;
      internal::scalar_cast_op<SrcType, TgtType> converter;
      EIGEN_ALIGN_MAX typename internal::unpacket_traits<TgtPacket>::type values[TgtPacketSize];
      EIGEN_UNROLL_LOOP
      for (int i = 0; i < TgtPacketSize; ++i) {
        values[i] = converter(m_impl.coeff(index+i));
      }
      TgtPacket rslt = internal::pload<TgtPacket>(values);
      return rslt;
    }
  }

 private:
  const TensorEvaluator& m_impl;
  const typename TensorEvaluator::Index m_maxIndex;  // total number of coefficients
};
// Expression class representing a cast of every coefficient of the nested
// expression to TargetType.
template<typename TargetType, typename XprType>
class TensorConversionOp : public TensorBase<TensorConversionOp<TargetType, XprType>, ReadOnlyAccessors>
{
  public:
    typedef typename internal::traits<TensorConversionOp>::Scalar Scalar;
    typedef typename internal::traits<TensorConversionOp>::StorageKind StorageKind;
    typedef typename internal::traits<TensorConversionOp>::Index Index;
    typedef typename internal::nested<TensorConversionOp>::type Nested;
    typedef Scalar CoeffReturnType;
    typedef typename NumTraits<Scalar>::Real RealScalar;

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorConversionOp(const XprType& xpr)
        : m_xpr(xpr) {}

    // The expression whose coefficients are being converted.
    EIGEN_DEVICE_FUNC
    const typename internal::remove_all<typename XprType::Nested>::type&
    expression() const { return m_xpr; }

  protected:
    typename XprType::Nested m_xpr;
};
// Evaluates the nested expression of a conversion. General (type-changing)
// case: the result cannot be written straight into the destination buffer, so
// the sub-expression is evaluated on its own and `true` is returned (the
// caller must still perform the conversion/assignment).
template <bool SameType, typename Eval, typename EvalPointerType> struct ConversionSubExprEval {
  static EIGEN_STRONG_INLINE bool run(Eval& impl, EvalPointerType) {
    impl.evalSubExprsIfNeeded(NULL);
    return true;
  }
};
// Same-type case: the conversion is a no-op, so the destination buffer is
// forwarded to the nested expression for possible in-place evaluation.
template <typename Eval, typename EvalPointerType> struct ConversionSubExprEval<true, Eval, EvalPointerType> {
  static EIGEN_STRONG_INLINE bool run(Eval& impl, EvalPointerType data) {
    return impl.evalSubExprsIfNeeded(data);
  }
};
#ifdef EIGEN_USE_THREADS
// Asynchronous counterpart of ConversionSubExprEval. General case: the nested
// expression is evaluated into its own buffer.
template <bool SameType, typename Eval, typename EvalPointerType,
          typename EvalSubExprsCallback>
struct ConversionSubExprEvalAsync {
  static EIGEN_STRONG_INLINE void run(Eval& impl, EvalPointerType, EvalSubExprsCallback done) {
    impl.evalSubExprsIfNeededAsync(nullptr, std::move(done));
  }
};
// Same-type case: forward the destination buffer so the nested expression can
// evaluate directly into it.
template <typename Eval, typename EvalPointerType,
          typename EvalSubExprsCallback>
struct ConversionSubExprEvalAsync<true, Eval, EvalPointerType,
                                  EvalSubExprsCallback> {
  static EIGEN_STRONG_INLINE void run(Eval& impl, EvalPointerType data, EvalSubExprsCallback done) {
    impl.evalSubExprsIfNeededAsync(data, std::move(done));
  }
};
#endif
namespace internal {
// Scalar conversion of a single coefficient. Generic case: fetch the source
// coefficient and run it through scalar_cast_op<SrcType, TargetType>.
template <typename SrcType, typename TargetType, bool IsSameT>
struct CoeffConv {
  template <typename ArgType, typename Device>
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TargetType run(const TensorEvaluator<ArgType, Device>& evaluator, Index idx) {
    internal::scalar_cast_op<SrcType, TargetType> cast;
    return cast(evaluator.coeff(idx));
  }
};
// Same-type specialization: nothing to convert, just forward the coefficient.
template <typename SrcType, typename TargetType>
struct CoeffConv<SrcType, TargetType, true> {
  template <typename ArgType, typename Device>
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TargetType run(const TensorEvaluator<ArgType, Device>& evaluator, Index idx) {
    return evaluator.coeff(idx);
  }
};
// Packet conversion, generic (non-vectorized-cast) case: the packet is built
// coefficient by coefficient through the scalar cast functor, then loaded from
// an aligned stack buffer.
template <typename SrcPacket, typename TargetPacket, int LoadMode, bool ActuallyVectorize, bool IsSameT>
struct PacketConv {
  typedef typename internal::unpacket_traits<SrcPacket>::type SrcType;
  typedef typename internal::unpacket_traits<TargetPacket>::type TargetType;
  static const int PacketSize = internal::unpacket_traits<TargetPacket>::size;

  template <typename ArgType, typename Device>
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TargetPacket run(const TensorEvaluator<ArgType, Device>& evaluator, Index idx) {
    internal::scalar_cast_op<SrcType, TargetType> cast;
    EIGEN_ALIGN_MAX typename internal::remove_const<TargetType>::type buffer[PacketSize];
    EIGEN_UNROLL_LOOP
    for (int k = 0; k < PacketSize; ++k) {
      buffer[k] = cast(evaluator.coeff(idx + k));
    }
    return internal::pload<TargetPacket>(buffer);
  }
};
// Vectorized-cast specialization: delegate to PacketConverter, which handles
// the case where source and target packets hold different numbers of scalars
// (the Src/TgtCoeffRatio template arguments).
template <typename SrcPacket, typename TargetPacket, int LoadMode, bool IsSameT>
struct PacketConv<SrcPacket, TargetPacket, LoadMode, true, IsSameT> {
  typedef typename internal::unpacket_traits<SrcPacket>::type SrcType;
  typedef typename internal::unpacket_traits<TargetPacket>::type TargetType;

  template <typename ArgType, typename Device>
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TargetPacket run(const TensorEvaluator<ArgType, Device>& evaluator, Index idx) {
    typedef internal::type_casting_traits<SrcType, TargetType> CastTraits;
    PacketConverter<TensorEvaluator<ArgType, Device>, SrcPacket, TargetPacket,
                    CastTraits::SrcCoeffRatio, CastTraits::TgtCoeffRatio> caster(evaluator);
    return caster.template packet<LoadMode>(idx);
  }
};
// Same type but the underlying evaluator has no packet access: gather the
// coefficients one by one into an aligned buffer and load the packet from it.
template <typename SrcPacket, typename TargetPacket, int LoadMode>
struct PacketConv<SrcPacket, TargetPacket, LoadMode, /*ActuallyVectorize=*/false, /*IsSameT=*/true> {
  typedef typename internal::unpacket_traits<TargetPacket>::type TargetType;
  static const int PacketSize = internal::unpacket_traits<TargetPacket>::size;

  template <typename ArgType, typename Device>
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TargetPacket run(const TensorEvaluator<ArgType, Device>& evaluator, Index idx) {
    EIGEN_ALIGN_MAX typename internal::remove_const<TargetType>::type buffer[PacketSize];
    for (int k = 0; k < PacketSize; ++k) {
      buffer[k] = evaluator.coeff(idx + k);
    }
    return internal::pload<TargetPacket>(buffer);
  }
};
// Same type and the underlying evaluator supports packets: simply forward the
// packet load, no conversion work at all.
template <typename SrcPacket, typename TargetPacket, int LoadMode>
struct PacketConv<SrcPacket, TargetPacket, LoadMode, /*ActuallyVectorize=*/true, /*IsSameT=*/true> {
template <typename ArgType, typename Device>
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TargetPacket run(const TensorEvaluator<ArgType, Device>& impl, Index index) {
return impl.template packet<LoadMode>(index);
}
};
} // namespace internal
// Eval as rvalue
// Evaluator for TensorConversionOp. Dispatches every coefficient/packet read
// through the CoeffConv/PacketConv helpers above, selected at compile time on
// whether source and target scalar types are identical and whether a
// vectorized cast implementation exists.
template<typename TargetType, typename ArgType, typename Device>
struct TensorEvaluator<const TensorConversionOp<TargetType, ArgType>, Device>
{
typedef TensorConversionOp<TargetType, ArgType> XprType;
typedef typename XprType::Index Index;
typedef typename TensorEvaluator<ArgType, Device>::Dimensions Dimensions;
typedef TargetType Scalar;
typedef TargetType CoeffReturnType;
typedef typename internal::remove_all<typename internal::traits<ArgType>::Scalar>::type SrcType;
typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
typedef typename PacketType<SrcType, Device>::type PacketSourceType;
static const int PacketSize = PacketType<CoeffReturnType, Device>::size;
static const bool IsSameType = internal::is_same<TargetType, SrcType>::value;
typedef StorageMemory<CoeffReturnType, Device> Storage;
typedef typename Storage::Type EvaluatorPointerType;
enum {
IsAligned = false,
// Outside SYCL, PacketAccess is unconditionally advertised; the packet()
// method below falls back to a scalar loop when no vectorized cast exists.
PacketAccess =
#ifndef EIGEN_USE_SYCL
true,
#else
TensorEvaluator<ArgType, Device>::PacketAccess &
internal::type_casting_traits<SrcType, TargetType>::VectorizedCast,
#endif
BlockAccess = TensorEvaluator<ArgType, Device>::BlockAccess,
PreferBlockAccess = TensorEvaluator<ArgType, Device>::PreferBlockAccess,
Layout = TensorEvaluator<ArgType, Device>::Layout,
RawAccess = false
};
static const int NumDims = internal::array_size<Dimensions>::value;
//===- Tensor block evaluation strategy (see TensorBlock.h) -------------===//
typedef internal::TensorBlockDescriptor<NumDims, Index> TensorBlockDesc;
typedef internal::TensorBlockScratchAllocator<Device> TensorBlockScratch;
typedef typename TensorEvaluator<const ArgType, Device>::TensorBlock
ArgTensorBlock;
// Factory used by TensorUnaryExprBlock to re-wrap a block sub-expression in a
// TensorConversionOp, so block evaluation also goes through the cast.
struct TensorConversionOpBlockFactory {
template <typename ArgXprType>
struct XprType {
typedef TensorConversionOp<TargetType, const ArgXprType> type;
};
template <typename ArgXprType>
typename XprType<ArgXprType>::type expr(const ArgXprType& expr) const {
return typename XprType<ArgXprType>::type(expr);
}
};
typedef internal::TensorUnaryExprBlock<TensorConversionOpBlockFactory,
ArgTensorBlock>
TensorBlock;
//===--------------------------------------------------------------------===//
EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
: m_impl(op.expression(), device)
{
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_impl.dimensions(); }
// Forwards the destination buffer only when no cast is needed (see
// ConversionSubExprEval): otherwise the nested expression evaluates into its
// own storage and this evaluator converts on the fly.
EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType data)
{
return ConversionSubExprEval<IsSameType, TensorEvaluator<ArgType, Device>, EvaluatorPointerType>::run(m_impl, data);
}
#ifdef EIGEN_USE_THREADS
template <typename EvalSubExprsCallback>
EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync(
EvaluatorPointerType data, EvalSubExprsCallback done) {
ConversionSubExprEvalAsync<IsSameType, TensorEvaluator<ArgType, Device>,
EvaluatorPointerType,
EvalSubExprsCallback>::run(m_impl, data, std::move(done));
}
#endif
EIGEN_STRONG_INLINE void cleanup()
{
m_impl.cleanup();
}
// Reads one source coefficient and casts it to TargetType.
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
{
return internal::CoeffConv<SrcType, TargetType, IsSameType>::run(m_impl,index);
}
template<int LoadMode>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType
packet(Index index) const {
// If we are not going to do the cast, we just need to check that base
// TensorEvaluator has packet access. Otherwise we also need to make sure,
// that we have an implementation of vectorized cast.
const bool Vectorizable =
IsSameType
? TensorEvaluator<ArgType, Device>::PacketAccess
: int(TensorEvaluator<ArgType, Device>::PacketAccess) &
int(internal::type_casting_traits<SrcType, TargetType>::VectorizedCast);
return internal::PacketConv<PacketSourceType, PacketReturnType, LoadMode,
Vectorizable, IsSameType>::run(m_impl, index);
}
// Cost = cost of evaluating the source (rescaled by the packet-size ratio in
// the vectorized case) plus the per-coefficient cast cost.
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
costPerCoeff(bool vectorized) const {
const double cast_cost = TensorOpCost::CastCost<SrcType, TargetType>();
if (vectorized) {
const double SrcCoeffRatio =
internal::type_casting_traits<SrcType, TargetType>::SrcCoeffRatio;
const double TgtCoeffRatio =
internal::type_casting_traits<SrcType, TargetType>::TgtCoeffRatio;
return m_impl.costPerCoeff(vectorized) * (SrcCoeffRatio / PacketSize) +
TensorOpCost(0, 0, TgtCoeffRatio * (cast_cost / PacketSize));
} else {
return m_impl.costPerCoeff(vectorized) + TensorOpCost(0, 0, cast_cost);
}
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
internal::TensorBlockResourceRequirements getResourceRequirements() const {
return m_impl.getResourceRequirements();
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBlock
block(TensorBlockDesc& desc, TensorBlockScratch& scratch,
bool /*root_of_expr_ast*/ = false) const {
return TensorBlock(m_impl.block(desc, scratch),
TensorConversionOpBlockFactory());
}
// No raw buffer is ever exposed (RawAccess == false above).
EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return NULL; }
/// required by sycl in order to extract the sycl accessor
const TensorEvaluator<ArgType, Device>& impl() const { return m_impl; }
#ifdef EIGEN_USE_SYCL
// binding placeholder accessors to a command group handler for SYCL
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const {
m_impl.bind(cgh);
}
#endif
protected:
TensorEvaluator<ArgType, Device> m_impl;
};
} // end namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_CONVERSION_H
| 18,803
| 40.146608
| 124
|
h
|
null |
LRMI-main/unsupported/Eigen/CXX11/src/Tensor/TensorCostModel.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2016 Rasmus Munk Larsen <rmlarsen@google.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_TENSOR_TENSOR_COST_MODEL_H
#define EIGEN_CXX11_TENSOR_TENSOR_COST_MODEL_H
namespace Eigen {
/** \class TensorEvaluator
* \ingroup CXX11_Tensor_Module
*
* \brief A cost model used to limit the number of threads used for evaluating
* tensor expression.
*
*/
// Class storing the cost of evaluating a tensor expression in terms of the
// estimated number of operand bytes loads, bytes stored, and compute cycles.
class TensorOpCost {
public:
// TODO(rmlarsen): Fix the scalar op costs in Eigen proper. Even a simple
// model based on minimal reciprocal throughput numbers from Intel or
// Agner Fog's tables would be better than what is there now.
// Per-scalar cost of a multiplication, taken from the functor's Cost trait.
template <typename ArgType>
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int MulCost() {
return internal::functor_traits<
internal::scalar_product_op<ArgType, ArgType> >::Cost;
}
// Per-scalar cost of an addition.
template <typename ArgType>
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int AddCost() {
return internal::functor_traits<internal::scalar_sum_op<ArgType> >::Cost;
}
// Per-scalar cost of a division.
template <typename ArgType>
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int DivCost() {
return internal::functor_traits<
internal::scalar_quotient_op<ArgType, ArgType> >::Cost;
}
// Per-scalar cost of a modulo operation.
template <typename ArgType>
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int ModCost() {
return internal::functor_traits<internal::scalar_mod_op<ArgType> >::Cost;
}
// Per-scalar cost of casting SrcType to TargetType.
template <typename SrcType, typename TargetType>
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int CastCost() {
return internal::functor_traits<
internal::scalar_cast_op<SrcType, TargetType> >::Cost;
}
// Zero cost.
EIGEN_DEVICE_FUNC
TensorOpCost() : bytes_loaded_(0), bytes_stored_(0), compute_cycles_(0) {}
EIGEN_DEVICE_FUNC
TensorOpCost(double bytes_loaded, double bytes_stored, double compute_cycles)
: bytes_loaded_(bytes_loaded),
bytes_stored_(bytes_stored),
compute_cycles_(compute_cycles) {}
// When `vectorized` is true the compute cost is amortized over the packet
// size; the input values are also sanity-checked here.
EIGEN_DEVICE_FUNC
TensorOpCost(double bytes_loaded, double bytes_stored, double compute_cycles,
bool vectorized, double packet_size)
: bytes_loaded_(bytes_loaded),
bytes_stored_(bytes_stored),
compute_cycles_(vectorized ? compute_cycles / packet_size
: compute_cycles) {
eigen_assert(bytes_loaded >= 0 && (numext::isfinite)(bytes_loaded));
eigen_assert(bytes_stored >= 0 && (numext::isfinite)(bytes_stored));
eigen_assert(compute_cycles >= 0 && (numext::isfinite)(compute_cycles));
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double bytes_loaded() const {
return bytes_loaded_;
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double bytes_stored() const {
return bytes_stored_;
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double compute_cycles() const {
return compute_cycles_;
}
// Weighted sum of the three components, with caller-supplied per-unit costs.
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double total_cost(
double load_cost, double store_cost, double compute_cost) const {
return load_cost * bytes_loaded_ + store_cost * bytes_stored_ +
compute_cost * compute_cycles_;
}
// Drop memory access component. Intended for cases when memory accesses are
// sequential or are completely masked by computations.
EIGEN_DEVICE_FUNC void dropMemoryCost() {
bytes_loaded_ = 0;
bytes_stored_ = 0;
}
// TODO(rmlarsen): Define min in terms of total cost, not elementwise.
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost cwiseMin(
const TensorOpCost& rhs) const {
double bytes_loaded = numext::mini(bytes_loaded_, rhs.bytes_loaded());
double bytes_stored = numext::mini(bytes_stored_, rhs.bytes_stored());
double compute_cycles = numext::mini(compute_cycles_, rhs.compute_cycles());
return TensorOpCost(bytes_loaded, bytes_stored, compute_cycles);
}
// TODO(rmlarsen): Define max in terms of total cost, not elementwise.
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost cwiseMax(
const TensorOpCost& rhs) const {
double bytes_loaded = numext::maxi(bytes_loaded_, rhs.bytes_loaded())
// TODO(rmlarsen): Implement a policy that chooses an "optimal" number of theads
// in [1:max_threads] instead of just switching multi-threading off for small
// work units.
template <typename Device>
class TensorCostModel {
public:
// Scaling from Eigen compute cost to device cycles.
static const int kDeviceCyclesPerComputeCycle = 1;
// Costs in device cycles.
static const int kStartupCycles = 100000;
static const int kPerThreadCycles = 100000;
static const int kTaskSize = 40000;
// Returns the number of threads in [1:max_threads] to use for
// evaluating an expression with the given output size and cost per
// coefficient.
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int numThreads(
double output_size, const TensorOpCost& cost_per_coeff, int max_threads) {
double cost = totalCost(output_size, cost_per_coeff);
double threads = (cost - kStartupCycles) / kPerThreadCycles + 0.9;
// Make sure we don't invoke undefined behavior when we convert to an int.
threads = numext::mini<double>(threads, GenericNumTraits<int>::highest());
return numext::mini(max_threads,
numext::maxi<int>(1, static_cast<int>(threads)));
}
// taskSize assesses parallel task size.
// Value of 1.0 means ideal parallel task size. Values < 1.0 mean that task
// granularity needs to be increased to mitigate parallelization overheads.
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double taskSize(
double output_size, const TensorOpCost& cost_per_coeff) {
return totalCost(output_size, cost_per_coeff) / kTaskSize;
}
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double totalCost(
double output_size, const TensorOpCost& cost_per_coeff) {
// Cost of memory fetches from L2 cache. 64 is typical cache line size.
// 11 is L2 cache latency on Haswell.
// We don't know whether data is in L1, L2 or L3. But we are most interested
// in single-threaded computational time around 100us-10ms (smaller time
// is too small for parallelization, larger time is not interesting
// either because we are probably using all available threads already).
// And for the target time range, L2 seems to be what matters. Data set
// fitting into L1 is too small to take noticeable time. Data set fitting
// only into L3 presumably will take more than 10ms to load and process.
const double kLoadCycles = 1.0 / 64 * 11;
const double kStoreCycles = 1.0 / 64 * 11;
// Scaling from Eigen compute cost to device cycles.
return output_size *
cost_per_coeff.total_cost(kLoadCycles, kStoreCycles,
kDeviceCyclesPerComputeCycle);
}
};
} // namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_COST_MODEL_H
| 8,642
| 39.2
| 80
|
h
|
null |
LRMI-main/unsupported/Eigen/CXX11/src/Tensor/TensorDevice.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_TENSOR_TENSOR_DEVICE_H
#define EIGEN_CXX11_TENSOR_TENSOR_DEVICE_H
namespace Eigen {
/** \class TensorDevice
* \ingroup CXX11_Tensor_Module
*
* \brief Pseudo expression providing an operator = that will evaluate its argument
* on the specified computing 'device' (GPU, thread pool, ...)
*
* Example:
* C.device(EIGEN_GPU) = A + B;
*
* Todo: operator *= and /=.
*/
template <typename ExpressionType, typename DeviceType> class TensorDevice {
public:
TensorDevice(const DeviceType& device, ExpressionType& expression) : m_device(device), m_expression(expression) {}
EIGEN_DEFAULT_COPY_CONSTRUCTOR(TensorDevice)
template<typename OtherDerived>
EIGEN_STRONG_INLINE TensorDevice& operator=(const OtherDerived& other) {
typedef TensorAssignOp<ExpressionType, const OtherDerived> Assign;
Assign assign(m_expression, other);
internal::TensorExecutor<const Assign, DeviceType>::run(assign, m_device);
return *this;
}
template<typename OtherDerived>
EIGEN_STRONG_INLINE TensorDevice& operator+=(const OtherDerived& other) {
typedef typename OtherDerived::Scalar Scalar;
typedef TensorCwiseBinaryOp<internal::scalar_sum_op<Scalar>, const ExpressionType, const OtherDerived> Sum;
Sum sum(m_expression, other);
typedef TensorAssignOp<ExpressionType, const Sum> Assign;
Assign assign(m_expression, sum);
internal::TensorExecutor<const Assign, DeviceType>::run(assign, m_device);
return *this;
}
template<typename OtherDerived>
EIGEN_STRONG_INLINE TensorDevice& operator-=(const OtherDerived& other) {
typedef typename OtherDerived::Scalar Scalar;
typedef TensorCwiseBinaryOp<internal::scalar_difference_op<Scalar>, const ExpressionType, const OtherDerived> Difference;
Difference difference(m_expression, other);
typedef TensorAssignOp<ExpressionType, const Difference> Assign;
Assign assign(m_expression, difference);
internal::TensorExecutor<const Assign, DeviceType>::run(assign, m_device);
return *this;
}
protected:
const DeviceType& m_device;
ExpressionType& m_expression;
};
/** \class TensorAsyncDevice
* \ingroup CXX11_Tensor_Module
*
* \brief Pseudo expression providing an operator = that will evaluate its
* argument asynchronously on the specified device. Currently only
* ThreadPoolDevice implements proper asynchronous execution, while the default
* and GPU devices just run the expression synchronously and call m_done() on
* completion..
*
* Example:
* auto done = []() { ... expression evaluation done ... };
* C.device(thread_pool_device, std::move(done)) = A + B;
*/
template <typename ExpressionType, typename DeviceType, typename DoneCallback>
class TensorAsyncDevice {
public:
TensorAsyncDevice(const DeviceType& device, ExpressionType& expression,
DoneCallback done)
: m_device(device), m_expression(expression), m_done(std::move(done)) {}
template <typename OtherDerived>
EIGEN_STRONG_INLINE TensorAsyncDevice& operator=(const OtherDerived& other) {
typedef TensorAssignOp<ExpressionType, const OtherDerived> Assign;
typedef internal::TensorExecutor<const Assign, DeviceType> Executor;
Assign assign(m_expression, other);
Executor::run(assign, m_device);
m_done();
return *this;
}
protected:
const DeviceType& m_device;
ExpressionType& m_expression;
DoneCallback m_done;
};
#ifdef EIGEN_USE_THREADS
template <typename ExpressionType, typename DoneCallback>
class TensorAsyncDevice<ExpressionType, ThreadPoolDevice, DoneCallback> {
public:
TensorAsyncDevice(const ThreadPoolDevice& device, ExpressionType& expression,
DoneCallback done)
: m_device(device), m_expression(expression), m_done(std::move(done)) {}
template <typename OtherDerived>
EIGEN_STRONG_INLINE TensorAsyncDevice& operator=(const OtherDerived& other) {
typedef TensorAssignOp<ExpressionType, const OtherDerived> Assign;
typedef internal::TensorAsyncExecutor<const Assign, ThreadPoolDevice, DoneCallback> Executor;
// WARNING: After assignment 'm_done' callback will be in undefined state.
Assign assign(m_expression, other);
Executor::runAsync(assign, m_device, std::move(m_done));
return *this;
}
protected:
const ThreadPoolDevice& m_device;
ExpressionType& m_expression;
DoneCallback m_done;
};
#endif
} // end namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_DEVICE_H
| 4,896
| 34.485507
| 127
|
h
|
null |
LRMI-main/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceDefault.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_TENSOR_TENSOR_DEVICE_DEFAULT_H
#define EIGEN_CXX11_TENSOR_TENSOR_DEVICE_DEFAULT_H
namespace Eigen {
// Default device for the machine (typically a single cpu core)
struct DefaultDevice {
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void* allocate(size_t num_bytes) const {
return internal::aligned_malloc(num_bytes);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void deallocate(void* buffer) const {
internal::aligned_free(buffer);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void* allocate_temp(size_t num_bytes) const {
return allocate(num_bytes);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void deallocate_temp(void* buffer) const {
deallocate(buffer);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpy(void* dst, const void* src, size_t n) const {
::memcpy(dst, src, n);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpyHostToDevice(void* dst, const void* src, size_t n) const {
memcpy(dst, src, n);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpyDeviceToHost(void* dst, const void* src, size_t n) const {
memcpy(dst, src, n);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memset(void* buffer, int c, size_t n) const {
::memset(buffer, c, n);
}
template<typename Type>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Type get(Type data) const {
return data;
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE size_t numThreads() const {
#if !defined(EIGEN_GPU_COMPILE_PHASE)
// Running on the host CPU
return 1;
#elif defined(EIGEN_HIP_DEVICE_COMPILE)
// Running on a HIP device
return 64;
#else
// Running on a CUDA device
return 32;
#endif
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE size_t firstLevelCacheSize() const {
#if !defined(EIGEN_GPU_COMPILE_PHASE) && !defined(SYCL_DEVICE_ONLY)
// Running on the host CPU
return l1CacheSize();
#elif defined(EIGEN_HIP_DEVICE_COMPILE)
// Running on a HIP device
return 48*1024; // FIXME : update this number for HIP
#else
// Running on a CUDA device, return the amount of shared memory available.
return 48*1024;
#endif
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE size_t lastLevelCacheSize() const {
#if !defined(EIGEN_GPU_COMPILE_PHASE) && !defined(SYCL_DEVICE_ONLY)
// Running single threaded on the host CPU
return l3CacheSize();
#elif defined(EIGEN_HIP_DEVICE_COMPILE)
// Running on a HIP device
return firstLevelCacheSize(); // FIXME : update this number for HIP
#else
// Running on a CUDA device
return firstLevelCacheSize();
#endif
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int majorDeviceVersion() const {
#if !defined(EIGEN_GPU_COMPILE_PHASE)
// Running single threaded on the host CPU
// Should return an enum that encodes the ISA supported by the CPU
return 1;
#elif defined(EIGEN_HIP_DEVICE_COMPILE)
// Running on a HIP device
// return 1 as major for HIP
return 1;
#else
// Running on a CUDA device
return EIGEN_CUDA_ARCH / 100;
#endif
}
};
} // namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_DEVICE_DEFAULT_H
| 3,427
| 31.647619
| 109
|
h
|
null |
LRMI-main/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceGpu.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#if defined(EIGEN_USE_GPU) && !defined(EIGEN_CXX11_TENSOR_TENSOR_DEVICE_GPU_H)
#define EIGEN_CXX11_TENSOR_TENSOR_DEVICE_GPU_H
// This header file container defines fo gpu* macros which will resolve to
// their equivalent hip* or cuda* versions depending on the compiler in use
// A separate header (included at the end of this file) will undefine all
#include "TensorGpuHipCudaDefines.h"
namespace Eigen {
static const int kGpuScratchSize = 1024;
// This defines an interface that GPUDevice can take to use
// HIP / CUDA streams underneath.
class StreamInterface {
public:
virtual ~StreamInterface() {}
virtual const gpuStream_t& stream() const = 0;
virtual const gpuDeviceProp_t& deviceProperties() const = 0;
// Allocate memory on the actual device where the computation will run
virtual void* allocate(size_t num_bytes) const = 0;
virtual void deallocate(void* buffer) const = 0;
// Return a scratchpad buffer of size 1k
virtual void* scratchpad() const = 0;
// Return a semaphore. The semaphore is initially initialized to 0, and
// each kernel using it is responsible for resetting to 0 upon completion
// to maintain the invariant that the semaphore is always equal to 0 upon
// each kernel start.
virtual unsigned int* semaphore() const = 0;
};
class GpuDeviceProperties {
public:
GpuDeviceProperties() :
initialized_(false), first_(true), device_properties_(nullptr) {}
~GpuDeviceProperties() {
if (device_properties_) {
delete[] device_properties_;
}
}
EIGEN_STRONG_INLINE const gpuDeviceProp_t& get(int device) const {
return device_properties_[device];
}
EIGEN_STRONG_INLINE bool isInitialized() const {
return initialized_;
}
void initialize() {
if (!initialized_) {
// Attempts to ensure proper behavior in the case of multiple threads
// calling this function simultaneously. This would be trivial to
// implement if we could use std::mutex, but unfortunately mutex don't
// compile with nvcc, so we resort to atomics and thread fences instead.
// Note that if the caller uses a compiler that doesn't support c++11 we
// can't ensure that the initialization is thread safe.
if (first_.exchange(false)) {
// We're the first thread to reach this point.
int num_devices;
gpuError_t status = gpuGetDeviceCount(&num_devices);
if (status != gpuSuccess) {
std::cerr << "Failed to get the number of GPU devices: "
<< gpuGetErrorString(status)
<< std::endl;
gpu_assert(status == gpuSuccess);
}
device_properties_ = new gpuDeviceProp_t[num_devices];
for (int i = 0; i < num_devices; ++i) {
status = gpuGetDeviceProperties(&device_properties_[i], i);
if (status != gpuSuccess) {
std::cerr << "Failed to initialize GPU device #"
<< i
<< ": "
<< gpuGetErrorString(status)
<< std::endl;
gpu_assert(status == gpuSuccess);
}
}
std::atomic_thread_fence(std::memory_order_release);
initialized_ = true;
} else {
// Wait for the other thread to inititialize the properties.
while (!initialized_) {
std::atomic_thread_fence(std::memory_order_acquire);
std::this_thread::sleep_for(std::chrono::milliseconds(1000));
}
}
}
}
private:
volatile bool initialized_;
std::atomic<bool> first_;
gpuDeviceProp_t* device_properties_;
};
EIGEN_ALWAYS_INLINE const GpuDeviceProperties& GetGpuDeviceProperties() {
static GpuDeviceProperties* deviceProperties = new GpuDeviceProperties();
if (!deviceProperties->isInitialized()) {
deviceProperties->initialize();
}
return *deviceProperties;
}
EIGEN_ALWAYS_INLINE const gpuDeviceProp_t& GetGpuDeviceProperties(int device) {
return GetGpuDeviceProperties().get(device);
}
static const gpuStream_t default_stream = gpuStreamDefault;
class GpuStreamDevice : public StreamInterface {
public:
// Use the default stream on the current device
GpuStreamDevice() : stream_(&default_stream), scratch_(NULL), semaphore_(NULL) {
gpuGetDevice(&device_);
}
// Use the default stream on the specified device
GpuStreamDevice(int device) : stream_(&default_stream), device_(device), scratch_(NULL), semaphore_(NULL) {}
// Use the specified stream. Note that it's the
// caller responsibility to ensure that the stream can run on
// the specified device. If no device is specified the code
// assumes that the stream is associated to the current gpu device.
GpuStreamDevice(const gpuStream_t* stream, int device = -1)
: stream_(stream), device_(device), scratch_(NULL), semaphore_(NULL) {
if (device < 0) {
gpuGetDevice(&device_);
} else {
int num_devices;
gpuError_t err = gpuGetDeviceCount(&num_devices);
EIGEN_UNUSED_VARIABLE(err)
gpu_assert(err == gpuSuccess);
gpu_assert(device < num_devices);
device_ = device;
}
}
virtual ~GpuStreamDevice() {
if (scratch_) {
deallocate(scratch_);
}
}
const gpuStream_t& stream() const { return *stream_; }
const gpuDeviceProp_t& deviceProperties() const {
return GetGpuDeviceProperties(device_);
}
virtual void* allocate(size_t num_bytes) const {
gpuError_t err = gpuSetDevice(device_);
EIGEN_UNUSED_VARIABLE(err)
gpu_assert(err == gpuSuccess);
void* result;
err = gpuMalloc(&result, num_bytes);
gpu_assert(err == gpuSuccess);
gpu_assert(result != NULL);
return result;
}
virtual void deallocate(void* buffer) const {
gpuError_t err = gpuSetDevice(device_);
EIGEN_UNUSED_VARIABLE(err)
gpu_assert(err == gpuSuccess);
gpu_assert(buffer != NULL);
err = gpuFree(buffer);
gpu_assert(err == gpuSuccess);
}
virtual void* scratchpad() const {
if (scratch_ == NULL) {
scratch_ = allocate(kGpuScratchSize + sizeof(unsigned int));
}
return scratch_;
}
virtual unsigned int* semaphore() const {
if (semaphore_ == NULL) {
char* scratch = static_cast<char*>(scratchpad()) + kGpuScratchSize;
semaphore_ = reinterpret_cast<unsigned int*>(scratch);
gpuError_t err = gpuMemsetAsync(semaphore_, 0, sizeof(unsigned int), *stream_);
EIGEN_UNUSED_VARIABLE(err)
gpu_assert(err == gpuSuccess);
}
return semaphore_;
}
private:
const gpuStream_t* stream_;
int device_;
mutable void* scratch_;
mutable unsigned int* semaphore_;
};
// Thin device wrapper over a StreamInterface: all allocation and data
// movement is forwarded to the stream, and all transfers are asynchronous
// with respect to the host (ordered on the underlying GPU stream).
struct GpuDevice {
  // The StreamInterface is not owned: the caller is
  // responsible for its initialization and eventual destruction.
  explicit GpuDevice(const StreamInterface* stream) : stream_(stream), max_blocks_(INT_MAX) {
    eigen_assert(stream);
  }
  // Same as above, but additionally caps the number of blocks reported by
  // maxBlocks().
  explicit GpuDevice(const StreamInterface* stream, int num_blocks) : stream_(stream), max_blocks_(num_blocks) {
    eigen_assert(stream);
  }
  // TODO(bsteiner): This is an internal API, we should not expose it.
  EIGEN_STRONG_INLINE const gpuStream_t& stream() const {
    return stream_->stream();
  }
  // Allocation is delegated to the stream interface.
  EIGEN_STRONG_INLINE void* allocate(size_t num_bytes) const {
    return stream_->allocate(num_bytes);
  }
  EIGEN_STRONG_INLINE void deallocate(void* buffer) const {
    stream_->deallocate(buffer);
  }
  // Temporary allocations use the same path as regular ones.
  EIGEN_STRONG_INLINE void* allocate_temp(size_t num_bytes) const {
    return stream_->allocate(num_bytes);
  }
  EIGEN_STRONG_INLINE void deallocate_temp(void* buffer) const {
    stream_->deallocate(buffer);
  }
  template<typename Type>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Type get(Type data) const {
    // No translation needed: pointers/values are used as-is.
    return data;
  }
  EIGEN_STRONG_INLINE void* scratchpad() const {
    return stream_->scratchpad();
  }
  EIGEN_STRONG_INLINE unsigned int* semaphore() const {
    return stream_->semaphore();
  }
  // Asynchronous device-to-device copy on this device's stream.
  // Host-only: in the device compilation phase this is a hard assert.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpy(void* dst, const void* src, size_t n) const {
#ifndef EIGEN_GPU_COMPILE_PHASE
    gpuError_t err = gpuMemcpyAsync(dst, src, n, gpuMemcpyDeviceToDevice,
                                    stream_->stream());
    EIGEN_UNUSED_VARIABLE(err)
    gpu_assert(err == gpuSuccess);
#else
    EIGEN_UNUSED_VARIABLE(dst);
    EIGEN_UNUSED_VARIABLE(src);
    EIGEN_UNUSED_VARIABLE(n);
    eigen_assert(false && "The default device should be used instead to generate kernel code");
#endif
  }
  // Asynchronous host-to-device copy on this device's stream.
  EIGEN_STRONG_INLINE void memcpyHostToDevice(void* dst, const void* src, size_t n) const {
    gpuError_t err =
        gpuMemcpyAsync(dst, src, n, gpuMemcpyHostToDevice, stream_->stream());
    EIGEN_UNUSED_VARIABLE(err)
    gpu_assert(err == gpuSuccess);
  }
  // Asynchronous device-to-host copy on this device's stream.
  EIGEN_STRONG_INLINE void memcpyDeviceToHost(void* dst, const void* src, size_t n) const {
    gpuError_t err =
        gpuMemcpyAsync(dst, src, n, gpuMemcpyDeviceToHost, stream_->stream());
    EIGEN_UNUSED_VARIABLE(err)
    gpu_assert(err == gpuSuccess);
  }
  // Asynchronous memset on this device's stream (host-only).
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memset(void* buffer, int c, size_t n) const {
#ifndef EIGEN_GPU_COMPILE_PHASE
    gpuError_t err = gpuMemsetAsync(buffer, c, n, stream_->stream());
    EIGEN_UNUSED_VARIABLE(err)
    gpu_assert(err == gpuSuccess);
#else
    eigen_assert(false && "The default device should be used instead to generate kernel code");
#endif
  }
  EIGEN_STRONG_INLINE size_t numThreads() const {
    // FIXME
    return 32;
  }
  EIGEN_STRONG_INLINE size_t firstLevelCacheSize() const {
    // FIXME
    return 48*1024;
  }
  EIGEN_STRONG_INLINE size_t lastLevelCacheSize() const {
    // We won't try to take advantage of the l2 cache for the time being, and
    // there is no l3 cache on hip/cuda devices.
    return firstLevelCacheSize();
  }
  // Blocks the calling host thread until all work queued on the stream has
  // completed; prints the error before asserting if the stream is in error.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void synchronize() const {
#ifndef EIGEN_GPU_COMPILE_PHASE
    gpuError_t err = gpuStreamSynchronize(stream_->stream());
    if (err != gpuSuccess) {
      std::cerr << "Error detected in GPU stream: "
                << gpuGetErrorString(err)
                << std::endl;
      gpu_assert(err == gpuSuccess);
    }
#else
    gpu_assert(false && "The default device should be used instead to generate kernel code");
#endif
  }
  // Device capability accessors, forwarded from the stream's device
  // properties.
  EIGEN_STRONG_INLINE int getNumGpuMultiProcessors() const {
    return stream_->deviceProperties().multiProcessorCount;
  }
  EIGEN_STRONG_INLINE int maxGpuThreadsPerBlock() const {
    return stream_->deviceProperties().maxThreadsPerBlock;
  }
  EIGEN_STRONG_INLINE int maxGpuThreadsPerMultiProcessor() const {
    return stream_->deviceProperties().maxThreadsPerMultiProcessor;
  }
  EIGEN_STRONG_INLINE int sharedMemPerBlock() const {
    return stream_->deviceProperties().sharedMemPerBlock;
  }
  EIGEN_STRONG_INLINE int majorDeviceVersion() const {
    return stream_->deviceProperties().major;
  }
  EIGEN_STRONG_INLINE int minorDeviceVersion() const {
    return stream_->deviceProperties().minor;
  }
  // Upper bound on the number of blocks used for kernel launches
  // (INT_MAX unless set via the two-argument constructor).
  EIGEN_STRONG_INLINE int maxBlocks() const {
    return max_blocks_;
  }
  // This function checks if the GPU runtime recorded an error for the
  // underlying stream device.
  inline bool ok() const {
#ifdef EIGEN_GPUCC
    gpuError_t error = gpuStreamQuery(stream_->stream());
    return (error == gpuSuccess) || (error == gpuErrorNotReady);
#else
    return false;
#endif
  }
 private:
  const StreamInterface* stream_;
  int max_blocks_;
};
#if defined(EIGEN_HIPCC)
#define LAUNCH_GPU_KERNEL(kernel, gridsize, blocksize, sharedmem, device, ...) \
hipLaunchKernelGGL(kernel, dim3(gridsize), dim3(blocksize), (sharedmem), (device).stream(), __VA_ARGS__); \
gpu_assert(hipGetLastError() == hipSuccess);
#else
#define LAUNCH_GPU_KERNEL(kernel, gridsize, blocksize, sharedmem, device, ...) \
(kernel) <<< (gridsize), (blocksize), (sharedmem), (device).stream() >>> (__VA_ARGS__); \
gpu_assert(cudaGetLastError() == cudaSuccess);
#endif
// FIXME: Should be device and kernel specific.
#ifdef EIGEN_GPUCC
// Applies the given shared-memory configuration to the current device.
// Host-side only: in the device compilation phase this compiles to a no-op.
static EIGEN_DEVICE_FUNC inline void setGpuSharedMemConfig(gpuSharedMemConfig config) {
#ifndef EIGEN_GPU_COMPILE_PHASE
  gpuError_t status = gpuDeviceSetSharedMemConfig(config);
  EIGEN_UNUSED_VARIABLE(status)
  gpu_assert(status == gpuSuccess);
#else
  EIGEN_UNUSED_VARIABLE(config)
#endif
}
#endif
} // end namespace Eigen
// undefine all the gpu* macros we defined at the beginning of the file
#include "TensorGpuHipCudaUndefines.h"
#endif // EIGEN_CXX11_TENSOR_TENSOR_DEVICE_GPU_H
| 12,837
| 31.917949
| 112
|
h
|
null |
LRMI-main/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceThreadPool.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#if defined(EIGEN_USE_THREADS) && !defined(EIGEN_CXX11_TENSOR_TENSOR_DEVICE_THREAD_POOL_H)
#define EIGEN_CXX11_TENSOR_TENSOR_DEVICE_THREAD_POOL_H
namespace Eigen {
// Runs an arbitrary function and then calls Notify() on the passed in
// Notification.
template <typename Function, typename... Args> struct FunctionWrapperWithNotification
{
  // Executes the wrapped callable, then signals the (optional) Notification
  // so a waiting caller can observe completion.
  static void run(Notification* n, Function f, Args... args) {
    f(args...);
    // A null notification means nobody waits on this task.
    if (!n) return;
    n->Notify();
  }
};
template <typename Function, typename... Args> struct FunctionWrapperWithBarrier
{
  // Executes the wrapped callable, then notifies the (optional) Barrier so
  // that a rendezvous over several such tasks can complete.
  static void run(Barrier* b, Function f, Args... args) {
    f(args...);
    // A null barrier means nobody waits on this task.
    if (!b) return;
    b->Notify();
  }
};
// Blocks on the given synchronization object (anything exposing Wait(),
// e.g. Notification). A null pointer means there is nothing to wait for.
template <typename SyncType>
static EIGEN_STRONG_INLINE void wait_until_ready(SyncType* n) {
  if (!n) return;
  n->Wait();
}
// An abstract interface to a device specific memory allocator.
class Allocator {
 public:
  virtual ~Allocator() {}
  // Returns a buffer of at least `num_bytes`; the caller must release it
  // with deallocate() on the same allocator instance.
  virtual void* allocate(size_t num_bytes) const = 0;
  virtual void deallocate(void* buffer) const = 0;
};
// Build a thread pool device on top the an existing pool of threads.
struct ThreadPoolDevice {
  // The ownership of the thread pool remains with the caller.
  ThreadPoolDevice(ThreadPoolInterface* pool, int num_cores, Allocator* allocator = nullptr)
      : pool_(pool), num_threads_(num_cores), allocator_(allocator) { }

  // Uses the custom allocator when one was supplied, otherwise falls back to
  // Eigen's aligned heap allocation.
  EIGEN_STRONG_INLINE void* allocate(size_t num_bytes) const {
    return allocator_ ? allocator_->allocate(num_bytes)
        : internal::aligned_malloc(num_bytes);
  }

  EIGEN_STRONG_INLINE void deallocate(void* buffer) const {
    if (allocator_) {
      allocator_->deallocate(buffer);
    } else {
      internal::aligned_free(buffer);
    }
  }

  // Temporary buffers use the same allocation path as regular buffers.
  EIGEN_STRONG_INLINE void* allocate_temp(size_t num_bytes) const {
    return allocate(num_bytes);
  }

  EIGEN_STRONG_INLINE void deallocate_temp(void* buffer) const {
    deallocate(buffer);
  }

  template<typename Type>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Type get(Type data) const {
    // No address translation on the host: return the value as-is.
    return data;
  }

  // Blocking copy; large copies may be split across up to 4 threads.
  EIGEN_STRONG_INLINE void memcpy(void* dst, const void* src, size_t n) const {
#ifdef __ANDROID__
    ::memcpy(dst, src, n);
#else
    // TODO(rmlarsen): Align blocks on cache lines.
    // We have observed that going beyond 4 threads usually just wastes
    // CPU cycles due to the threads competing for memory bandwidth, so we
    // statically schedule at most 4 block copies here.
    const size_t kMinBlockSize = 32768;
    const size_t num_threads = CostModel::numThreads(n, TensorOpCost(1.0, 1.0, 0), 4);
    if (n <= kMinBlockSize || num_threads < 2) {
      ::memcpy(dst, src, n);
    } else {
      const char* src_ptr = static_cast<const char*>(src);
      char* dst_ptr = static_cast<char*>(dst);
      const size_t blocksize = (n + (num_threads - 1)) / num_threads;
      Barrier barrier(static_cast<int>(num_threads - 1));
      // Launch the last 3 blocks on worker threads.
      for (size_t i = 1; i < num_threads; ++i) {
        enqueue_with_barrier(&barrier, [n, i, src_ptr, dst_ptr, blocksize] {
          ::memcpy(dst_ptr + i * blocksize, src_ptr + i * blocksize,
                   numext::mini(blocksize, n - (i * blocksize)));
        });
      }
      // Launch the first block on the main thread.
      ::memcpy(dst_ptr, src_ptr, blocksize);
      barrier.Wait();
    }
#endif
  }

  // On a CPU "device" host<->device transfers are plain memcpys.
  EIGEN_STRONG_INLINE void memcpyHostToDevice(void* dst, const void* src, size_t n) const {
    memcpy(dst, src, n);
  }
  EIGEN_STRONG_INLINE void memcpyDeviceToHost(void* dst, const void* src, size_t n) const {
    memcpy(dst, src, n);
  }

  EIGEN_STRONG_INLINE void memset(void* buffer, int c, size_t n) const {
    ::memset(buffer, c, n);
  }

  // Number of threads this device is allowed to use for an expression.
  EIGEN_STRONG_INLINE int numThreads() const {
    return num_threads_;
  }

  // Number of threads available in the underlying thread pool. This number can
  // be different from the value returned by numThreads().
  EIGEN_STRONG_INLINE int numThreadsInPool() const {
    return pool_->NumThreads();
  }

  EIGEN_STRONG_INLINE size_t firstLevelCacheSize() const {
    return l1CacheSize();
  }

  EIGEN_STRONG_INLINE size_t lastLevelCacheSize() const {
    // The l3 cache size is shared between all the cores.
    return l3CacheSize() / num_threads_;
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int majorDeviceVersion() const {
    // Should return an enum that encodes the ISA supported by the CPU
    return 1;
  }

  // Schedules f(args...) on the pool and returns a heap-allocated
  // Notification signaled on completion. The caller owns the Notification.
  template <class Function, class... Args>
  EIGEN_STRONG_INLINE Notification* enqueue(Function&& f,
                                            Args&&... args) const {
    Notification* n = new Notification();
    pool_->Schedule(
        std::bind(&FunctionWrapperWithNotification<Function, Args...>::run, n,
                  std::move(f), args...));
    return n;
  }

  // Schedules f(args...) on the pool; `b` is notified when the call finishes
  // (see FunctionWrapperWithBarrier).
  template <class Function, class... Args>
  EIGEN_STRONG_INLINE void enqueue_with_barrier(Barrier* b, Function&& f,
                                                Args&&... args) const {
    pool_->Schedule(
        std::bind(&FunctionWrapperWithBarrier<Function, Args...>::run, b,
                  std::move(f), args...));
  }

  // Fire-and-forget scheduling; the no-argument case skips the std::bind
  // wrapper.
  template <class Function, class... Args>
  EIGEN_STRONG_INLINE void enqueueNoNotification(Function&& f,
                                                 Args&&... args) const {
    if (sizeof...(args) > 0) {
      pool_->Schedule(std::bind(std::move(f), args...));
    } else {
      pool_->Schedule(std::move(f));
    }
  }

  // Returns a logical thread index between 0 and pool_->NumThreads() - 1 if
  // called from one of the threads in pool_. Returns -1 otherwise.
  EIGEN_STRONG_INLINE int currentThreadId() const {
    return pool_->CurrentThreadId();
  }

  // WARNING: This function is synchronous and will block the calling thread.
  //
  // Synchronous parallelFor executes f with [0, n) arguments in parallel and
  // waits for completion. F accepts a half-open interval [first, last). Block
  // size is chosen based on the iteration cost and resulting parallel
  // efficiency. If block_align is not nullptr, it is called to round up the
  // block size.
  void parallelFor(Index n, const TensorOpCost& cost,
                   std::function<Index(Index)> block_align,
                   std::function<void(Index, Index)> f) const {
    if (EIGEN_PREDICT_FALSE(n <= 0)){
      return;
    // Compute small problems directly in the caller thread.
    } else if (n == 1 || numThreads() == 1 ||
               CostModel::numThreads(n, cost, static_cast<int>(numThreads())) == 1) {
      f(0, n);
      return;
    }

    // Compute block size and total count of blocks.
    ParallelForBlock block = CalculateParallelForBlock(n, cost, block_align);

    // Recursively divide size into halves until we reach block_size.
    // Division code rounds mid to block_size, so we are guaranteed to get
    // block_count leaves that do actual computations.
    Barrier barrier(static_cast<unsigned int>(block.count));
    std::function<void(Index, Index)> handleRange;
    // NOTE: capturing barrier/f by reference is safe only because
    // barrier.Wait() below keeps this stack frame alive until all blocks
    // have notified.
    handleRange = [=, &handleRange, &barrier, &f](Index firstIdx,
                                                  Index lastIdx) {
      while (lastIdx - firstIdx > block.size) {
        // Split into halves and schedule the second half on a different thread.
        const Index midIdx = firstIdx + divup((lastIdx - firstIdx) / 2, block.size) * block.size;
        pool_->Schedule([=, &handleRange]() { handleRange(midIdx, lastIdx); });
        lastIdx = midIdx;
      }
      // Single block or less, execute directly.
      f(firstIdx, lastIdx);
      barrier.Notify();
    };

    if (block.count <= numThreads()) {
      // Avoid a thread hop by running the root of the tree and one block on the
      // main thread.
      handleRange(0, n);
    } else {
      // Execute the root in the thread pool to avoid running work on more than
      // numThreads() threads.
      pool_->Schedule([=, &handleRange]() { handleRange(0, n); });
    }

    barrier.Wait();
  }

  // Convenience wrapper for parallelFor that does not align blocks.
  void parallelFor(Index n, const TensorOpCost& cost,
                   std::function<void(Index, Index)> f) const {
    parallelFor(n, cost, nullptr, std::move(f));
  }

  // WARNING: This function is asynchronous and will not block the calling thread.
  //
  // Asynchronous parallelFor executes f with [0, n) arguments in parallel
  // without waiting for completion. When the last block finished, it will call
  // 'done' callback. F accepts a half-open interval [first, last). Block size
  // is chosen based on the iteration cost and resulting parallel efficiency. If
  // block_align is not nullptr, it is called to round up the block size.
  void parallelForAsync(Index n, const TensorOpCost& cost,
                        std::function<Index(Index)> block_align,
                        std::function<void(Index, Index)> f,
                        std::function<void()> done) const {
    // Compute small problems directly in the caller thread.
    if (n <= 1 || numThreads() == 1 ||
        CostModel::numThreads(n, cost, static_cast<int>(numThreads())) == 1) {
      f(0, n);
      done();
      return;
    }

    // Compute block size and total count of blocks.
    ParallelForBlock block = CalculateParallelForBlock(n, cost, block_align);

    // All closures live on the heap inside ctx; the last finished block
    // deletes ctx, which in turn invokes `done` (see ~ParallelForAsyncContext).
    ParallelForAsyncContext* const ctx =
        new ParallelForAsyncContext(block.count, std::move(f), std::move(done));

    // Recursively divide size into halves until we reach block_size.
    // Division code rounds mid to block_size, so we are guaranteed to get
    // block_count leaves that do actual computations.
    ctx->handle_range = [this, ctx, block](Index firstIdx, Index lastIdx) {
      while (lastIdx - firstIdx > block.size) {
        // Split into halves and schedule the second half on a different thread.
        const Index midIdx = firstIdx + divup((lastIdx - firstIdx) / 2, block.size) * block.size;
        pool_->Schedule(
            [ctx, midIdx, lastIdx]() { ctx->handle_range(midIdx, lastIdx); });
        lastIdx = midIdx;
      }

      // Single block or less, execute directly.
      ctx->f(firstIdx, lastIdx);

      // Delete async context if it was the last block.
      if (ctx->count.fetch_sub(1) == 1) delete ctx;
    };

    if (block.count <= numThreads()) {
      // Avoid a thread hop by running the root of the tree and one block on the
      // main thread.
      ctx->handle_range(0, n);
    } else {
      // Execute the root in the thread pool to avoid running work on more than
      // numThreads() threads.
      pool_->Schedule([ctx, n]() { ctx->handle_range(0, n); });
    }
  }

  // Convenience wrapper for parallelForAsync that does not align blocks.
  void parallelForAsync(Index n, const TensorOpCost& cost,
                        std::function<void(Index, Index)> f,
                        std::function<void()> done) const {
    parallelForAsync(n, cost, nullptr, std::move(f), std::move(done));
  }

  // Thread pool accessor.
  ThreadPoolInterface* getPool() const { return pool_; }

  // Allocator accessor.
  Allocator* allocator() const { return allocator_; }

 private:
  typedef TensorCostModel<ThreadPoolDevice> CostModel;

  // For parallelForAsync we must keep passed in closures on the heap, and
  // delete them only after `done` callback finished.
  struct ParallelForAsyncContext {
    ParallelForAsyncContext(Index block_count,
                            std::function<void(Index, Index)> block_f,
                            std::function<void()> done_callback)
        : count(block_count),
          f(std::move(block_f)),
          done(std::move(done_callback)) {}
    // Destruction (triggered by the last finished block) fires `done`.
    ~ParallelForAsyncContext() { done(); }

    std::atomic<Index> count;           // blocks still in flight
    std::function<void(Index, Index)> f;
    std::function<void()> done;

    std::function<void(Index, Index)> handle_range;
  };

  struct ParallelForBlock {
    Index size;  // block size
    Index count; // number of blocks
  };

  // Calculates block size based on (1) the iteration cost and (2) parallel
  // efficiency. We want blocks to be not too small to mitigate parallelization
  // overheads; not too large to mitigate tail effect and potential load
  // imbalance and we also want number of blocks to be evenly dividable across
  // threads.
  ParallelForBlock CalculateParallelForBlock(
      const Index n, const TensorOpCost& cost,
      std::function<Index(Index)> block_align) const {
    const double block_size_f = 1.0 / CostModel::taskSize(1, cost);
    const Index max_oversharding_factor = 4;
    Index block_size = numext::mini(
        n, numext::maxi<Index>(
               divup<Index>(n, max_oversharding_factor * numThreads()),
               block_size_f));
    const Index max_block_size = numext::mini(n, 2 * block_size);

    if (block_align) {
      Index new_block_size = block_align(block_size);
      eigen_assert(new_block_size >= block_size);
      block_size = numext::mini(n, new_block_size);
    }

    Index block_count = divup(n, block_size);

    // Calculate parallel efficiency as fraction of total CPU time used for
    // computations:
    double max_efficiency =
        static_cast<double>(block_count) /
        (divup<int>(block_count, numThreads()) * numThreads());

    // Now try to increase block size up to max_block_size as long as it
    // doesn't decrease parallel efficiency.
    for (Index prev_block_count = block_count;
         max_efficiency < 1.0 && prev_block_count > 1;) {
      // This is the next block size that divides size into a smaller number
      // of blocks than the current block_size.
      Index coarser_block_size = divup(n, prev_block_count - 1);
      if (block_align) {
        Index new_block_size = block_align(coarser_block_size);
        eigen_assert(new_block_size >= coarser_block_size);
        coarser_block_size = numext::mini(n, new_block_size);
      }
      if (coarser_block_size > max_block_size) {
        break;  // Reached max block size. Stop.
      }
      // Recalculate parallel efficiency.
      const Index coarser_block_count = divup(n, coarser_block_size);
      eigen_assert(coarser_block_count < prev_block_count);
      prev_block_count = coarser_block_count;
      const double coarser_efficiency =
          static_cast<double>(coarser_block_count) /
          (divup<int>(coarser_block_count, numThreads()) * numThreads());
      if (coarser_efficiency + 0.01 >= max_efficiency) {
        // Taking it.
        block_size = coarser_block_size;
        block_count = coarser_block_count;
        if (max_efficiency < coarser_efficiency) {
          max_efficiency = coarser_efficiency;
        }
      }
    }

    return {block_size, block_count};
  }

  ThreadPoolInterface* pool_;  // not owned
  int num_threads_;
  Allocator* allocator_;       // optional, not owned
};
} // end namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_DEVICE_THREAD_POOL_H
| 15,203
| 36.082927
| 97
|
h
|
null |
LRMI-main/unsupported/Eigen/CXX11/src/Tensor/TensorDimensionList.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2015 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_TENSOR_TENSOR_DIMENSION_LIST_H
#define EIGEN_CXX11_TENSOR_TENSOR_DIMENSION_LIST_H
namespace Eigen {
/** \internal
*
* \class TensorDimensionList
* \ingroup CXX11_Tensor_Module
*
* \brief Special case of tensor index list used to list all the dimensions of a tensor of rank n.
*
* \sa Tensor
*/
template <typename Index, std::size_t Rank> struct DimensionList {
  // The i-th entry is simply i: this list statically represents the
  // identity sequence [0, 1, ..., Rank-1].
  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
  const Index operator[] (const Index i) const { return i; }
};
namespace internal {
// The compile-time length of a DimensionList is its Rank (const and
// non-const variants).
template<typename Index, std::size_t Rank> struct array_size<DimensionList<Index, Rank> > {
  static const size_t value = Rank;
};
template<typename Index, std::size_t Rank> struct array_size<const DimensionList<Index, Rank> > {
  static const size_t value = Rank;
};

// Compile-time element access: the n-th element of the identity dimension
// list is n itself, so the list instance is never inspected.
template<DenseIndex n, typename Index, std::size_t Rank> const Index array_get(DimensionList<Index, Rank>&) {
  return n;
}
template<DenseIndex n, typename Index, std::size_t Rank> const Index array_get(const DimensionList<Index, Rank>&) {
  return n;
}
#if EIGEN_HAS_CONSTEXPR
// Every entry of a DimensionList is known at compile time (entry i is i),
// so all the static-knowledge queries below answer true.
template <typename Index, std::size_t Rank>
struct index_known_statically_impl<DimensionList<Index, Rank> > {
  EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex) {
    return true;
  }
};
template <typename Index, std::size_t Rank>
struct index_known_statically_impl<const DimensionList<Index, Rank> > {
  EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex) {
    return true;
  }
};

template <typename Index, std::size_t Rank>
struct all_indices_known_statically_impl<DimensionList<Index, Rank> > {
  EIGEN_DEVICE_FUNC static constexpr bool run() {
    return true;
  }
};
template <typename Index, std::size_t Rank>
struct all_indices_known_statically_impl<const DimensionList<Index, Rank> > {
  EIGEN_DEVICE_FUNC static constexpr bool run() {
    return true;
  }
};

// The identity sequence 0, 1, ..., Rank-1 is strictly increasing.
template <typename Index, std::size_t Rank>
struct indices_statically_known_to_increase_impl<DimensionList<Index, Rank> > {
  EIGEN_DEVICE_FUNC static constexpr bool run() {
    return true;
  }
};
template <typename Index, std::size_t Rank>
struct indices_statically_known_to_increase_impl<const DimensionList<Index, Rank> > {
  EIGEN_DEVICE_FUNC static constexpr bool run() {
    return true;
  }
};
// Compile-time equality test: entry i of the identity list equals i, so
// the test reduces to comparing i against `value`.
// NOTE: EIGEN_DEVICE_FUNC added to the non-const specialization for
// consistency with the const variant and the other comparison traits
// below; without it the trait is not callable from device code.
template <typename Index, std::size_t Rank>
struct index_statically_eq_impl<DimensionList<Index, Rank> > {
  EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) {
    return i == value;
  }
};
template <typename Index, std::size_t Rank>
struct index_statically_eq_impl<const DimensionList<Index, Rank> > {
  EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) {
    return i == value;
  }
};
// Compile-time inequality test, mirroring index_statically_eq_impl.
// NOTE: EIGEN_DEVICE_FUNC added to the const specialization for
// consistency with the non-const variant and the sibling traits; without
// it the trait is not callable from device code.
template <typename Index, std::size_t Rank>
struct index_statically_ne_impl<DimensionList<Index, Rank> > {
  EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) {
    return i != value;
  }
};
template <typename Index, std::size_t Rank>
struct index_statically_ne_impl<const DimensionList<Index, Rank> > {
  EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) {
    return i != value;
  }
};
// Compile-time ordering tests: entry i of the identity list equals i, so
// comparing the entry against `value` reduces to comparing i itself.
template <typename Index, std::size_t Rank>
struct index_statically_gt_impl<DimensionList<Index, Rank> > {
  EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) {
    return i > value;
  }
};
template <typename Index, std::size_t Rank>
struct index_statically_gt_impl<const DimensionList<Index, Rank> > {
  EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) {
    return i > value;
  }
};

template <typename Index, std::size_t Rank>
struct index_statically_lt_impl<DimensionList<Index, Rank> > {
  EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) {
    return i < value;
  }
};
template <typename Index, std::size_t Rank>
struct index_statically_lt_impl<const DimensionList<Index, Rank> > {
  EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) {
    return i < value;
  }
};
#else
// Fallback when constexpr is unavailable: whether each entry is known (and
// that the identity sequence increases) is still a compile-time fact, so
// these keep returning true.
template <typename Index, std::size_t Rank>
struct index_known_statically_impl<DimensionList<Index, Rank> > {
  EIGEN_DEVICE_FUNC static EIGEN_ALWAYS_INLINE bool run(const DenseIndex) {
    return true;
  }
};
template <typename Index, std::size_t Rank>
struct index_known_statically_impl<const DimensionList<Index, Rank> > {
  EIGEN_DEVICE_FUNC static EIGEN_ALWAYS_INLINE bool run(const DenseIndex) {
    return true;
  }
};

template <typename Index, std::size_t Rank>
struct all_indices_known_statically_impl<DimensionList<Index, Rank> > {
  EIGEN_DEVICE_FUNC static EIGEN_ALWAYS_INLINE bool run() {
    return true;
  }
};
template <typename Index, std::size_t Rank>
struct all_indices_known_statically_impl<const DimensionList<Index, Rank> > {
  EIGEN_DEVICE_FUNC static EIGEN_ALWAYS_INLINE bool run() {
    return true;
  }
};

template <typename Index, std::size_t Rank>
struct indices_statically_known_to_increase_impl<DimensionList<Index, Rank> > {
  static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run() {
    return true;
  }
};
template <typename Index, std::size_t Rank>
struct indices_statically_known_to_increase_impl<const DimensionList<Index, Rank> > {
  static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run() {
    return true;
  }
};
// Without constexpr the value comparisons cannot be evaluated at compile
// time, so all of them conservatively report "not statically known"
// (false) regardless of the arguments.
template <typename Index, std::size_t Rank>
struct index_statically_eq_impl<DimensionList<Index, Rank> > {
  static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(const DenseIndex, const DenseIndex) {
    return false;
  }
};
template <typename Index, std::size_t Rank>
struct index_statically_eq_impl<const DimensionList<Index, Rank> > {
  static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(const DenseIndex, const DenseIndex) {
    return false;
  }
};

template <typename Index, std::size_t Rank>
struct index_statically_ne_impl<DimensionList<Index, Rank> > {
  static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(const DenseIndex, const DenseIndex){
    return false;
  }
};
template <typename Index, std::size_t Rank>
struct index_statically_ne_impl<const DimensionList<Index, Rank> > {
  static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(const DenseIndex, const DenseIndex) {
    return false;
  }
};

template <typename Index, std::size_t Rank>
struct index_statically_gt_impl<DimensionList<Index, Rank> > {
  static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(const DenseIndex, const DenseIndex) {
    return false;
  }
};
template <typename Index, std::size_t Rank>
struct index_statically_gt_impl<const DimensionList<Index, Rank> > {
  static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(const DenseIndex, const DenseIndex) {
    return false;
  }
};

template <typename Index, std::size_t Rank>
struct index_statically_lt_impl<DimensionList<Index, Rank> > {
  static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(const DenseIndex, const DenseIndex) {
    return false;
  }
};
template <typename Index, std::size_t Rank>
struct index_statically_lt_impl<const DimensionList<Index, Rank> > {
  static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(const DenseIndex, const DenseIndex) {
    return false;
  }
};
#endif
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_DIMENSION_LIST_H
| 7,674
| 31.383966
| 115
|
h
|
null |
LRMI-main/unsupported/Eigen/CXX11/src/Tensor/TensorForcedEval.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_TENSOR_TENSOR_FORCED_EVAL_H
#define EIGEN_CXX11_TENSOR_TENSOR_FORCED_EVAL_H
namespace Eigen {
/** \class TensorForcedEval
* \ingroup CXX11_Tensor_Module
*
* \brief Tensor reshaping class.
*
*
*/
namespace internal {
// Traits of a forced-evaluation node mirror those of the wrapped
// expression (same scalar, layout and dimensionality).
template<typename XprType>
struct traits<TensorForcedEvalOp<XprType> >
{
  // Type promotion to handle the case where the types of the lhs and the rhs are different.
  typedef typename XprType::Scalar Scalar;
  typedef traits<XprType> XprTraits;
  typedef typename traits<XprType>::StorageKind StorageKind;
  typedef typename traits<XprType>::Index Index;
  typedef typename XprType::Nested Nested;
  typedef typename remove_reference<Nested>::type _Nested;
  static const int NumDimensions = XprTraits::NumDimensions;
  static const int Layout = XprTraits::Layout;
  typedef typename XprTraits::PointerType PointerType;

  enum {
    Flags = 0
  };
};
// A forced-eval expression is evaluated through a const reference ...
template<typename XprType>
struct eval<TensorForcedEvalOp<XprType>, Eigen::Dense>
{
  typedef const TensorForcedEvalOp<XprType>& type;
};

// ... but is nested by value, so the node itself is preserved in the
// expression tree.
template<typename XprType>
struct nested<TensorForcedEvalOp<XprType>, 1, typename eval<TensorForcedEvalOp<XprType> >::type>
{
  typedef TensorForcedEvalOp<XprType> type;
};
} // end namespace internal
// Expression node that forces its child expression to be materialized into
// a temporary buffer before further use (see the evaluator below).
template<typename XprType>
class TensorForcedEvalOp : public TensorBase<TensorForcedEvalOp<XprType>, ReadOnlyAccessors>
{
  public:
  typedef typename Eigen::internal::traits<TensorForcedEvalOp>::Scalar Scalar;
  typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
  typedef typename internal::remove_const<typename XprType::CoeffReturnType>::type CoeffReturnType;
  typedef typename Eigen::internal::nested<TensorForcedEvalOp>::type Nested;
  typedef typename Eigen::internal::traits<TensorForcedEvalOp>::StorageKind StorageKind;
  typedef typename Eigen::internal::traits<TensorForcedEvalOp>::Index Index;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorForcedEvalOp(const XprType& expr)
      : m_xpr(expr) {}

    // Returns the wrapped expression that will be force-evaluated.
    EIGEN_DEVICE_FUNC
    const typename internal::remove_all<typename XprType::Nested>::type&
    expression() const { return m_xpr; }

  protected:
    typename XprType::Nested m_xpr;
};
namespace internal {
// Default-constructs the numValues elements of a freshly allocated buffer
// when CoeffReturnType is not an arithmetic type; arithmetic types are left
// uninitialized.
template <typename Device, typename CoeffReturnType>
struct non_integral_type_placement_new{
  template <typename StorageType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void operator()(Index numValues, StorageType m_buffer) {
    // Arithmetic types need no construction: leave the raw buffer as-is.
    if (internal::is_arithmetic<CoeffReturnType>::value) return;
    // Placement-new each element so non-trivial types start in a valid state.
    for (Index idx = 0; idx < numValues; ++idx) {
      new (m_buffer + idx) CoeffReturnType();
    }
  }
};
// SYCL does not support non-integral types
// having new (m_buffer + i) CoeffReturnType() causes the following compiler error for SYCL Devices
// no matching function for call to 'operator new'
template <typename CoeffReturnType>
struct non_integral_type_placement_new<Eigen::SyclDevice, CoeffReturnType> {
  // Intentionally a no-op: see the note above about SYCL devices and
  // placement new.
  template <typename StorageType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void operator()(Index, StorageType) {
  }
};
} // end namespace internal
template<typename ArgType_, typename Device>
struct TensorEvaluator<const TensorForcedEvalOp<ArgType_>, Device>
{
typedef const typename internal::remove_all<ArgType_>::type ArgType;
typedef TensorForcedEvalOp<ArgType> XprType;
typedef typename ArgType::Scalar Scalar;
typedef typename TensorEvaluator<ArgType, Device>::Dimensions Dimensions;
typedef typename XprType::Index Index;
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
static const int PacketSize = PacketType<CoeffReturnType, Device>::size;
typedef typename Eigen::internal::traits<XprType>::PointerType TensorPointerType;
typedef StorageMemory<CoeffReturnType, Device> Storage;
typedef typename Storage::Type EvaluatorPointerType;
enum {
IsAligned = true,
PacketAccess = (PacketType<CoeffReturnType, Device>::size > 1),
BlockAccess = internal::is_arithmetic<CoeffReturnType>::value,
PreferBlockAccess = false,
Layout = TensorEvaluator<ArgType, Device>::Layout,
RawAccess = true
};
static const int NumDims = internal::traits<ArgType>::NumDimensions;
//===- Tensor block evaluation strategy (see TensorBlock.h) -------------===//
typedef internal::TensorBlockDescriptor<NumDims, Index> TensorBlockDesc;
typedef internal::TensorBlockScratchAllocator<Device> TensorBlockScratch;
typedef typename internal::TensorMaterializedBlock<CoeffReturnType, NumDims,
Layout, Index>
TensorBlock;
//===--------------------------------------------------------------------===//
TensorEvaluator(const XprType& op, const Device& device)
: m_impl(op.expression(), device), m_op(op.expression()),
m_device(device), m_buffer(NULL)
{ }
EIGEN_DEVICE_FUNC const Dimensions& dimensions() const { return m_impl.dimensions(); }
EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType) {
const Index numValues = internal::array_prod(m_impl.dimensions());
m_buffer = m_device.get((CoeffReturnType*)m_device.allocate_temp(numValues * sizeof(CoeffReturnType)));
internal::non_integral_type_placement_new<Device, CoeffReturnType>()(numValues, m_buffer);
typedef TensorEvalToOp< const typename internal::remove_const<ArgType>::type > EvalTo;
EvalTo evalToTmp(m_device.get(m_buffer), m_op);
internal::TensorExecutor<
const EvalTo, typename internal::remove_const<Device>::type,
/*Vectorizable=*/internal::IsVectorizable<Device, const ArgType>::value,
/*Tiling=*/internal::IsTileable<Device, const ArgType>::value>::
run(evalToTmp, m_device);
return true;
}
#ifdef EIGEN_USE_THREADS
template <typename EvalSubExprsCallback>
EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync(
EvaluatorPointerType, EvalSubExprsCallback done) {
const Index numValues = internal::array_prod(m_impl.dimensions());
m_buffer = m_device.get((CoeffReturnType*)m_device.allocate_temp(
numValues * sizeof(CoeffReturnType)));
typedef TensorEvalToOp<const typename internal::remove_const<ArgType>::type>
EvalTo;
EvalTo evalToTmp(m_device.get(m_buffer), m_op);
auto on_done = std::bind([](EvalSubExprsCallback done_) { done_(true); },
std::move(done));
internal::TensorAsyncExecutor<
const EvalTo, typename internal::remove_const<Device>::type,
decltype(on_done),
/*Vectorizable=*/internal::IsVectorizable<Device, const ArgType>::value,
/*Tiling=*/internal::IsTileable<Device, const ArgType>::value>::
runAsync(evalToTmp, m_device, std::move(on_done));
}
#endif
EIGEN_STRONG_INLINE void cleanup() {
m_device.deallocate_temp(m_buffer);
m_buffer = NULL;
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
{
return m_buffer[index];
}
template<int LoadMode>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
{
return internal::ploadt<PacketReturnType, LoadMode>(m_buffer + index);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
internal::TensorBlockResourceRequirements getResourceRequirements() const {
return internal::TensorBlockResourceRequirements::any();
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBlock
block(TensorBlockDesc& desc, TensorBlockScratch& scratch,
bool /*root_of_expr_ast*/ = false) const {
assert(m_buffer != NULL);
return TensorBlock::materialize(m_buffer, m_impl.dimensions(), desc, scratch);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
return TensorOpCost(sizeof(CoeffReturnType), 0, 0, vectorized, PacketSize);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
EvaluatorPointerType data() const { return m_buffer; }
#ifdef EIGEN_USE_SYCL
// binding placeholder accessors to a command group handler for SYCL
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const {
m_buffer.bind(cgh);
m_impl.bind(cgh);
}
#endif
private:
TensorEvaluator<ArgType, Device> m_impl;
const ArgType m_op;
const Device EIGEN_DEVICE_REF m_device;
EvaluatorPointerType m_buffer;
};
} // end namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_FORCED_EVAL_H
| 8,782
| 35.903361
| 107
|
h
|
null |
LRMI-main/unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_TENSOR_TENSOR_FORWARD_DECLARATIONS_H
#define EIGEN_CXX11_TENSOR_TENSOR_FORWARD_DECLARATIONS_H
namespace Eigen {
// MakePointer class is used as a container of the address space of the pointer
// on the host and on the device. From the host side it generates the T* pointer
// and when EIGEN_USE_SYCL is used it construct a buffer with a map_allocator to
// T* m_data on the host. It is always called on the device.
// Specialisation of MakePointer class for creating the sycl buffer with
// map_allocator.
// Default pointer container: on non-SYCL devices a tensor pointer is plain T*.
template<typename T> struct MakePointer {
  typedef T* Type;
  typedef const T* ConstType;
};
// Strips constness from a plain pointer. The SYCL build supplies a matching
// overload for its RangeAccess handles further below.
template <typename T>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T* constCast(const T* data) {
  return const_cast<T*>(data);
}
// The StorageMemory class is a container of the device specific pointer
// used for refering to a Pointer on TensorEvaluator class. While the TensorExpression
// is a device-agnostic type and need MakePointer class for type conversion,
// the TensorEvaluator class can be specialized for a device, hence it is possible
// to construct different types of temproray storage memory in TensorEvaluator
// for different devices by specializing the following StorageMemory class.
template<typename T, typename device> struct StorageMemory: MakePointer <T> {};
namespace internal{
// Compile-time test telling whether two pointee types may share a pointer type.
template<typename A, typename B> struct Pointer_type_promotion {
  static const bool val=false;
};
// Identical types always promote to each other.
template<typename A> struct Pointer_type_promotion<A, A> {
  static const bool val = true;
};
// Pointer type used for the result when converting between types A and B.
template<typename A, typename B> struct TypeConversion {
  typedef A* type;
};
}
// Core tensor containers and the expression base class.
template<typename PlainObjectType, int Options_ = Unaligned, template <class> class MakePointer_ = MakePointer> class TensorMap;
template<typename Scalar_, int NumIndices_, int Options_ = 0, typename IndexType = DenseIndex> class Tensor;
template<typename Scalar_, typename Dimensions, int Options_ = 0, typename IndexType = DenseIndex> class TensorFixedSize;
template<typename PlainObjectType> class TensorRef;
template<typename Derived, int AccessLevel> class TensorBase;
// Coefficient-wise and selection expressions.
template<typename NullaryOp, typename PlainObjectType> class TensorCwiseNullaryOp;
template<typename UnaryOp, typename XprType> class TensorCwiseUnaryOp;
template<typename BinaryOp, typename LeftXprType, typename RightXprType> class TensorCwiseBinaryOp;
template<typename TernaryOp, typename Arg1XprType, typename Arg2XprType, typename Arg3XprType> class TensorCwiseTernaryOp;
template<typename IfXprType, typename ThenXprType, typename ElseXprType> class TensorSelectOp;
// Reductions and index/tuple (argmin/argmax style) reducers.
template<typename Op, typename Dims, typename XprType, template <class> class MakePointer_ = MakePointer > class TensorReductionOp;
template<typename XprType> class TensorIndexTupleOp;
template<typename ReduceOp, typename Dims, typename XprType> class TensorTupleReducerOp;
// Structural / data-movement expressions.
template<typename Axis, typename LeftXprType, typename RightXprType> class TensorConcatenationOp;
template<typename Dimensions, typename LeftXprType, typename RightXprType, typename OutputKernelType> class TensorContractionOp;
template<typename TargetType, typename XprType> class TensorConversionOp;
template<typename Dimensions, typename InputXprType, typename KernelXprType> class TensorConvolutionOp;
template<typename FFT, typename XprType, int FFTDataType, int FFTDirection> class TensorFFTOp;
template<typename PatchDim, typename XprType> class TensorPatchOp;
template<DenseIndex Rows, DenseIndex Cols, typename XprType> class TensorImagePatchOp;
template<DenseIndex Planes, DenseIndex Rows, DenseIndex Cols, typename XprType> class TensorVolumePatchOp;
template<typename Broadcast, typename XprType> class TensorBroadcastingOp;
template<DenseIndex DimId, typename XprType> class TensorChippingOp;
template<typename NewDimensions, typename XprType> class TensorReshapingOp;
template<typename XprType> class TensorLayoutSwapOp;
template<typename StartIndices, typename Sizes, typename XprType> class TensorSlicingOp;
template<typename ReverseDimensions, typename XprType> class TensorReverseOp;
template<typename PaddingDimensions, typename XprType> class TensorPaddingOp;
template<typename Shuffle, typename XprType> class TensorShufflingOp;
template<typename Strides, typename XprType> class TensorStridingOp;
template<typename StartIndices, typename StopIndices, typename Strides, typename XprType> class TensorStridingSlicingOp;
template<typename Strides, typename XprType> class TensorInflationOp;
template<typename Generator, typename XprType> class TensorGeneratorOp;
// Assignment, scans, traces and user-provided custom ops.
template<typename LeftXprType, typename RightXprType> class TensorAssignOp;
template<typename Op, typename XprType> class TensorScanOp;
template<typename Dims, typename XprType> class TensorTraceOp;
template<typename CustomUnaryFunc, typename XprType> class TensorCustomUnaryOp;
template<typename CustomBinaryFunc, typename LhsXprType, typename RhsXprType> class TensorCustomBinaryOp;
// Forced/explicit evaluation and device binding.
template<typename XprType, template <class> class MakePointer_ = MakePointer> class TensorEvalToOp;
template<typename XprType> class TensorForcedEvalOp;
template<typename ExpressionType, typename DeviceType> class TensorDevice;
template<typename ExpressionType, typename DeviceType, typename DoneCallback> class TensorAsyncDevice;
template<typename Derived, typename Device> struct TensorEvaluator;
struct NoOpOutputKernel;
// Supported execution devices.
struct DefaultDevice;
struct ThreadPoolDevice;
struct GpuDevice;
struct SyclDevice;
#ifdef EIGEN_USE_SYCL
// On SYCL a tensor "pointer" is a buffer range accessor, not a raw T*.
template <typename T> struct MakeSYCLPointer {
  typedef Eigen::TensorSycl::internal::RangeAccess<cl::sycl::access::mode::read_write, T> Type;
};
// SYCL overload of constCast: range accessors are already read-write, so the
// handle is returned unchanged.
template <typename T>
EIGEN_STRONG_INLINE const Eigen::TensorSycl::internal::RangeAccess<cl::sycl::access::mode::read_write, T>&
constCast(const Eigen::TensorSycl::internal::RangeAccess<cl::sycl::access::mode::read_write, T>& data) {
  return data;
}
// Route StorageMemory for (const) SyclDevice through the SYCL pointer type.
template <typename T>
struct StorageMemory<T, SyclDevice> : MakeSYCLPointer<T> {};
template <typename T>
struct StorageMemory<T, const SyclDevice> : StorageMemory<T, SyclDevice> {};
namespace TensorSycl {
namespace internal{
template <typename Evaluator, typename Op> class GenericNondeterministicReducer;
}
}
#endif
// Which part(s) of a complex FFT result to produce.
enum FFTResultType {
  RealPart = 0,
  ImagPart = 1,
  BothParts = 2
};
// Direction of the transform.
enum FFTDirection {
    FFT_FORWARD = 0,
    FFT_REVERSE = 1
};
namespace internal {
// Whether an expression can be evaluated with packet (SIMD) loads/stores on
// the given device: delegates to the evaluator's PacketAccess flag.
template <typename Device, typename Expression>
struct IsVectorizable {
  static const bool value = TensorEvaluator<Expression, Device>::PacketAccess;
};
// On GPU, vectorized evaluation additionally requires aligned accesses.
template <typename Expression>
struct IsVectorizable<GpuDevice, Expression> {
  static const bool value = TensorEvaluator<Expression, GpuDevice>::PacketAccess &&
                            TensorEvaluator<Expression, GpuDevice>::IsAligned;
};
// Tiled evaluation strategy.
enum TiledEvaluation {
  Off = 0,    // tiled evaluation is not supported
  On = 1,     // still work in progress (see TensorBlock.h)
};
template <typename Device, typename Expression>
struct IsTileable {
  // Check that block evaluation is supported and it's a preferred option (at
  // least one sub-expression has much faster block evaluation, e.g.
  // broadcasting).
  static const bool BlockAccess =
      TensorEvaluator<Expression, Device>::BlockAccess &&
      TensorEvaluator<Expression, Device>::PreferBlockAccess;
  static const TiledEvaluation value =
      BlockAccess ? TiledEvaluation::On : TiledEvaluation::Off;
};
// Executors are dispatched on device, vectorizability and tiling; the
// primary templates are defined later (TensorExecutor.h).
template <typename Expression, typename Device,
          bool Vectorizable = IsVectorizable<Device, Expression>::value,
          TiledEvaluation Tiling = IsTileable<Device, Expression>::value>
class TensorExecutor;
template <typename Expression, typename Device, typename DoneCallback,
          bool Vectorizable = IsVectorizable<Device, Expression>::value,
          TiledEvaluation Tiling = IsTileable<Device, Expression>::value>
class TensorAsyncExecutor;
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_FORWARD_DECLARATIONS_H
| 8,320
| 42.338542
| 131
|
h
|
null |
LRMI-main/unsupported/Eigen/CXX11/src/Tensor/TensorFunctors.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_TENSOR_TENSOR_FUNCTORS_H
#define EIGEN_CXX11_TENSOR_TENSOR_FUNCTORS_H
namespace Eigen {
namespace internal {
/** \internal
* \brief Template functor to compute the modulo between an array and a scalar.
*/
template <typename Scalar>
struct scalar_mod_op {
  EIGEN_DEVICE_FUNC scalar_mod_op(const Scalar& divisor) : m_divisor(divisor) {}
  // Element-wise a % m_divisor.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator() (const Scalar& a) const { return a % m_divisor; }
  const Scalar m_divisor;  // fixed right-hand operand of the modulo
};
// Cost mirrors a scalar division; no vectorized path for integer modulo.
template <typename Scalar>
struct functor_traits<scalar_mod_op<Scalar> >
{ enum { Cost = scalar_div_cost<Scalar,false>::value, PacketAccess = false }; };
/** \internal
* \brief Template functor to compute the modulo between 2 arrays.
*/
template <typename Scalar>
struct scalar_mod2_op {
  EIGEN_EMPTY_STRUCT_CTOR(scalar_mod2_op)
  // Element-wise a % b.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator() (const Scalar& a, const Scalar& b) const { return a % b; }
};
// Same cost model as scalar_mod_op; not vectorizable.
template <typename Scalar>
struct functor_traits<scalar_mod2_op<Scalar> >
{ enum { Cost = scalar_div_cost<Scalar,false>::value, PacketAccess = false }; };
// Floating-point modulo of two arrays, deferring to numext::fmod.
template <typename Scalar>
struct scalar_fmod_op {
  EIGEN_EMPTY_STRUCT_CTOR(scalar_fmod_op)
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar
  operator()(const Scalar& a, const Scalar& b) const {
    return numext::fmod(a, b);
  }
};
template <typename Scalar>
struct functor_traits<scalar_fmod_op<Scalar> > {
  enum { Cost = 13,  // Reciprocal throughput of FPREM on Haswell.
         PacketAccess = false };
};
// Default traits for reduction functors; specializations below override these
// per reducer and per device.
template<typename Reducer, typename Device>
struct reducer_traits {
  enum {
    Cost = 1,                     // per-coefficient cost estimate
    PacketAccess = false,         // has a vectorized reduce path?
    IsStateful = false,           // carries mutable per-reduction state?
    IsExactlyAssociative = true   // can partial results be regrouped freely?
  };
};
// Standard reduction functors
// Reducer computing the sum of all reduced coefficients. Stateless: the
// running total lives entirely in the caller-provided accumulator.
template <typename T> struct SumReducer
{
  // Fold one scalar into the accumulator.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T t, T* accum) const {
    internal::scalar_sum_op<T> sum_op;
    *accum = sum_op(*accum, t);
  }
  // Fold one packet into the packet accumulator.
  template <typename Packet>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reducePacket(const Packet& p, Packet* accum) const {
    (*accum) = padd<Packet>(*accum, p);
  }
  // Identity element of the sum: 0 cast to T.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T initialize() const {
    internal::scalar_cast_op<int, T> conv;
    return conv(0);
  }
  template <typename Packet>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet initializePacket() const {
    return pset1<Packet>(initialize());
  }
  // Sums need no post-processing.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalize(const T accum) const {
    return accum;
  }
  template <typename Packet>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet finalizePacket(const Packet& vaccum) const {
    return vaccum;
  }
  // Combine the scalar accumulator with the horizontal sum of the packet one.
  template <typename Packet>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalizeBoth(const T saccum, const Packet& vaccum) const {
    internal::scalar_sum_op<T> sum_op;
    return sum_op(saccum, predux(vaccum));
  }
};
template <typename T, typename Device>
struct reducer_traits<SumReducer<T>, Device> {
  enum {
    Cost = NumTraits<T>::AddCost,
    PacketAccess = PacketType<T, Device>::HasAdd,
    IsStateful = false,
    // Floating-point addition is only approximately associative.
    IsExactlyAssociative = NumTraits<T>::IsInteger
  };
};
// Reducer computing the arithmetic mean. Stateful: it counts how many scalars
// and packets were folded in so finalize*() can divide by the element count.
template <typename T> struct MeanReducer
{
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  MeanReducer() : scalarCount_(0), packetCount_(0) { }
  // Fold one scalar and bump the scalar counter.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T t, T* accum) {
    internal::scalar_sum_op<T> sum_op;
    *accum = sum_op(*accum, t);
    scalarCount_++;
  }
  // Fold one packet and bump the packet counter.
  template <typename Packet>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reducePacket(const Packet& p, Packet* accum) {
    (*accum) = padd<Packet>(*accum, p);
    packetCount_++;
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T initialize() const {
    internal::scalar_cast_op<int, T> conv;
    return conv(0);
  }
  template <typename Packet>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet initializePacket() const {
    return pset1<Packet>(initialize());
  }
  // Scalar-only path: divide the running sum by the number of scalars seen.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalize(const T accum) const {
    internal::scalar_quotient_op<T> quotient_op;
    return quotient_op(accum, T(scalarCount_));
  }
  // Packet-only path: divide element-wise by the number of packets seen.
  template <typename Packet>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet finalizePacket(const Packet& vaccum) const {
    return pdiv(vaccum, pset1<Packet>(T(packetCount_)));
  }
  // Mixed path: combine both partial sums, then divide by the total number of
  // coefficients (scalars plus packet lanes).
  template <typename Packet>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalizeBoth(const T saccum, const Packet& vaccum) const {
    internal::scalar_sum_op<T> sum_op;
    internal::scalar_quotient_op<T> quotient_op;
    return quotient_op(
      sum_op(saccum, predux(vaccum)),
      T(scalarCount_ + packetCount_ * unpacket_traits<Packet>::size));
  }
  protected:
    DenseIndex scalarCount_;  // number of scalars folded via reduce()
    DenseIndex packetCount_;  // number of packets folded via reducePacket()
};
template <typename T, typename Device>
struct reducer_traits<MeanReducer<T>, Device> {
  enum {
    Cost = NumTraits<T>::AddCost,
    // Requires add and div; the packet path is disabled for integer types
    // (presumably because integer division truncates — note the scalar path
    // still uses integer division for integer T).
    PacketAccess = PacketType<T, Device>::HasAdd &&
                   PacketType<T, Device>::HasDiv && !NumTraits<T>::IsInteger,
    IsStateful = true,
    IsExactlyAssociative = NumTraits<T>::IsInteger
  };
};
// Neutral starting value for min/max reductions, selected by whether we
// reduce towards the max or the min and whether T is an integer type
// (integers have no infinity, so lowest()/highest() are used instead).
template <typename T, bool IsMax = true, bool IsInteger = true>
struct MinMaxBottomValue {
  // Max reduction over integers: start from the lowest finite value.
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE T bottom_value() {
    return Eigen::NumTraits<T>::lowest();
  }
};
// Max reduction over non-integers: start from -infinity.
template <typename T>
struct MinMaxBottomValue<T, true, false> {
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE T bottom_value() {
    return -Eigen::NumTraits<T>::infinity();
  }
};
// Min reduction over integers: start from the highest finite value.
template <typename T>
struct MinMaxBottomValue<T, false, true> {
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE T bottom_value() {
    return Eigen::NumTraits<T>::highest();
  }
};
// Min reduction over non-integers: start from +infinity.
template <typename T>
struct MinMaxBottomValue<T, false, false> {
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE T bottom_value() {
    return Eigen::NumTraits<T>::infinity();
  }
};
// Reducer computing the maximum. NaNPropagation selects how NaNs are treated
// by the underlying scalar_max_op.
template <typename T, int NaNPropagation=PropagateFast> struct MaxReducer
{
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T t, T* accum) const {
    scalar_max_op<T, T, NaNPropagation> op;
    *accum = op(t, *accum);
  }
  template <typename Packet>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reducePacket(const Packet& p, Packet* accum) const {
    scalar_max_op<T, T, NaNPropagation> op;
    (*accum) = op.packetOp(*accum, p);
  }
  // Identity element: the smallest value T can take (see MinMaxBottomValue).
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T initialize() const {
    return MinMaxBottomValue<T, /*IsMax=*/true, Eigen::NumTraits<T>::IsInteger>::bottom_value();
  }
  template <typename Packet>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet initializePacket() const {
    return pset1<Packet>(initialize());
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalize(const T accum) const {
    return accum;
  }
  template <typename Packet>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet finalizePacket(const Packet& vaccum) const {
    return vaccum;
  }
  // Combine the scalar accumulator with the horizontal max of the packet one.
  template <typename Packet>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalizeBoth(const T saccum, const Packet& vaccum) const {
    scalar_max_op<T, T, NaNPropagation> op;
    return op(saccum, op.predux(vaccum));
  }
};
template <typename T, typename Device, int NaNPropagation>
struct reducer_traits<MaxReducer<T, NaNPropagation>, Device> {
  enum {
    Cost = NumTraits<T>::AddCost,
    PacketAccess = PacketType<T, Device>::HasMax,
    IsStateful = false,
    // With PropagateFast the NaN outcome depends on evaluation order.
    IsExactlyAssociative = (NaNPropagation!=PropagateFast)
  };
};
// Reducer computing the minimum; mirror image of MaxReducer above.
template <typename T, int NaNPropagation=PropagateFast> struct MinReducer
{
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T t, T* accum) const {
    scalar_min_op<T, T, NaNPropagation> op;
    *accum = op(t, *accum);
  }
  template <typename Packet>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reducePacket(const Packet& p, Packet* accum) const {
    scalar_min_op<T, T, NaNPropagation> op;
    (*accum) = op.packetOp(*accum, p);
  }
  // Identity element: the largest value T can take (see MinMaxBottomValue).
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T initialize() const {
    return MinMaxBottomValue<T, /*IsMax=*/false, Eigen::NumTraits<T>::IsInteger>::bottom_value();
  }
  template <typename Packet>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet initializePacket() const {
    return pset1<Packet>(initialize());
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalize(const T accum) const {
    return accum;
  }
  template <typename Packet>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet finalizePacket(const Packet& vaccum) const {
    return vaccum;
  }
  // Combine the scalar accumulator with the horizontal min of the packet one.
  template <typename Packet>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalizeBoth(const T saccum, const Packet& vaccum) const {
    scalar_min_op<T, T, NaNPropagation> op;
    return op(saccum, op.predux(vaccum));
  }
};
template <typename T, typename Device, int NaNPropagation>
struct reducer_traits<MinReducer<T, NaNPropagation>, Device> {
  enum {
    Cost = NumTraits<T>::AddCost,
    PacketAccess = PacketType<T, Device>::HasMin,
    IsStateful = false,
    // With PropagateFast the NaN outcome depends on evaluation order.
    IsExactlyAssociative = (NaNPropagation!=PropagateFast)
  };
};
// Reducer computing the product of all reduced coefficients. Stateless.
template <typename T> struct ProdReducer
{
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T t, T* accum) const {
    internal::scalar_product_op<T> prod_op;
    (*accum) = prod_op(*accum, t);
  }
  template <typename Packet>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reducePacket(const Packet& p, Packet* accum) const {
    (*accum) = pmul<Packet>(*accum, p);
  }
  // Identity element of the product: 1 cast to T.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T initialize() const {
    internal::scalar_cast_op<int, T> conv;
    return conv(1);
  }
  template <typename Packet>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet initializePacket() const {
    return pset1<Packet>(initialize());
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalize(const T accum) const {
    return accum;
  }
  template <typename Packet>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet finalizePacket(const Packet& vaccum) const {
    return vaccum;
  }
  // Combine the scalar accumulator with the horizontal product of the packet
  // accumulator.
  template <typename Packet>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalizeBoth(const T saccum, const Packet& vaccum) const {
    internal::scalar_product_op<T> prod_op;
    return prod_op(saccum, predux_mul(vaccum));
  }
};
template <typename T, typename Device>
struct reducer_traits<ProdReducer<T>, Device> {
  enum {
    Cost = NumTraits<T>::MulCost,
    PacketAccess = PacketType<T, Device>::HasMul,
    IsStateful = false,
    IsExactlyAssociative = true
  };
};
// Reducer computing the logical AND of all reduced values: the accumulator
// starts at true and becomes (and stays) false once any value is false.
struct AndReducer
{
  // Fold one boolean into the accumulator.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(bool value, bool* accum) const {
    if (!value) {
      *accum = false;
    }
  }
  // Identity element for AND.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool initialize() const { return true; }
  // The accumulated value needs no post-processing.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool finalize(bool accum) const { return accum; }
};
template <typename Device>
struct reducer_traits<AndReducer, Device> {
  enum {
    Cost = 1,
    PacketAccess = false,  // boolean reduction runs scalar-only
    IsStateful = false,
    IsExactlyAssociative = true
  };
};
// Reducer computing the logical OR of all reduced values: the accumulator
// starts at false and becomes (and stays) true once any value is true.
struct OrReducer {
  // Fold one boolean into the accumulator.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(bool value, bool* accum) const {
    if (value) {
      *accum = true;
    }
  }
  // Identity element for OR.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool initialize() const { return false; }
  // The accumulated value needs no post-processing.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool finalize(bool accum) const { return accum; }
};
template <typename Device>
struct reducer_traits<OrReducer, Device> {
  enum {
    Cost = 1,
    PacketAccess = false,  // boolean reduction runs scalar-only
    IsStateful = false,
    IsExactlyAssociative = true
  };
};
// Argmin/Argmax reducers. Returns the first occurrence if multiple locations
// contain the same min/max value.
// Reduces (index, value) tuples to the tuple holding the maximum value; ties
// on the value keep the smallest index, i.e. the first occurrence.
template <typename T> struct ArgMaxTupleReducer
{
  // Fold one tuple into the accumulator. Takes the tuple by const reference
  // (it is a non-trivial pair type), matching ArgMinTupleReducer::reduce and
  // avoiding a copy of the argument on every invocation.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T& t, T* accum) const {
    if (t.second < accum->second) {
      return;
    } else if (t.second > accum->second || accum->first > t.first) {
      *accum = t;
    }
  }
  // Start from index 0 paired with the lowest representable value.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T initialize() const {
    return T(0, NumTraits<typename T::second_type>::lowest());
  }
  // The accumulated tuple is the final result.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalize(const T& accum) const {
    return accum;
  }
};
template <typename T, typename Device>
struct reducer_traits<ArgMaxTupleReducer<T>, Device> {
  enum {
    Cost = NumTraits<T>::AddCost,
    PacketAccess = false,  // tuple comparisons have no packet form
    IsStateful = false,
    IsExactlyAssociative = true
  };
};
// Reduces (index, value) tuples to the tuple holding the minimum value; ties
// on the value keep the smallest index, i.e. the first occurrence.
template <typename T> struct ArgMinTupleReducer
{
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T& t, T* accum) const {
    if (t.second > accum->second) {
      return;
    } else if (t.second < accum->second || accum->first > t.first) {
      *accum = t;
    }
  }
  // Start from index 0 paired with the highest representable value.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T initialize() const {
    return T(0, NumTraits<typename T::second_type>::highest());
  }
  // The accumulated tuple is the final result.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalize(const T& accum) const {
    return accum;
  }
};
template <typename T, typename Device>
struct reducer_traits<ArgMinTupleReducer<T>, Device> {
  enum {
    Cost = NumTraits<T>::AddCost,
    PacketAccess = false,  // tuple comparisons have no packet form
    IsStateful = false,
    IsExactlyAssociative = true
  };
};
// Generator producing an unnormalized NumDims-dimensional Gaussian, evaluated
// per coordinate c as exp(-sum_i (c_i - mean_i)^2 / (2 * sigma_i^2)).
template <typename T, typename Index, size_t NumDims>
class GaussianGenerator {
 public:
  static const bool PacketAccess = false;
  EIGEN_DEVICE_FUNC GaussianGenerator(const array<T, NumDims>& means,
                                      const array<T, NumDims>& std_devs)
      : m_means(means)
  {
    EIGEN_UNROLL_LOOP
    for (size_t i = 0; i < NumDims; ++i) {
      // Precompute 2*sigma^2 per dimension so operator() only divides.
      m_two_sigmas[i] = std_devs[i] * std_devs[i] * 2;
    }
  }
  EIGEN_DEVICE_FUNC T operator()(const array<Index, NumDims>& coordinates) const {
    T tmp = T(0);
    EIGEN_UNROLL_LOOP
    for (size_t i = 0; i < NumDims; ++i) {
      T offset = coordinates[i] - m_means[i];
      tmp += offset * offset / m_two_sigmas[i];
    }
    return numext::exp(-tmp);
  }
 private:
  array<T, NumDims> m_means;       // per-dimension center
  array<T, NumDims> m_two_sigmas;  // per-dimension 2*sigma^2
};
// Cost: per dimension one subtraction + one addition, one multiply and one
// divide, plus a single exponential at the end.
template <typename T, typename Index, size_t NumDims>
struct functor_traits<GaussianGenerator<T, Index, NumDims> > {
  enum {
    Cost = NumDims * (2 * NumTraits<T>::AddCost + NumTraits<T>::MulCost +
                      functor_traits<scalar_quotient_op<T, T> >::Cost) +
           functor_traits<scalar_exp_op<T> >::Cost,
    PacketAccess = GaussianGenerator<T, Index, NumDims>::PacketAccess
  };
};
// Clamps each coefficient to the closed interval [m_min, m_max].
template <typename Scalar>
struct scalar_clamp_op {
  EIGEN_DEVICE_FUNC inline scalar_clamp_op(const Scalar& _min, const Scalar& _max) : m_min(_min), m_max(_max) {}
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar
  operator()(const Scalar& x) const {
    return numext::mini(numext::maxi(x, m_min), m_max);
  }
  // Vectorized clamp built from packet min/max.
  template <typename Packet>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet
  packetOp(const Packet& x) const {
    return internal::pmin(internal::pmax(x, pset1<Packet>(m_min)), pset1<Packet>(m_max));
  }
  const Scalar m_min;  // lower bound (inclusive)
  const Scalar m_max;  // upper bound (inclusive)
};
template<typename Scalar>
struct functor_traits<scalar_clamp_op<Scalar> >
{ enum { Cost = 2 * NumTraits<Scalar>::AddCost, PacketAccess = (packet_traits<Scalar>::HasMin && packet_traits<Scalar>::HasMax)}; };
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_FUNCTORS_H
| 15,269
| 30.226994
| 132
|
h
|
null |
LRMI-main/unsupported/Eigen/CXX11/src/Tensor/TensorGenerator.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2015 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_TENSOR_TENSOR_GENERATOR_H
#define EIGEN_CXX11_TENSOR_TENSOR_GENERATOR_H
namespace Eigen {
/** \class TensorGeneratorOp
* \ingroup CXX11_Tensor_Module
*
* \brief Tensor generator class.
*
*
*/
namespace internal {
// Expression traits: a generator expression inherits shape, layout and index
// types from its argument; only the coefficient values come from Generator.
template<typename Generator, typename XprType>
struct traits<TensorGeneratorOp<Generator, XprType> > : public traits<XprType>
{
  typedef typename XprType::Scalar Scalar;
  typedef traits<XprType> XprTraits;
  typedef typename XprTraits::StorageKind StorageKind;
  typedef typename XprTraits::Index Index;
  typedef typename XprType::Nested Nested;
  typedef typename remove_reference<Nested>::type _Nested;
  static const int NumDimensions = XprTraits::NumDimensions;
  static const int Layout = XprTraits::Layout;
  typedef typename XprTraits::PointerType PointerType;
};
// Generator expressions are evaluated through a const reference.
template<typename Generator, typename XprType>
struct eval<TensorGeneratorOp<Generator, XprType>, Eigen::Dense>
{
  typedef const TensorGeneratorOp<Generator, XprType>& type;
};
// Type used when this expression is nested inside another expression.
template<typename Generator, typename XprType>
struct nested<TensorGeneratorOp<Generator, XprType>, 1, typename eval<TensorGeneratorOp<Generator, XprType> >::type>
{
  typedef TensorGeneratorOp<Generator, XprType> type;
};
} // end namespace internal
template<typename Generator, typename XprType>
class TensorGeneratorOp : public TensorBase<TensorGeneratorOp<Generator, XprType>, ReadOnlyAccessors>
{
  public:
  typedef typename Eigen::internal::traits<TensorGeneratorOp>::Scalar Scalar;
  typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename Eigen::internal::nested<TensorGeneratorOp>::type Nested;
  typedef typename Eigen::internal::traits<TensorGeneratorOp>::StorageKind StorageKind;
  typedef typename Eigen::internal::traits<TensorGeneratorOp>::Index Index;
  // Wraps \p expr (whose dimensions define the output shape) together with
  // \p generator, which is invoked per coordinate to produce coefficients.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorGeneratorOp(const XprType& expr, const Generator& generator)
      : m_xpr(expr), m_generator(generator) {}
  // Functor producing coefficients from coordinate arrays.
  EIGEN_DEVICE_FUNC
  const Generator& generator() const { return m_generator; }
  // Underlying expression supplying the output dimensions.
  EIGEN_DEVICE_FUNC
  const typename internal::remove_all<typename XprType::Nested>::type&
  expression() const { return m_xpr; }
  protected:
    typename XprType::Nested m_xpr;
    const Generator m_generator;
};
// Eval as rvalue
template<typename Generator, typename ArgType, typename Device>
struct TensorEvaluator<const TensorGeneratorOp<Generator, ArgType>, Device>
{
typedef TensorGeneratorOp<Generator, ArgType> XprType;
typedef typename XprType::Index Index;
typedef typename TensorEvaluator<ArgType, Device>::Dimensions Dimensions;
static const int NumDims = internal::array_size<Dimensions>::value;
typedef typename XprType::Scalar Scalar;
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
typedef StorageMemory<CoeffReturnType, Device> Storage;
typedef typename Storage::Type EvaluatorPointerType;
enum {
IsAligned = false,
PacketAccess = (PacketType<CoeffReturnType, Device>::size > 1),
BlockAccess = true,
PreferBlockAccess = true,
Layout = TensorEvaluator<ArgType, Device>::Layout,
CoordAccess = false, // to be implemented
RawAccess = false
};
typedef internal::TensorIntDivisor<Index> IndexDivisor;
//===- Tensor block evaluation strategy (see TensorBlock.h) -------------===//
typedef internal::TensorBlockDescriptor<NumDims, Index> TensorBlockDesc;
typedef internal::TensorBlockScratchAllocator<Device> TensorBlockScratch;
typedef typename internal::TensorMaterializedBlock<CoeffReturnType, NumDims,
Layout, Index>
TensorBlock;
//===--------------------------------------------------------------------===//
EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
: m_device(device), m_generator(op.generator())
{
TensorEvaluator<ArgType, Device> argImpl(op.expression(), device);
m_dimensions = argImpl.dimensions();
if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
m_strides[0] = 1;
EIGEN_UNROLL_LOOP
for (int i = 1; i < NumDims; ++i) {
m_strides[i] = m_strides[i - 1] * m_dimensions[i - 1];
if (m_strides[i] != 0) m_fast_strides[i] = IndexDivisor(m_strides[i]);
}
} else {
m_strides[NumDims - 1] = 1;
EIGEN_UNROLL_LOOP
for (int i = NumDims - 2; i >= 0; --i) {
m_strides[i] = m_strides[i + 1] * m_dimensions[i + 1];
if (m_strides[i] != 0) m_fast_strides[i] = IndexDivisor(m_strides[i]);
}
}
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType /*data*/) {
return true;
}
EIGEN_STRONG_INLINE void cleanup() {
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
{
array<Index, NumDims> coords;
extract_coordinates(index, coords);
return m_generator(coords);
}
template<int LoadMode>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
{
const int packetSize = PacketType<CoeffReturnType, Device>::size;
EIGEN_STATIC_ASSERT((packetSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
eigen_assert(index+packetSize-1 < dimensions().TotalSize());
EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[packetSize];
for (int i = 0; i < packetSize; ++i) {
values[i] = coeff(index+i);
}
PacketReturnType rslt = internal::pload<PacketReturnType>(values);
return rslt;
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
internal::TensorBlockResourceRequirements getResourceRequirements() const {
const size_t target_size = m_device.firstLevelCacheSize();
// TODO(ezhulenev): Generator should have a cost.
return internal::TensorBlockResourceRequirements::skewed<Scalar>(
target_size);
}
struct BlockIteratorState {
Index stride;
Index span;
Index size;
Index count;
};
  // Materializes the block described by `desc` into scratch-backed storage by
  // querying the generator coordinate-wise. Iteration state is kept in
  // inner-most -> outer-most order regardless of Layout, while `coords`
  // tracks the spatial coordinates (in storage order) of the current element.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBlock
  block(TensorBlockDesc& desc, TensorBlockScratch& scratch,
          bool /*root_of_expr_ast*/ = false) const {
    static const bool is_col_major =
        static_cast<int>(Layout) == static_cast<int>(ColMajor);
    // Compute spatial coordinates for the first block element.
    array<Index, NumDims> coords;
    extract_coordinates(desc.offset(), coords);
    array<Index, NumDims> initial_coords = coords;
    // Offset in the output block buffer.
    Index offset = 0;
    // Initialize output block iterator state. Dimension in this array are
    // always in inner_most -> outer_most order (col major layout).
    array<BlockIteratorState, NumDims> it;
    for (int i = 0; i < NumDims; ++i) {
      const int dim = is_col_major ? i : NumDims - 1 - i;
      it[i].size = desc.dimension(dim);
      it[i].stride = i == 0 ? 1 : (it[i - 1].size * it[i - 1].stride);
      it[i].span = it[i].stride * (it[i].size - 1);
      it[i].count = 0;
    }
    eigen_assert(it[0].stride == 1);
    // Prepare storage for the materialized generator result.
    const typename TensorBlock::Storage block_storage =
        TensorBlock::prepareStorage(desc, scratch);
    CoeffReturnType* block_buffer = block_storage.data();
    static const int packet_size = PacketType<CoeffReturnType, Device>::size;
    static const int inner_dim = is_col_major ? 0 : NumDims - 1;
    const Index inner_dim_size = it[0].size;
    const Index inner_dim_vectorized = inner_dim_size - packet_size;
    // Outer loop: one iteration per "row" of the block (all dims but the
    // inner-most fixed); terminates when the outer-most counter is exhausted.
    while (it[NumDims - 1].count < it[NumDims - 1].size) {
      Index i = 0;
      // Generate data for the vectorized part of the inner-most dimension.
      for (; i <= inner_dim_vectorized; i += packet_size) {
        for (Index j = 0; j < packet_size; ++j) {
          array<Index, NumDims> j_coords = coords;  // Break loop dependence.
          j_coords[inner_dim] += j;
          *(block_buffer + offset + i + j) = m_generator(j_coords);
        }
        coords[inner_dim] += packet_size;
      }
      // Finalize non-vectorized part of the inner-most dimension.
      for (; i < inner_dim_size; ++i) {
        *(block_buffer + offset + i) = m_generator(coords);
        coords[inner_dim]++;
      }
      // Rewind the inner-most coordinate for the next row.
      coords[inner_dim] = initial_coords[inner_dim];
      // For the 1d tensor we need to generate only one inner-most dimension.
      if (NumDims == 1) break;
      // Update offset: odometer-style carry over the outer dimensions.
      for (i = 1; i < NumDims; ++i) {
        if (++it[i].count < it[i].size) {
          offset += it[i].stride;
          coords[is_col_major ? i : NumDims - 1 - i]++;
          break;
        }
        if (i != NumDims - 1) it[i].count = 0;
        coords[is_col_major ? i : NumDims - 1 - i] =
            initial_coords[is_col_major ? i : NumDims - 1 - i];
        offset -= it[i].span;
      }
    }
    return block_storage.AsTensorMaterializedBlock();
  }
  // Rough per-coefficient cost estimate; the generator functor's own cost is
  // not accounted for yet (see TODO).
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
  costPerCoeff(bool) const {
    // TODO(rmlarsen): This is just a placeholder. Define interface to make
    // generators return their cost.
    return TensorOpCost(0, 0, TensorOpCost::AddCost<Scalar>() +
                                  TensorOpCost::MulCost<Scalar>());
  }
  // No backing buffer: coefficients are produced on demand by the generator.
  EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return NULL; }
#ifdef EIGEN_USE_SYCL
  // binding placeholder accessors to a command group handler for SYCL
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler&) const {}
#endif
 protected:
  // Converts a linear `index` into per-dimension coordinates `coords`,
  // peeling dimensions from outer-most to inner-most using the precomputed
  // fast divisors. Layout decides which end of the array is inner-most.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  void extract_coordinates(Index index, array<Index, NumDims>& coords) const {
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      for (int i = NumDims - 1; i > 0; --i) {
        const Index idx = index / m_fast_strides[i];
        index -= idx * m_strides[i];
        coords[i] = idx;
      }
      coords[0] = index;
    } else {
      for (int i = 0; i < NumDims - 1; ++i) {
        const Index idx = index / m_fast_strides[i];
        index -= idx * m_strides[i];
        coords[i] = idx;
      }
      coords[NumDims-1] = index;
    }
  }
  const Device EIGEN_DEVICE_REF m_device;          // evaluation device
  Dimensions m_dimensions;                         // output dimensions
  array<Index, NumDims> m_strides;                 // linear-index strides
  array<IndexDivisor, NumDims> m_fast_strides;     // fast divisors for m_strides
  Generator m_generator;                           // user-supplied coefficient generator
};
} // end namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_GENERATOR_H
| 10,920
| 35.042904
| 116
|
h
|
null |
LRMI-main/unsupported/Eigen/CXX11/src/Tensor/TensorGlobalFunctions.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2016 Eugene Brevdo <ebrevdo@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_TENSOR_TENSOR_GLOBAL_FUNCTIONS_H
#define EIGEN_CXX11_TENSOR_TENSOR_GLOBAL_FUNCTIONS_H
namespace Eigen {
/** \cpp11 \returns an expression of the coefficient-wise betainc(\a x, \a a, \a b) to the given tensors.
*
* This function computes the regularized incomplete beta function (integral).
*
*/
template <typename ADerived, typename BDerived, typename XDerived>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const
    TensorCwiseTernaryOp<internal::scalar_betainc_op<typename XDerived::Scalar>,
                         const ADerived, const BDerived, const XDerived>
    betainc(const ADerived& a, const BDerived& b, const XDerived& x) {
  // Name the functor and expression types once to keep the return readable.
  typedef internal::scalar_betainc_op<typename XDerived::Scalar> BetaIncOp;
  typedef TensorCwiseTernaryOp<BetaIncOp, const ADerived, const BDerived,
                               const XDerived> ResultType;
  return ResultType(a, b, x, BetaIncOp());
}
} // end namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_GLOBAL_FUNCTIONS_H
| 1,316
| 37.735294
| 105
|
h
|
null |
LRMI-main/unsupported/Eigen/CXX11/src/Tensor/TensorGpuHipCudaDefines.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
// Copyright (C) 2018 Deven Desai <deven.desai.amd@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#if defined(EIGEN_USE_GPU) && !defined(EIGEN_CXX11_TENSOR_GPU_HIP_CUDA_DEFINES_H)
#define EIGEN_CXX11_TENSOR_GPU_HIP_CUDA_DEFINES_H
// Note that we are using EIGEN_USE_HIP here instead of EIGEN_HIPCC...this is by design
// There is code in the Tensorflow codebase that will define EIGEN_USE_GPU, but
// for some reason gets sent to the gcc/host compiler instead of the gpu/nvcc/hipcc compiler
// When compiling such files, gcc will end up trying to pick up the CUDA headers by
// default (see the code within "unsupported/Eigen/CXX11/Tensor" that is guarded by EIGEN_USE_GPU)
// This will obviously not work when trying to compile tensorflow on a system with no CUDA
// To work around this issue for HIP systems (and leave the default behaviour intact), the
// HIP tensorflow build defines EIGEN_USE_HIP when compiling all source files, and
// "unsupported/Eigen/CXX11/Tensor" has been updated to use HIP header when EIGEN_USE_HIP is
// defined. In continuation of that requirement, the guard here needs to be EIGEN_USE_HIP as well
//
// Each gpu* macro below aliases the corresponding runtime symbol so that the
// rest of the Tensor module can be written once against a vendor-neutral
// gpu* API. These aliases are removed again by TensorGpuHipCudaUndefines.h.
#if defined(EIGEN_USE_HIP)
// HIP backend: map the gpu* API onto the hip* runtime.
#define gpuStream_t hipStream_t
#define gpuDeviceProp_t hipDeviceProp_t
#define gpuError_t hipError_t
#define gpuSuccess hipSuccess
#define gpuErrorNotReady hipErrorNotReady
#define gpuGetDeviceCount hipGetDeviceCount
#define gpuGetLastError hipGetLastError
#define gpuPeekAtLastError hipPeekAtLastError
#define gpuGetErrorName hipGetErrorName
#define gpuGetErrorString hipGetErrorString
#define gpuGetDeviceProperties hipGetDeviceProperties
#define gpuStreamDefault hipStreamDefault
#define gpuGetDevice hipGetDevice
#define gpuSetDevice hipSetDevice
#define gpuMalloc hipMalloc
#define gpuFree hipFree
#define gpuMemsetAsync hipMemsetAsync
#define gpuMemcpyAsync hipMemcpyAsync
#define gpuMemcpyDeviceToDevice hipMemcpyDeviceToDevice
#define gpuMemcpyDeviceToHost hipMemcpyDeviceToHost
#define gpuMemcpyHostToDevice hipMemcpyHostToDevice
#define gpuStreamQuery hipStreamQuery
#define gpuSharedMemConfig hipSharedMemConfig
#define gpuDeviceSetSharedMemConfig hipDeviceSetSharedMemConfig
#define gpuStreamSynchronize hipStreamSynchronize
#define gpuDeviceSynchronize hipDeviceSynchronize
#define gpuMemcpy hipMemcpy
#else
// CUDA backend: map the gpu* API onto the cuda* runtime.
#define gpuStream_t cudaStream_t
#define gpuDeviceProp_t cudaDeviceProp
#define gpuError_t cudaError_t
#define gpuSuccess cudaSuccess
#define gpuErrorNotReady cudaErrorNotReady
#define gpuGetDeviceCount cudaGetDeviceCount
#define gpuGetLastError cudaGetLastError
#define gpuPeekAtLastError cudaPeekAtLastError
#define gpuGetErrorName cudaGetErrorName
#define gpuGetErrorString cudaGetErrorString
#define gpuGetDeviceProperties cudaGetDeviceProperties
#define gpuStreamDefault cudaStreamDefault
#define gpuGetDevice cudaGetDevice
#define gpuSetDevice cudaSetDevice
#define gpuMalloc cudaMalloc
#define gpuFree cudaFree
#define gpuMemsetAsync cudaMemsetAsync
#define gpuMemcpyAsync cudaMemcpyAsync
#define gpuMemcpyDeviceToDevice cudaMemcpyDeviceToDevice
#define gpuMemcpyDeviceToHost cudaMemcpyDeviceToHost
#define gpuMemcpyHostToDevice cudaMemcpyHostToDevice
#define gpuStreamQuery cudaStreamQuery
#define gpuSharedMemConfig cudaSharedMemConfig
#define gpuDeviceSetSharedMemConfig cudaDeviceSetSharedMemConfig
#define gpuStreamSynchronize cudaStreamSynchronize
#define gpuDeviceSynchronize cudaDeviceSynchronize
#define gpuMemcpy cudaMemcpy
#endif
// gpu_assert can be overridden
#ifndef gpu_assert
#if defined(EIGEN_HIP_DEVICE_COMPILE)
// HIPCC do not support the use of assert on the GPU side.
#define gpu_assert(COND)
#else
#define gpu_assert(COND) assert(COND)
#endif
#endif // gpu_assert
#endif // EIGEN_CXX11_TENSOR_GPU_HIP_CUDA_DEFINES_H
| 4,068
| 39.69
| 98
|
h
|
null |
LRMI-main/unsupported/Eigen/CXX11/src/Tensor/TensorGpuHipCudaUndefines.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
// Copyright (C) 2018 Deven Desai <deven.desai.amd@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#if defined(EIGEN_CXX11_TENSOR_GPU_HIP_CUDA_DEFINES_H)

#ifndef EIGEN_PERMANENTLY_ENABLE_GPU_HIP_CUDA_DEFINES

// Undo every gpu* alias introduced by TensorGpuHipCudaDefines.h so the
// vendor-neutral macros do not leak into user code. The list below must stay
// in sync with the #define list in that header.
// Fix: gpuGetLastError, gpuPeekAtLastError and gpuGetErrorName were defined
// but never undefined, leaking those three macros past this header.
#undef gpuStream_t
#undef gpuDeviceProp_t
#undef gpuError_t
#undef gpuSuccess
#undef gpuErrorNotReady
#undef gpuGetDeviceCount
#undef gpuGetLastError
#undef gpuPeekAtLastError
#undef gpuGetErrorName
#undef gpuGetErrorString
#undef gpuGetDeviceProperties
#undef gpuStreamDefault
#undef gpuGetDevice
#undef gpuSetDevice
#undef gpuMalloc
#undef gpuFree
#undef gpuMemsetAsync
#undef gpuMemcpyAsync
#undef gpuMemcpyDeviceToDevice
#undef gpuMemcpyDeviceToHost
#undef gpuMemcpyHostToDevice
#undef gpuStreamQuery
#undef gpuSharedMemConfig
#undef gpuDeviceSetSharedMemConfig
#undef gpuStreamSynchronize
#undef gpuDeviceSynchronize
#undef gpuMemcpy

#endif // EIGEN_PERMANENTLY_ENABLE_GPU_HIP_CUDA_DEFINES

#undef EIGEN_CXX11_TENSOR_GPU_HIP_CUDA_DEFINES_H

#endif // EIGEN_CXX11_TENSOR_GPU_HIP_CUDA_DEFINES_H
| 1,267
| 27.177778
| 69
|
h
|
null |
LRMI-main/unsupported/Eigen/CXX11/src/Tensor/TensorIO.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_TENSOR_TENSOR_IO_H
#define EIGEN_CXX11_TENSOR_TENSOR_IO_H
namespace Eigen {
namespace internal {
// Print the tensor as a 2d matrix
template <typename Tensor, int Rank>
struct TensorPrinter {
static void run (std::ostream& os, const Tensor& tensor) {
typedef typename internal::remove_const<typename Tensor::Scalar>::type Scalar;
typedef typename Tensor::Index Index;
const Index total_size = internal::array_prod(tensor.dimensions());
if (total_size > 0) {
const Index first_dim = Eigen::internal::array_get<0>(tensor.dimensions());
static const int layout = Tensor::Layout;
Map<const Array<Scalar, Dynamic, Dynamic, layout> > matrix(const_cast<Scalar*>(tensor.data()), first_dim, total_size/first_dim);
os << matrix;
}
}
};
// Print the tensor as a vector
template <typename Tensor>
struct TensorPrinter<Tensor, 1> {
static void run (std::ostream& os, const Tensor& tensor) {
typedef typename internal::remove_const<typename Tensor::Scalar>::type Scalar;
typedef typename Tensor::Index Index;
const Index total_size = internal::array_prod(tensor.dimensions());
if (total_size > 0) {
Map<const Array<Scalar, Dynamic, 1> > array(const_cast<Scalar*>(tensor.data()), total_size);
os << array;
}
}
};
// Print the tensor as a scalar
// Print the tensor as a scalar: a rank-0 tensor has exactly one coefficient.
template <typename Tensor>
struct TensorPrinter<Tensor, 0> {
  static void run (std::ostream& os, const Tensor& tensor) {
    os << tensor.coeff(0);
  }
};
}
// Streams any read-only tensor expression: the expression is first forced
// into a temporary buffer (via TensorForcedEvalOp), then printed with the
// rank-appropriate TensorPrinter, and finally cleaned up. The eval-before-
// print-before-cleanup order is required: the printer reads the evaluator's
// buffer, which cleanup() releases.
template <typename T>
std::ostream& operator << (std::ostream& os, const TensorBase<T, ReadOnlyAccessors>& expr) {
  typedef TensorEvaluator<const TensorForcedEvalOp<const T>, DefaultDevice> Evaluator;
  typedef typename Evaluator::Dimensions Dimensions;
  // Evaluate the expression if needed
  TensorForcedEvalOp<const T> eval = expr.eval();
  Evaluator tensor(eval, DefaultDevice());
  tensor.evalSubExprsIfNeeded(NULL);
  // Print the result
  static const int rank = internal::array_size<Dimensions>::value;
  internal::TensorPrinter<Evaluator, rank>::run(os, tensor);
  // Cleanup.
  tensor.cleanup();
  return os;
}
} // end namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_IO_H
| 2,560
| 31.0125
| 134
|
h
|
null |
LRMI-main/unsupported/Eigen/CXX11/src/Tensor/TensorInflation.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2015 Ke Yang <yangke@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_TENSOR_TENSOR_INFLATION_H
#define EIGEN_CXX11_TENSOR_TENSOR_INFLATION_H
namespace Eigen {
/** \class TensorInflation
* \ingroup CXX11_Tensor_Module
*
* \brief Tensor inflation class.
*
*
*/
namespace internal {
// Expression traits: TensorInflationOp preserves the scalar type, rank and
// layout of its argument.
template<typename Strides, typename XprType>
struct traits<TensorInflationOp<Strides, XprType> > : public traits<XprType>
{
  typedef typename XprType::Scalar Scalar;
  typedef traits<XprType> XprTraits;
  typedef typename XprTraits::StorageKind StorageKind;
  typedef typename XprTraits::Index Index;
  typedef typename XprType::Nested Nested;
  typedef typename remove_reference<Nested>::type _Nested;
  static const int NumDimensions = XprTraits::NumDimensions;
  static const int Layout = XprTraits::Layout;
  typedef typename XprTraits::PointerType PointerType;
};
// The expression is evaluated by const reference (no materialization here).
template<typename Strides, typename XprType>
struct eval<TensorInflationOp<Strides, XprType>, Eigen::Dense>
{
  typedef const TensorInflationOp<Strides, XprType>& type;
};
// Nesting: the op is cheap to copy, so nest it by value.
template<typename Strides, typename XprType>
struct nested<TensorInflationOp<Strides, XprType>, 1, typename eval<TensorInflationOp<Strides, XprType> >::type>
{
  typedef TensorInflationOp<Strides, XprType> type;
};
} // end namespace internal
// Expression node representing an inflated tensor: holds the argument
// expression and the per-dimension strides (consumed by the evaluator below).
template<typename Strides, typename XprType>
class TensorInflationOp : public TensorBase<TensorInflationOp<Strides, XprType>, ReadOnlyAccessors>
{
  public:
  typedef typename Eigen::internal::traits<TensorInflationOp>::Scalar Scalar;
  typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename Eigen::internal::nested<TensorInflationOp>::type Nested;
  typedef typename Eigen::internal::traits<TensorInflationOp>::StorageKind StorageKind;
  typedef typename Eigen::internal::traits<TensorInflationOp>::Index Index;
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorInflationOp(const XprType& expr, const Strides& strides)
      : m_xpr(expr), m_strides(strides) {}
    // Per-dimension inflation strides.
    EIGEN_DEVICE_FUNC
    const Strides& strides() const { return m_strides; }
    // The wrapped argument expression.
    EIGEN_DEVICE_FUNC
    const typename internal::remove_all<typename XprType::Nested>::type&
    expression() const { return m_xpr; }
  protected:
    typename XprType::Nested m_xpr;
    const Strides m_strides;
};
// Eval as rvalue
// Evaluator for TensorInflationOp. Each output dimension is expanded to
// (d - 1) * stride + 1 (see the constructor): input coefficients land at
// multiples of the stride along each dimension, and every position in
// between is a "hole" that evaluates to Scalar(0) (see coeff()).
template<typename Strides, typename ArgType, typename Device>
struct TensorEvaluator<const TensorInflationOp<Strides, ArgType>, Device>
{
  typedef TensorInflationOp<Strides, ArgType> XprType;
  typedef typename XprType::Index Index;
  static const int NumDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value;
  typedef DSizes<Index, NumDims> Dimensions;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  static const int PacketSize = PacketType<CoeffReturnType, Device>::size;
  typedef StorageMemory<CoeffReturnType, Device> Storage;
  typedef typename Storage::Type EvaluatorPointerType;
  enum {
    IsAligned = /*TensorEvaluator<ArgType, Device>::IsAligned*/ false,
    PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
    BlockAccess = false,
    PreferBlockAccess = false,
    Layout = TensorEvaluator<ArgType, Device>::Layout,
    CoordAccess = false,  // to be implemented
    RawAccess = false
  };
  //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===//
  typedef internal::TensorBlockNotImplemented TensorBlock;
  //===--------------------------------------------------------------------===//
  EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : m_impl(op.expression(), device), m_strides(op.strides())
  {
    m_dimensions = m_impl.dimensions();
    // Expand each dimension to the inflated dimension.
    for (int i = 0; i < NumDims; ++i) {
      m_dimensions[i] = (m_dimensions[i] - 1) * op.strides()[i] + 1;
    }
    // Remember the strides for fast division.
    for (int i = 0; i < NumDims; ++i) {
      m_fastStrides[i] = internal::TensorIntDivisor<Index>(m_strides[i]);
    }
    // Precompute linear-index strides for both the input and the (inflated)
    // output shape, in the order dictated by Layout.
    const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      m_outputStrides[0] = 1;
      m_inputStrides[0] = 1;
      for (int i = 1; i < NumDims; ++i) {
        m_outputStrides[i] = m_outputStrides[i-1] * m_dimensions[i-1];
        m_inputStrides[i] = m_inputStrides[i-1] * input_dims[i-1];
      }
    } else {  // RowMajor
      m_outputStrides[NumDims-1] = 1;
      m_inputStrides[NumDims-1] = 1;
      for (int i = NumDims - 2; i >= 0; --i) {
        m_outputStrides[i] = m_outputStrides[i+1] * m_dimensions[i+1];
        m_inputStrides[i] = m_inputStrides[i+1] * input_dims[i+1];
      }
    }
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
  EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType /*data*/) {
    m_impl.evalSubExprsIfNeeded(NULL);
    return true;
  }
  EIGEN_STRONG_INLINE void cleanup() {
    m_impl.cleanup();
  }
  // Computes the input index given the output index. Returns true if the output
  // index doesn't fall into a hole.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool getInputIndex(Index index, Index* inputIndex) const
  {
    eigen_assert(index < dimensions().TotalSize());
    *inputIndex = 0;
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      EIGEN_UNROLL_LOOP
      for (int i = NumDims - 1; i > 0; --i) {
        const Index idx = index / m_outputStrides[i];
        // A coordinate that is not a multiple of the stride is a hole.
        if (idx != idx / m_fastStrides[i] * m_strides[i]) {
          return false;
        }
        *inputIndex += idx / m_strides[i] * m_inputStrides[i];
        index -= idx * m_outputStrides[i];
      }
      if (index != index / m_fastStrides[0] * m_strides[0]) {
        return false;
      }
      *inputIndex += index / m_strides[0];
      return true;
    } else {
      EIGEN_UNROLL_LOOP
      for (int i = 0; i < NumDims - 1; ++i) {
        const Index idx = index / m_outputStrides[i];
        // A coordinate that is not a multiple of the stride is a hole.
        if (idx != idx / m_fastStrides[i] * m_strides[i]) {
          return false;
        }
        *inputIndex += idx / m_strides[i] * m_inputStrides[i];
        index -= idx * m_outputStrides[i];
      }
      if (index != index / m_fastStrides[NumDims-1] * m_strides[NumDims-1]) {
        return false;
      }
      *inputIndex += index / m_strides[NumDims - 1];
    }
    return true;
  }
  // Returns the input coefficient at non-hole positions, Scalar(0) otherwise.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
  {
    Index inputIndex = 0;
    if (getInputIndex(index, &inputIndex)) {
      return m_impl.coeff(inputIndex);
    } else {
      return Scalar(0);
    }
  }
  // TODO(yangke): optimize this function so that we can detect and produce
  // all-zero packets
  template<int LoadMode>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
  {
    EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
    eigen_assert(index+PacketSize-1 < dimensions().TotalSize());
    // No direct packet load is possible: each lane may or may not be a hole.
    EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
    EIGEN_UNROLL_LOOP
    for (int i = 0; i < PacketSize; ++i) {
      values[i] = coeff(index+i);
    }
    PacketReturnType rslt = internal::pload<PacketReturnType>(values);
    return rslt;
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
    const double compute_cost = NumDims * (3 * TensorOpCost::DivCost<Index>() +
                                           3 * TensorOpCost::MulCost<Index>() +
                                           2 * TensorOpCost::AddCost<Index>());
    const double input_size = m_impl.dimensions().TotalSize();
    const double output_size = m_dimensions.TotalSize();
    if (output_size == 0)
      return TensorOpCost();
    // Scale the input bandwidth cost by the fraction of non-hole outputs.
    return m_impl.costPerCoeff(vectorized) +
           TensorOpCost(sizeof(CoeffReturnType) * input_size / output_size, 0,
                        compute_cost, vectorized, PacketSize);
  }
  // No backing buffer: coefficients are computed on demand.
  EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return NULL; }
#ifdef EIGEN_USE_SYCL
  // binding placeholder accessors to a command group handler for SYCL
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const {
    m_impl.bind(cgh);
  }
#endif
 protected:
  Dimensions m_dimensions;                 // inflated output dimensions
  array<Index, NumDims> m_outputStrides;   // linear strides of the output shape
  array<Index, NumDims> m_inputStrides;    // linear strides of the input shape
  TensorEvaluator<ArgType, Device> m_impl; // evaluator of the argument
  const Strides m_strides;                 // per-dimension inflation strides
  array<internal::TensorIntDivisor<Index>, NumDims> m_fastStrides; // fast divisors for m_strides
};
} // end namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_INFLATION_H
| 9,094
| 35.673387
| 112
|
h
|
null |
LRMI-main/unsupported/Eigen/CXX11/src/Tensor/TensorInitializer.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_TENSOR_TENSOR_INITIALIZER_H
#define EIGEN_CXX11_TENSOR_TENSOR_INITIALIZER_H
#if EIGEN_HAS_VARIADIC_TEMPLATES
#include <initializer_list>
namespace Eigen {
/** \class TensorInitializer
* \ingroup CXX11_Tensor_Module
*
* \brief Helper template to initialize Tensors from std::initializer_lists.
*/
namespace internal {
template <typename Derived, int N>
struct Initializer {
  // Initializer list nested N levels deep.
  typedef std::initializer_list<
      typename Initializer<Derived, N - 1>::InitList> InitList;
  // Walks one nesting level: records the position along the dimension this
  // level corresponds to, then recurses into each sub-list.
  static void run(TensorEvaluator<Derived, DefaultDevice>& tensor,
                  Eigen::array<typename traits<Derived>::Index, traits<Derived>::NumDimensions>* indices,
                  const InitList& vals) {
    int pos = 0;
    for (const auto& sublist : vals) {
      (*indices)[traits<Derived>::NumDimensions - N] = pos;
      ++pos;
      Initializer<Derived, N - 1>::run(tensor, indices, sublist);
    }
  }
};
template <typename Derived>
struct Initializer<Derived, 1> {
  typedef std::initializer_list<typename traits<Derived>::Scalar> InitList;
  // Innermost level: writes each scalar into the tensor at the current
  // multi-index, advancing along the last dimension.
  static void run(TensorEvaluator<Derived, DefaultDevice>& tensor,
                  Eigen::array<typename traits<Derived>::Index, traits<Derived>::NumDimensions>* indices,
                  const InitList& vals) {
    int pos = 0;
    // There is likely a faster way to do that than iterating.
    for (const auto& value : vals) {
      (*indices)[traits<Derived>::NumDimensions - 1] = pos;
      ++pos;
      tensor.coeffRef(*indices) = value;
    }
  }
};
// Rank-0 specialization: a scalar tensor is initialized from a single value.
template <typename Derived>
struct Initializer<Derived, 0> {
  typedef typename traits<Derived>::Scalar InitList;
  static void run(TensorEvaluator<Derived, DefaultDevice>& tensor,
                  Eigen::array<typename traits<Derived>::Index, traits<Derived>::NumDimensions>*,
                  const InitList& v) {
    tensor.coeffRef(0) = v;
  }
};
// Entry point: fills `tensor` from a nested initializer list whose nesting
// depth equals the tensor's rank. `indices` is the scratch multi-index the
// recursion writes into.
template <typename Derived, int N>
void initialize_tensor(TensorEvaluator<Derived, DefaultDevice>& tensor,
                       const typename Initializer<Derived, traits<Derived>::NumDimensions>::InitList& vals) {
  Eigen::array<typename traits<Derived>::Index, traits<Derived>::NumDimensions> indices;
  Initializer<Derived, traits<Derived>::NumDimensions>::run(tensor, &indices, vals);
}
} // namespace internal
} // namespace Eigen
#endif // EIGEN_HAS_VARIADIC_TEMPLATES
#endif // EIGEN_CXX11_TENSOR_TENSOR_INITIALIZER_H
| 2,730
| 31.903614
| 109
|
h
|
null |
LRMI-main/unsupported/Eigen/CXX11/src/Tensor/TensorLayoutSwap.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_TENSOR_TENSOR_LAYOUT_SWAP_H
#define EIGEN_CXX11_TENSOR_TENSOR_LAYOUT_SWAP_H
namespace Eigen {
/** \class TensorLayoutSwap
* \ingroup CXX11_Tensor_Module
*
* \brief Swap the layout from col-major to row-major, or row-major
* to col-major, and invert the order of the dimensions.
*
* Beware: the dimensions are reversed by this operation. If you want to
* preserve the ordering of the dimensions, you need to combine this
* operation with a shuffle.
*
* \example:
* Tensor<float, 2, ColMajor> input(2, 4);
* Tensor<float, 2, RowMajor> output = input.swap_layout();
* eigen_assert(output.dimension(0) == 4);
* eigen_assert(output.dimension(1) == 2);
*
* array<int, 2> shuffle(1, 0);
* output = input.swap_layout().shuffle(shuffle);
* eigen_assert(output.dimension(0) == 2);
* eigen_assert(output.dimension(1) == 4);
*
*/
namespace internal {
// Expression traits: the layout flag is flipped relative to the argument;
// scalar type and rank are preserved.
template<typename XprType>
struct traits<TensorLayoutSwapOp<XprType> > : public traits<XprType>
{
  typedef typename XprType::Scalar Scalar;
  typedef traits<XprType> XprTraits;
  typedef typename XprTraits::StorageKind StorageKind;
  typedef typename XprTraits::Index Index;
  typedef typename XprType::Nested Nested;
  typedef typename remove_reference<Nested>::type _Nested;
  static const int NumDimensions = traits<XprType>::NumDimensions;
  static const int Layout = (traits<XprType>::Layout == ColMajor) ? RowMajor : ColMajor;
  typedef typename XprTraits::PointerType PointerType;
};
// The expression is evaluated by const reference (no materialization here).
template<typename XprType>
struct eval<TensorLayoutSwapOp<XprType>, Eigen::Dense>
{
  typedef const TensorLayoutSwapOp<XprType>& type;
};
// Nesting: the op is cheap to copy, so nest it by value.
template<typename XprType>
struct nested<TensorLayoutSwapOp<XprType>, 1, typename eval<TensorLayoutSwapOp<XprType> >::type>
{
  typedef TensorLayoutSwapOp<XprType> type;
};
} // end namespace internal
// Expression node representing a layout swap. Writable: assigning to it
// writes through to the underlying expression.
template<typename XprType>
class TensorLayoutSwapOp : public TensorBase<TensorLayoutSwapOp<XprType>, WriteAccessors>
{
  public:
    typedef TensorBase<TensorLayoutSwapOp<XprType>, WriteAccessors> Base;
    typedef typename Eigen::internal::traits<TensorLayoutSwapOp>::Scalar Scalar;
    typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
    typedef typename internal::remove_const<typename XprType::CoeffReturnType>::type CoeffReturnType;
    typedef typename Eigen::internal::nested<TensorLayoutSwapOp>::type Nested;
    typedef typename Eigen::internal::traits<TensorLayoutSwapOp>::StorageKind StorageKind;
    typedef typename Eigen::internal::traits<TensorLayoutSwapOp>::Index Index;
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorLayoutSwapOp(const XprType& expr)
      : m_xpr(expr) {}
    // The wrapped argument expression.
    EIGEN_DEVICE_FUNC
    const typename internal::remove_all<typename XprType::Nested>::type&
    expression() const { return m_xpr; }
    EIGEN_TENSOR_INHERIT_ASSIGNMENT_OPERATORS(TensorLayoutSwapOp)
  protected:
    typename XprType::Nested m_xpr;
};
// Eval as rvalue
// Evaluator for a read-only layout swap. Linear indexing is unchanged (the
// underlying storage is identical); only the reported Layout flag is flipped
// and the dimensions are reversed (see the constructor).
template<typename ArgType, typename Device>
struct TensorEvaluator<const TensorLayoutSwapOp<ArgType>, Device>
{
  typedef TensorLayoutSwapOp<ArgType> XprType;
  typedef typename XprType::Index Index;
  static const int NumDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value;
  typedef DSizes<Index, NumDims> Dimensions;
  enum {
    IsAligned = TensorEvaluator<ArgType, Device>::IsAligned,
    PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
    BlockAccess = false,
    PreferBlockAccess = TensorEvaluator<ArgType, Device>::PreferBlockAccess,
    Layout = (static_cast<int>(TensorEvaluator<ArgType, Device>::Layout) == static_cast<int>(ColMajor)) ? RowMajor : ColMajor,
    CoordAccess = false,  // to be implemented
    RawAccess = TensorEvaluator<ArgType, Device>::RawAccess
  };
  //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===//
  typedef internal::TensorBlockNotImplemented TensorBlock;
  //===--------------------------------------------------------------------===//
  EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : m_impl(op.expression(), device)
  {
    // Reverse the dimension order: dim i of the output is dim NumDims-1-i of
    // the input.
    for(int i = 0; i < NumDims; ++i) {
      m_dimensions[i] = m_impl.dimensions()[NumDims-1-i];
    }
  }
#ifdef EIGEN_USE_SYCL
  // binding placeholder accessors to a command group handler for SYCL
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const {
    m_impl.bind(cgh);
  }
#endif
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  typedef StorageMemory<CoeffReturnType, Device> Storage;
  typedef typename Storage::Type EvaluatorPointerType;
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
  EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType data) {
    return m_impl.evalSubExprsIfNeeded(data);
  }
  EIGEN_STRONG_INLINE void cleanup() {
    m_impl.cleanup();
  }
  // Coefficient/packet access passes straight through: same linear index.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
  {
    return m_impl.coeff(index);
  }
  template<int LoadMode>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
  {
    return m_impl.template packet<LoadMode>(index);
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
    return m_impl.costPerCoeff(vectorized);
  }
  EIGEN_DEVICE_FUNC typename Storage::Type data() const {
    return constCast(m_impl.data());
  }
  const TensorEvaluator<ArgType, Device>& impl() const { return m_impl; }
 protected:
  TensorEvaluator<ArgType, Device> m_impl;
  Dimensions m_dimensions;
};
// Eval as lvalue
// Evaluator for a writable layout swap: adds pass-through write access on
// top of the read-only evaluator above.
template<typename ArgType, typename Device>
  struct TensorEvaluator<TensorLayoutSwapOp<ArgType>, Device>
  : public TensorEvaluator<const TensorLayoutSwapOp<ArgType>, Device>
{
  typedef TensorEvaluator<const TensorLayoutSwapOp<ArgType>, Device> Base;
  typedef TensorLayoutSwapOp<ArgType> XprType;
  enum {
    IsAligned = TensorEvaluator<ArgType, Device>::IsAligned,
    PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
    BlockAccess = false,
    PreferBlockAccess = TensorEvaluator<ArgType, Device>::PreferBlockAccess,
    Layout = (static_cast<int>(TensorEvaluator<ArgType, Device>::Layout) == static_cast<int>(ColMajor)) ? RowMajor : ColMajor,
    CoordAccess = false  // to be implemented
  };
  //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===//
  typedef internal::TensorBlockNotImplemented TensorBlock;
  //===--------------------------------------------------------------------===//
  EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
    : Base(op, device)
  { }
  typedef typename XprType::Index Index;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  // Writes pass straight through: same linear index as the argument.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType& coeffRef(Index index)
  {
    return this->m_impl.coeffRef(index);
  }
  template <int StoreMode> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  void writePacket(Index index, const PacketReturnType& x)
  {
    this->m_impl.template writePacket<StoreMode>(index, x);
  }
};
} // end namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_LAYOUT_SWAP_H
| 7,769
| 34.806452
| 126
|
h
|
null |
LRMI-main/unsupported/Eigen/CXX11/src/Tensor/TensorMacros.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2015 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_TENSOR_TENSOR_META_MACROS_H
#define EIGEN_CXX11_TENSOR_TENSOR_META_MACROS_H
/** use this macro in sfinae selection in templated functions
 *
 *   template<typename T,
 *            typename std::enable_if< isBanana<T>::value , int >::type = 0
 *   >
 *   void foo(){}
 *
 *   becomes =>
 *
 *   template<typename T,
 *            EIGEN_SFINAE_ENABLE_IF( isBanana<T>::value )
 *   >
 *   void foo(){}
 */
// SFINAE requires variadic templates
#if !defined(EIGEN_GPUCC)
#if EIGEN_HAS_VARIADIC_TEMPLATES
// SFINAE doesn't work for gcc <= 4.7
#ifdef EIGEN_COMP_GNUC
#if EIGEN_GNUC_AT_LEAST(4,8)
#define EIGEN_HAS_SFINAE
#endif
#else
#define EIGEN_HAS_SFINAE
#endif
#endif
#endif
#define EIGEN_SFINAE_ENABLE_IF( __condition__ ) \
    typename internal::enable_if< ( __condition__ ) , int >::type = 0
// Define a macro to use a reference on the host but a value on the device
#if defined(SYCL_DEVICE_ONLY)
#define EIGEN_DEVICE_REF
#else
#define EIGEN_DEVICE_REF &
#endif
// Define a macro for catching SYCL exceptions if exceptions are enabled
#define EIGEN_SYCL_TRY_CATCH(X) \
  do { \
    EIGEN_TRY {X;} \
    EIGEN_CATCH(const cl::sycl::exception& e) { \
      EIGEN_THROW_X(std::runtime_error("SYCL exception at " + \
                                       std::string(__FILE__) + ":" + \
                                       std::to_string(__LINE__) + "\n" + \
                                       e.what())); \
    } \
  } while (false)
// Define a macro if local memory flags are unset or one of them is set
// Setting both flags is the same as unsetting them
#if (!defined(EIGEN_SYCL_LOCAL_MEM) && !defined(EIGEN_SYCL_NO_LOCAL_MEM)) || \
     (defined(EIGEN_SYCL_LOCAL_MEM) &&  defined(EIGEN_SYCL_NO_LOCAL_MEM))
#define EIGEN_SYCL_LOCAL_MEM_UNSET_OR_ON 1
#define EIGEN_SYCL_LOCAL_MEM_UNSET_OR_OFF 1
#elif defined(EIGEN_SYCL_LOCAL_MEM) && !defined(EIGEN_SYCL_NO_LOCAL_MEM)
#define EIGEN_SYCL_LOCAL_MEM_UNSET_OR_ON 1
#elif !defined(EIGEN_SYCL_LOCAL_MEM) && defined(EIGEN_SYCL_NO_LOCAL_MEM)
#define EIGEN_SYCL_LOCAL_MEM_UNSET_OR_OFF 1
#endif
#if EIGEN_COMP_CLANG // workaround clang bug (see http://forum.kde.org/viewtopic.php?f=74&t=102653)
#define EIGEN_TENSOR_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived) \
  using Base::operator =; \
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const Derived& other) { Base::operator=(other); return *this; } \
  template <typename OtherDerived> \
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const OtherDerived& other) { Base::operator=(other); return *this; }
#else
#define EIGEN_TENSOR_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived) \
  EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived)
#endif
/** \internal
 * \brief Macro to manually inherit assignment operators.
 * This is necessary, because the implicitly defined assignment operator gets deleted when a custom operator= is defined.
 * This also inherits template<OtherDerived> operator=(const OtherDerived&) assignments.
 * With C++11 or later this also default-implements the copy-constructor
 */
#define EIGEN_TENSOR_INHERIT_ASSIGNMENT_OPERATORS(Derived) \
  EIGEN_TENSOR_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived) \
  EIGEN_DEFAULT_COPY_CONSTRUCTOR(Derived)
#endif
| 3,642
| 35.79798
| 129
|
h
|
null |
LRMI-main/unsupported/Eigen/CXX11/src/Tensor/TensorMeta.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2015 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_TENSOR_TENSOR_META_H
#define EIGEN_CXX11_TENSOR_TENSOR_META_H
namespace Eigen {
// Compile-time tag used to select between the two overloads of choose().
template<bool cond> struct Cond {};

// choose(Cond<true>, a, b) returns a; choose(Cond<false>, a, b) returns b.
// Selection happens at compile time via overload resolution, so the two
// alternatives may have different types.
template<typename T1, typename T2> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
const T1& choose(Cond<true>, const T1& first, const T2&) {
  return first;
}

template<typename T1, typename T2> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
const T2& choose(Cond<false>, const T1&, const T2& second) {
  return second;
}
// Integer ceiling division: smallest integer >= x / y for positive operands.
// Assumes y > 0; note that x + y - 1 can overflow for values near the top of
// the operand type's range -- presumably callers stay well below that, TODO
// confirm.
template <typename T, typename X, typename Y>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
T divup(const X x, const Y y) {
  return static_cast<T>((x + y - 1) / y);
}

// Convenience overload for the common case where both operands (and the
// result) share a single type.
template <typename T>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
T divup(const T x, const T y) {
  return static_cast<T>((x + y - 1) / y);
}
// max_n_1<n>::size is max(n, 1) -- presumably used to avoid declaring
// zero-sized arrays for rank-0 tensors (TODO confirm against callers).
template <size_t n> struct max_n_1 {
  static const size_t size = n;
};
template <> struct max_n_1<0> {
  static const size_t size = 1;
};
// Default packet types
// Default packet type for a (scalar, device) pair: the scalar's native
// packet type as defined by the core packet_traits. Devices with special
// requirements (GPU fp16, SYCL) provide specializations below.
template <typename Scalar, typename Device>
struct PacketType : internal::packet_traits<Scalar> {
  typedef typename internal::packet_traits<Scalar>::type type;
};
// For CUDA packet types when using a GpuDevice
#if defined(EIGEN_USE_GPU) && defined(EIGEN_HAS_GPU_FP16)
typedef ulonglong2 Packet4h2;
// On GPU, half uses a packet of 8 values packed into a 16-byte ulonglong2.
// The enum flags advertise which operations have packet implementations.
template<>
struct PacketType<half, GpuDevice> {
  typedef Packet4h2 type;
  static const int size = 8;
  enum {
    HasAdd    = 1,
    HasSub    = 1,
    HasMul    = 1,
    HasNegate = 1,
    HasAbs    = 1,
    HasArg    = 0,
    HasAbs2   = 0,
    HasMin    = 1,
    HasMax    = 1,
    HasConj   = 0,
    HasSetLinear = 0,
    HasBlend  = 0,

    HasDiv    = 1,
    HasSqrt   = 1,
    HasRsqrt  = 1,
    HasExp    = 1,
    HasExpm1  = 0,
    HasLog    = 1,
    HasLog1p  = 0,
    HasLog10  = 0,
    HasPow    = 1,
  };
};
#endif
#if defined(EIGEN_USE_SYCL)
namespace TensorSycl {
namespace internal {
// Compile-time arithmetic functors used as step operations for static_for
// below: PlusOp<I, A, B>::Value == A + B, DivOp<I, A, B>::Value == A / B.
template <typename Index, Index A, Index B> struct PlusOp {
  static constexpr Index Value = A + B;
};
template <typename Index, Index A, Index B> struct DivOp {
  static constexpr Index Value = A / B;
};
// Compile-time for-loop: invokes op(start), then recurses with the index
// advanced by StepOp (e.g. PlusOp adds `step`) until the index reaches `end`.
template <typename Index, Index start, Index end, Index step,
          template <class Indx, Indx...> class StepOp>
struct static_for {
  template <typename UnaryOperator>
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void loop(UnaryOperator op) {
    op(start);
    static_for<Index, StepOp<Index, start, step>::Value, end, step,
               StepOp>::loop(op);
  }
};
// Base case: start == end, nothing left to do.
template <typename Index, Index end, Index step,
          template <class Indx, Indx...> class StepOp>
struct static_for<Index, end, end, step, StepOp> {
  template <typename UnaryOperator>
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void loop(UnaryOperator) {}
};
// Selects the packet type used for evaluation: the scalar itself (packet
// size 1) when vectorization is disabled, the device's packet type when it
// is enabled.
template <typename OutScalar, typename Device, bool Vectorizable>
struct Vectorise {
  static const int PacketSize = 1;
  typedef OutScalar PacketReturnType;
};
template <typename OutScalar, typename Device>
struct Vectorise<OutScalar, Device, true> {
  static const int PacketSize = Eigen::PacketType<OutScalar, Device>::size;
  typedef typename Eigen::PacketType<OutScalar, Device>::type PacketReturnType;
};
// Rounds x up to the nearest multiple of y (assumes x >= 0 and y > 0).
static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Index roundUp(Index x, Index y) {
  const Index num_chunks = (x + y - 1) / y;  // ceiling of x / y
  return num_chunks * y;
}
} // namespace internal
} // namespace TensorSycl
template <>
struct PacketType<half, SyclDevice> {
typedef half type;
static const int size = 1;
enum {
HasAdd = 0,
HasSub = 0,
HasMul = 0,
HasNegate = 0,
HasAbs = 0,
HasArg = 0,
HasAbs2 = 0,
HasMin = 0,
HasMax = 0,
HasConj = 0,
HasSetLinear = 0,
HasBlend = 0
};
};
template <typename Scalar>
struct PacketType<Scalar, SyclDevice> : internal::default_packet_traits {
typedef Scalar type;
typedef Scalar half;
enum {
Vectorizable = 0,
size = 1,
AlignedOnScalar = 0,
HasHalfPacket = 0
};
enum {
HasAdd = 0,
HasSub = 0,
HasMul = 0,
HasNegate = 0,
HasAbs = 0,
HasAbs2 = 0,
HasMin = 0,
HasMax = 0,
HasConj = 0,
HasSetLinear = 0
};
};
template <typename Scalar>
struct PacketType<Scalar, const SyclDevice> : PacketType<Scalar, SyclDevice>{};
#ifndef EIGEN_DONT_VECTORIZE_SYCL
#define PACKET_TYPE(CVQual, Type, val, lengths, DEV)\
template<> struct PacketType<CVQual Type, DEV> : internal::sycl_packet_traits<val, lengths> \
{\
typedef typename internal::packet_traits<Type>::type type;\
typedef typename internal::packet_traits<Type>::half half;\
};
PACKET_TYPE(const, float, 1, 4, SyclDevice)
PACKET_TYPE(, float, 1, 4, SyclDevice)
PACKET_TYPE(const, float, 1, 4, const SyclDevice)
PACKET_TYPE(, float, 1, 4, const SyclDevice)
PACKET_TYPE(const, double, 0, 2, SyclDevice)
PACKET_TYPE(, double, 0, 2, SyclDevice)
PACKET_TYPE(const, double, 0, 2, const SyclDevice)
PACKET_TYPE(, double, 0, 2, const SyclDevice)
#undef PACKET_TYPE
template<> struct PacketType<half, const SyclDevice>: PacketType<half, SyclDevice>{};
template<> struct PacketType<const half, const SyclDevice>: PacketType<half, SyclDevice>{};
#endif
#endif
// Tuple mimics std::pair but works on e.g. nvcc.
// A device-compatible pair of values (first, second). Default construction
// value-initializes both members.
template <typename U, typename V> struct Tuple {
 public:
  U first;
  V second;

  typedef U first_type;
  typedef V second_type;

  EIGEN_CONSTEXPR EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  Tuple() : first(), second() {}

  EIGEN_CONSTEXPR EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  Tuple(const U& f, const V& s) : first(f), second(s) {}

  // Member-wise swap; `using numext::swap` lets type-specific overloads be
  // found while falling back to the generic implementation.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  void swap(Tuple& rhs) {
    using numext::swap;
    swap(first, rhs.first);
    swap(second, rhs.second);
  }
};

// Two tuples are equal iff both components compare equal.
template <typename U, typename V>
EIGEN_CONSTEXPR EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
bool operator==(const Tuple<U, V>& x, const Tuple<U, V>& y) {
  return (x.first == y.first && x.second == y.second);
}

template <typename U, typename V>
EIGEN_CONSTEXPR EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
bool operator!=(const Tuple<U, V>& x, const Tuple<U, V>& y) {
  return !(x == y);
}
// Can't use std::pairs on cuda devices
// A device-compatible pair of indices; both components default to 0.
template <typename Idx> struct IndexPair {
  EIGEN_CONSTEXPR EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE IndexPair() : first(0), second(0) {}
  EIGEN_CONSTEXPR EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE IndexPair(Idx f, Idx s) : first(f), second(s) {}

  // Copies both components from val (member-wise assignment).
  EIGEN_DEVICE_FUNC void set(IndexPair<Idx> val) {
    first = val.first;
    second = val.second;
  }

  Idx first;
  Idx second;
};
#ifdef EIGEN_HAS_SFINAE
namespace internal {
// Builds an array<Index, N> from any indexable custom index type by
// expanding idx[0..N-1] through the compile-time index pack Is.
template<typename IndexType, typename Index, Index... Is>
EIGEN_CONSTEXPR EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
array<Index, sizeof...(Is)> customIndices2Array(IndexType& idx, numeric_list<Index, Is...>) {
  return { idx[Is]... };
}

// Zero-dimension overload: nothing to read from idx.
template<typename IndexType, typename Index>
EIGEN_CONSTEXPR EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
array<Index, 0> customIndices2Array(IndexType&, numeric_list<Index>) {
  return array<Index, 0>();
}

/** Make an array (for index/dimensions) out of a custom index */
template<typename Index, std::size_t NumIndices, typename IndexType>
EIGEN_CONSTEXPR EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
array<Index, NumIndices> customIndices2Array(IndexType& idx) {
  return customIndices2Array(idx, typename gen_numeric_list<Index, NumIndices>::type{});
}
// Minimal reimplementation of std::is_base_of: `value` is true iff D derives
// from B. Presumably kept for toolchains where the standard trait is not
// usable -- TODO confirm.
// The classic trick: Host<B,D> converts to D* only through the non-const
// conversion operator, so overload resolution prefers check(D*, T) exactly
// when D* is also convertible to B* (i.e. D derives from B).
template <typename B, typename D>
struct is_base_of
{
  typedef char (&yes)[1];  // sizeof(yes) == 1 marks "derived"
  typedef char (&no)[2];   // sizeof(no)  == 2 marks "not derived"

  template <typename BB, typename DD>
  struct Host
  {
    operator BB*() const;
    operator DD*();
  };

  template<typename T>
  static yes check(D*, T);
  static no check(B*, int);

  static const bool value = sizeof(check(Host<B,D>(), int())) == sizeof(yes);
};
}
#endif
} // namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_META_H
| 8,104
| 24.977564
| 104
|
h
|
null |
LRMI-main/unsupported/Eigen/CXX11/src/Tensor/TensorPatch.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_TENSOR_TENSOR_PATCH_H
#define EIGEN_CXX11_TENSOR_TENSOR_PATCH_H
namespace Eigen {
/** \class TensorPatch
* \ingroup CXX11_Tensor_Module
*
* \brief Tensor patch class.
*
*
*/
namespace internal {
// Standard expression boilerplate for TensorPatchOp: traits, eval and
// nested specializations.
template<typename PatchDim, typename XprType>
struct traits<TensorPatchOp<PatchDim, XprType> > : public traits<XprType>
{
  typedef typename XprType::Scalar Scalar;
  typedef traits<XprType> XprTraits;
  typedef typename XprTraits::StorageKind StorageKind;
  typedef typename XprTraits::Index Index;
  typedef typename XprType::Nested Nested;
  typedef typename remove_reference<Nested>::type _Nested;
  // Patch extraction adds one dimension (the patch index) to the input rank.
  static const int NumDimensions = XprTraits::NumDimensions + 1;
  static const int Layout = XprTraits::Layout;
  typedef typename XprTraits::PointerType PointerType;
};

template<typename PatchDim, typename XprType>
struct eval<TensorPatchOp<PatchDim, XprType>, Eigen::Dense>
{
  typedef const TensorPatchOp<PatchDim, XprType>& type;
};

template<typename PatchDim, typename XprType>
struct nested<TensorPatchOp<PatchDim, XprType>, 1, typename eval<TensorPatchOp<PatchDim, XprType> >::type>
{
  typedef TensorPatchOp<PatchDim, XprType> type;
};
// Expression node representing the extraction of all patches of size
// patch_dims from the input expression. Read-only: the actual work happens
// in the TensorEvaluator specialization below.
template<typename PatchDim, typename XprType>
class TensorPatchOp : public TensorBase<TensorPatchOp<PatchDim, XprType>, ReadOnlyAccessors>
{
  public:
  typedef typename Eigen::internal::traits<TensorPatchOp>::Scalar Scalar;
  typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename Eigen::internal::nested<TensorPatchOp>::type Nested;
  typedef typename Eigen::internal::traits<TensorPatchOp>::StorageKind StorageKind;
  typedef typename Eigen::internal::traits<TensorPatchOp>::Index Index;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorPatchOp(const XprType& expr, const PatchDim& patch_dims)
      : m_xpr(expr), m_patch_dims(patch_dims) {}

    // Requested patch size: one extent per input dimension.
    EIGEN_DEVICE_FUNC
    const PatchDim& patch_dims() const { return m_patch_dims; }

    // The wrapped input expression.
    EIGEN_DEVICE_FUNC
    const typename internal::remove_all<typename XprType::Nested>::type&
    expression() const { return m_xpr; }

  protected:
    typename XprType::Nested m_xpr;
    const PatchDim m_patch_dims;
};
// Eval as rvalue
// Evaluator for TensorPatchOp (rvalue only). The output has one more
// dimension than the input: for ColMajor the last dimension indexes the
// patches and the leading dimensions index within a patch; for RowMajor the
// patch index comes first.
template<typename PatchDim, typename ArgType, typename Device>
struct TensorEvaluator<const TensorPatchOp<PatchDim, ArgType>, Device>
{
  typedef TensorPatchOp<PatchDim, ArgType> XprType;
  typedef typename XprType::Index Index;
  static const int NumDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value + 1;
  typedef DSizes<Index, NumDims> Dimensions;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  static const int PacketSize = PacketType<CoeffReturnType, Device>::size;
  typedef StorageMemory<CoeffReturnType, Device> Storage;
  typedef typename Storage::Type EvaluatorPointerType;


  enum {
    IsAligned = false,
    PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
    BlockAccess = false,
    PreferBlockAccess = TensorEvaluator<ArgType, Device>::PreferBlockAccess,
    Layout = TensorEvaluator<ArgType, Device>::Layout,
    CoordAccess = false,
    RawAccess = false
  };

  //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===//
  typedef internal::TensorBlockNotImplemented TensorBlock;
  //===--------------------------------------------------------------------===//

  // Precomputes the output dimensions and the three stride arrays used to
  // map an output index back to an input index:
  //   - m_inputStrides:  strides of the input tensor,
  //   - m_patchStrides:  strides over the grid of patch origins; there are
  //                      (input_dim - patch_dim + 1) origins per dimension,
  //   - m_outputStrides: strides of the output tensor.
  EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : m_impl(op.expression(), device)
  {
    Index num_patches = 1;
    const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
    const PatchDim& patch_dims = op.patch_dims();
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      for (int i = 0; i < NumDims-1; ++i) {
        m_dimensions[i] = patch_dims[i];
        num_patches *= (input_dims[i] - patch_dims[i] + 1);
      }
      m_dimensions[NumDims-1] = num_patches;

      m_inputStrides[0] = 1;
      m_patchStrides[0] = 1;
      for (int i = 1; i < NumDims-1; ++i) {
        m_inputStrides[i] = m_inputStrides[i-1] * input_dims[i-1];
        m_patchStrides[i] = m_patchStrides[i-1] * (input_dims[i-1] - patch_dims[i-1] + 1);
      }
      m_outputStrides[0] = 1;
      for (int i = 1; i < NumDims; ++i) {
        m_outputStrides[i] = m_outputStrides[i-1] * m_dimensions[i-1];
      }
    } else {
      // RowMajor: mirror image of the above, with the patch-index dimension
      // first and strides accumulated from the innermost dimension.
      for (int i = 0; i < NumDims-1; ++i) {
        m_dimensions[i+1] = patch_dims[i];
        num_patches *= (input_dims[i] - patch_dims[i] + 1);
      }
      m_dimensions[0] = num_patches;

      m_inputStrides[NumDims-2] = 1;
      m_patchStrides[NumDims-2] = 1;
      for (int i = NumDims-3; i >= 0; --i) {
        m_inputStrides[i] = m_inputStrides[i+1] * input_dims[i+1];
        m_patchStrides[i] = m_patchStrides[i+1] * (input_dims[i+1] - patch_dims[i+1] + 1);
      }
      m_outputStrides[NumDims-1] = 1;
      for (int i = NumDims-2; i >= 0; --i) {
        m_outputStrides[i] = m_outputStrides[i+1] * m_dimensions[i+1];
      }
    }
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }

  EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType /*data*/) {
    m_impl.evalSubExprsIfNeeded(NULL);
    return true;
  }

  EIGEN_STRONG_INLINE void cleanup() {
    m_impl.cleanup();
  }

  // Maps a flat output index to the corresponding input coefficient by
  // splitting it into a patch number and an offset within the patch, then
  // accumulating (patch origin + offset) per dimension in input strides.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
  {
    Index output_stride_index = (static_cast<int>(Layout) == static_cast<int>(ColMajor)) ? NumDims - 1 : 0;
    // Find the location of the first element of the patch.
    Index patchIndex = index / m_outputStrides[output_stride_index];
    // Find the offset of the element wrt the location of the first element.
    Index patchOffset = index - patchIndex * m_outputStrides[output_stride_index];
    Index inputIndex = 0;
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      EIGEN_UNROLL_LOOP
      for (int i = NumDims - 2; i > 0; --i) {
        const Index patchIdx = patchIndex / m_patchStrides[i];
        patchIndex -= patchIdx * m_patchStrides[i];
        const Index offsetIdx = patchOffset / m_outputStrides[i];
        patchOffset -= offsetIdx * m_outputStrides[i];
        inputIndex += (patchIdx + offsetIdx) * m_inputStrides[i];
      }
    } else {
      EIGEN_UNROLL_LOOP
      for (int i = 0; i < NumDims - 2; ++i) {
        const Index patchIdx = patchIndex / m_patchStrides[i];
        patchIndex -= patchIdx * m_patchStrides[i];
        const Index offsetIdx = patchOffset / m_outputStrides[i+1];
        patchOffset -= offsetIdx * m_outputStrides[i+1];
        inputIndex += (patchIdx + offsetIdx) * m_inputStrides[i];
      }
    }
    // What remains of patchIndex / patchOffset is the innermost-dimension
    // contribution (stride 1).
    inputIndex += (patchIndex + patchOffset);
    return m_impl.coeff(inputIndex);
  }

  // Packet load: resolves the input positions of the first and last lanes.
  // If they turn out to be contiguous, one unaligned packet load suffices;
  // otherwise the lanes are gathered coefficient by coefficient.
  template<int LoadMode>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
  {
    EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
    eigen_assert(index+PacketSize-1 < dimensions().TotalSize());

    Index output_stride_index = (static_cast<int>(Layout) == static_cast<int>(ColMajor)) ? NumDims - 1 : 0;
    Index indices[2] = {index, index + PacketSize - 1};
    Index patchIndices[2] = {indices[0] / m_outputStrides[output_stride_index],
                             indices[1] / m_outputStrides[output_stride_index]};
    Index patchOffsets[2] = {indices[0] - patchIndices[0] * m_outputStrides[output_stride_index],
                             indices[1] - patchIndices[1] * m_outputStrides[output_stride_index]};
    Index inputIndices[2] = {0, 0};
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      EIGEN_UNROLL_LOOP
      for (int i = NumDims - 2; i > 0; --i) {
        const Index patchIdx[2] = {patchIndices[0] / m_patchStrides[i],
                                   patchIndices[1] / m_patchStrides[i]};
        patchIndices[0] -= patchIdx[0] * m_patchStrides[i];
        patchIndices[1] -= patchIdx[1] * m_patchStrides[i];

        const Index offsetIdx[2] = {patchOffsets[0] / m_outputStrides[i],
                                    patchOffsets[1] / m_outputStrides[i]};
        patchOffsets[0] -= offsetIdx[0] * m_outputStrides[i];
        patchOffsets[1] -= offsetIdx[1] * m_outputStrides[i];

        inputIndices[0] += (patchIdx[0] + offsetIdx[0]) * m_inputStrides[i];
        inputIndices[1] += (patchIdx[1] + offsetIdx[1]) * m_inputStrides[i];
      }
    } else {
      EIGEN_UNROLL_LOOP
      for (int i = 0; i < NumDims - 2; ++i) {
        const Index patchIdx[2] = {patchIndices[0] / m_patchStrides[i],
                                   patchIndices[1] / m_patchStrides[i]};
        patchIndices[0] -= patchIdx[0] * m_patchStrides[i];
        patchIndices[1] -= patchIdx[1] * m_patchStrides[i];

        const Index offsetIdx[2] = {patchOffsets[0] / m_outputStrides[i+1],
                                    patchOffsets[1] / m_outputStrides[i+1]};
        patchOffsets[0] -= offsetIdx[0] * m_outputStrides[i+1];
        patchOffsets[1] -= offsetIdx[1] * m_outputStrides[i+1];

        inputIndices[0] += (patchIdx[0] + offsetIdx[0]) * m_inputStrides[i];
        inputIndices[1] += (patchIdx[1] + offsetIdx[1]) * m_inputStrides[i];
      }
    }
    inputIndices[0] += (patchIndices[0] + patchOffsets[0]);
    inputIndices[1] += (patchIndices[1] + patchOffsets[1]);

    if (inputIndices[1] - inputIndices[0] == PacketSize - 1) {
      // Contiguous in the input: load the whole packet at once.
      PacketReturnType rslt = m_impl.template packet<Unaligned>(inputIndices[0]);
      return rslt;
    }
    else {
      // Non-contiguous: gather lane by lane. The middle lanes go through
      // coeff(), which recomputes the full index mapping per lane.
      EIGEN_ALIGN_MAX CoeffReturnType values[PacketSize];
      values[0] = m_impl.coeff(inputIndices[0]);
      values[PacketSize-1] = m_impl.coeff(inputIndices[1]);
      EIGEN_UNROLL_LOOP
      for (int i = 1; i < PacketSize-1; ++i) {
        values[i] = coeff(index+i);
      }
      PacketReturnType rslt = internal::pload<PacketReturnType>(values);
      return rslt;
    }
  }

  // Cost of the index arithmetic above (one div, one mul, two adds per
  // dimension) added to the wrapped evaluator's cost.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
    const double compute_cost = NumDims * (TensorOpCost::DivCost<Index>() +
                                           TensorOpCost::MulCost<Index>() +
                                           2 * TensorOpCost::AddCost<Index>());
    return m_impl.costPerCoeff(vectorized) +
           TensorOpCost(0, 0, compute_cost, vectorized, PacketSize);
  }

  // No raw buffer: coefficients are produced on the fly.
  EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return NULL; }

#ifdef EIGEN_USE_SYCL
  // binding placeholder accessors to a command group handler for SYCL
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const {
    m_impl.bind(cgh);
  }
#endif

 protected:
  Dimensions m_dimensions;
  array<Index, NumDims> m_outputStrides;
  array<Index, NumDims-1> m_inputStrides;
  array<Index, NumDims-1> m_patchStrides;

  TensorEvaluator<ArgType, Device> m_impl;
};
} // end namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_PATCH_H
| 11,474
| 38.297945
| 116
|
h
|
null |
LRMI-main/unsupported/Eigen/CXX11/src/Tensor/TensorRandom.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2016 Benoit Steiner <benoit.steiner.goog@gmail.com>
// Copyright (C) 2018 Mehdi Goli <eigen@codeplay.com> Codeplay Software Ltd.
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_TENSOR_TENSOR_RANDOM_H
#define EIGEN_CXX11_TENSOR_TENSOR_RANDOM_H
namespace Eigen {
namespace internal {
namespace {
// Produces a seed: on device, derived from the calling thread's grid
// coordinates (so different threads get different seeds); on host, from
// Eigen's random() implementation.
EIGEN_DEVICE_FUNC uint64_t get_random_seed() {
#if defined(EIGEN_GPU_COMPILE_PHASE)
  // We don't support 3d kernels since we currently only use 1 and
  // 2d kernels.
  gpu_assert(threadIdx.z == 0);
  return blockIdx.x * blockDim.x + threadIdx.x
         + gridDim.x * blockDim.x * (blockIdx.y * blockDim.y + threadIdx.y);
#else
  // Rely on Eigen's random implementation.
  return random<uint64_t>();
#endif
}
// One step of the PCG generator: advances *state with the PCG LCG update
// (the stream id selects an odd increment, so distinct streams yield
// distinct sequences) and returns a 32-bit output.
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE unsigned PCG_XSH_RS_generator(uint64_t* state, uint64_t stream) {
  // TODO: Unify with the implementation in the non blocking thread pool.
  uint64_t current = *state;
  // Update the internal state
  *state = current * 6364136223846793005ULL + (stream << 1 | 1);
  // Generate the random output (using the PCG-XSH-RS scheme)
  return static_cast<unsigned>((current ^ (current >> 22)) >> (22 + (current >> 61)));
}
// Builds the initial PCG state from the given seed, falling back to a
// runtime-generated seed when seed == 0.
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE uint64_t PCG_XSH_RS_state(uint64_t seed) {
  seed = seed ? seed : get_random_seed();
  return seed * 6364136223846793005ULL + 0xda3e39cb94b95bdbULL;
}
} // namespace
// Primary template: maps one 32-bit draw to T by plain conversion. Floating
// point and complex types are handled by the specializations below.
template <typename T> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
T RandomToTypeUniform(uint64_t* state, uint64_t stream) {
  unsigned rnd = PCG_XSH_RS_generator(state, stream);
  return static_cast<T>(rnd);
}
// half in [0, 1): 10 random mantissa bits combined with a fixed biased
// exponent of 15 (i.e. 2^0) give a value uniform in [1, 2); subtracting 1
// maps it to [0, 1).
template <> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
Eigen::half RandomToTypeUniform<Eigen::half>(uint64_t* state, uint64_t stream) {
  // Generate 10 random bits for the mantissa, merge with exponent.
  unsigned rnd = PCG_XSH_RS_generator(state, stream);
  const uint16_t half_bits = static_cast<uint16_t>(rnd & 0x3ffu) | (static_cast<uint16_t>(15) << 10);
  Eigen::half result = Eigen::numext::bit_cast<Eigen::half>(half_bits);
  // Return the final result
  return result - Eigen::half(1.0f);
}
// bfloat16 in [0, 1): 7 random mantissa bits combined with a fixed biased
// exponent of 127 (i.e. 2^0) give a value uniform in [1, 2); subtracting 1
// maps it to [0, 1).
template <> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
Eigen::bfloat16 RandomToTypeUniform<Eigen::bfloat16>(uint64_t* state, uint64_t stream) {
  // Generate 7 random bits for the mantissa, merge with exponent.
  unsigned rnd = PCG_XSH_RS_generator(state, stream);
  const uint16_t half_bits = static_cast<uint16_t>(rnd & 0x7fu) | (static_cast<uint16_t>(127) << 7);
  Eigen::bfloat16 result = Eigen::numext::bit_cast<Eigen::bfloat16>(half_bits);
  // Return the final result
  return result - Eigen::bfloat16(1.0f);
}
// float in [0, 1): 23 random mantissa bits combined with a fixed biased
// exponent of 127 (i.e. 2^0) give a value uniform in [1, 2); subtracting 1
// maps it to [0, 1).
template <> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
float RandomToTypeUniform<float>(uint64_t* state, uint64_t stream) {
  // Generate 23 random bits for the mantissa.
  const unsigned rnd = PCG_XSH_RS_generator(state, stream);
  uint32_t bits = rnd & 0x7fffffu;
  // Set the exponent.
  bits |= (static_cast<uint32_t>(127) << 23);
  // bit_cast instead of a union: type punning through a union is undefined
  // behavior in C++, and the half/bfloat16 specializations already use
  // bit_cast for the same purpose.
  return Eigen::numext::bit_cast<float>(bits) - 1.0f;
}
// double in [0, 1): 52 random mantissa bits combined with a fixed biased
// exponent of 1023 (i.e. 2^0) give a value uniform in [1, 2); subtracting 1
// maps it to [0, 1).
template <> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
double RandomToTypeUniform<double>(uint64_t* state, uint64_t stream) {
  // Generate 52 random bits for the mantissa:
  // first the upper 20 bits, then the lower 32 bits.
  const unsigned rnd1 = PCG_XSH_RS_generator(state, stream) & 0xfffffu;
  const unsigned rnd2 = PCG_XSH_RS_generator(state, stream);
  uint64_t bits = (static_cast<uint64_t>(rnd1) << 32) | rnd2;
  // Set the exponent.
  bits |= (static_cast<uint64_t>(1023) << 52);
  // bit_cast instead of a union: type punning through a union is undefined
  // behavior in C++, and the half/bfloat16 specializations already use
  // bit_cast for the same purpose.
  return Eigen::numext::bit_cast<double>(bits) - 1.0;
}
// Complex samples: independent uniform draws for the real and imaginary
// parts.
template <> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
std::complex<float> RandomToTypeUniform<std::complex<float> >(uint64_t* state, uint64_t stream) {
  return std::complex<float>(RandomToTypeUniform<float>(state, stream),
                             RandomToTypeUniform<float>(state, stream));
}
template <> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
std::complex<double> RandomToTypeUniform<std::complex<double> >(uint64_t* state, uint64_t stream) {
  return std::complex<double>(RandomToTypeUniform<double>(state, stream),
                              RandomToTypeUniform<double>(state, stream));
}
// Functor producing uniformly distributed samples of T; sample i is drawn
// from PCG stream i, so evaluation order does not affect the result.
template <typename T> class UniformRandomGenerator {
 public:
  static const bool PacketAccess = true;

  // Uses the given "seed" if non-zero, otherwise uses a random seed.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE UniformRandomGenerator(
      uint64_t seed = 0) {
    m_state = PCG_XSH_RS_state(seed);
#ifdef EIGEN_USE_SYCL
    // In SYCL the PCG_XSH_RS_state cannot be built in one step, so it is
    // initialized in two stages. The functor's constructor runs on the CPU,
    // so the clock seed obtained here is the same for every thread: unlike
    // CUDA, the thread/block ids are only available inside operator()
    // (which runs on the device).
    // For CUDA, (((CLOCK + global_thread_id) * 6364136223846793005ULL) +
    // 0xda3e39cb94b95bdbULL) is passed to each thread; for SYCL,
    // ((CLOCK * 6364136223846793005ULL) + 0xda3e39cb94b95bdbULL) is passed
    // and each thread adds (global_thread_id * 6364136223846793005ULL) for
    // itself, exactly once, inside operator(), completing a unique
    // per-thread state equivalent to the CUDA one.
    m_exec_once =false;
#endif
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE UniformRandomGenerator(
      const UniformRandomGenerator& other) {
    m_state = other.m_state;
#ifdef EIGEN_USE_SYCL
    m_exec_once =other.m_exec_once;
#endif
  }

  // Returns the i-th uniform sample; i also serves as the PCG stream id.
  template<typename Index> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  T operator()(Index i) const {
#ifdef EIGEN_USE_SYCL
    if(!m_exec_once) {
      // Second stage of the seed construction (see the constructor): inject
      // the thread id so each thread gets a unique state.
      m_state += (i * 6364136223846793005ULL);
      m_exec_once =true;
    }
#endif
    T result = RandomToTypeUniform<T>(&m_state, i);
    return result;
  }

  // Fills a packet with consecutive samples drawn from stream i.
  template<typename Packet, typename Index> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  Packet packetOp(Index i) const {
    const int packetSize = internal::unpacket_traits<Packet>::size;
    EIGEN_ALIGN_MAX T values[packetSize];
#ifdef EIGEN_USE_SYCL
    if(!m_exec_once) {
      // Second stage of the seed construction (see the constructor).
      m_state += (i * 6364136223846793005ULL);
      m_exec_once =true;
    }
#endif
    EIGEN_UNROLL_LOOP
    for (int j = 0; j < packetSize; ++j) {
      values[j] = RandomToTypeUniform<T>(&m_state, i);
    }
    return internal::pload<Packet>(values);
  }

 private:
  // Mutable: the PCG state advances on every draw even though the generator
  // is invoked through a const reference.
  mutable uint64_t m_state;
#ifdef EIGEN_USE_SYCL
  mutable bool m_exec_once;
#endif
};
// Cost model: a rough per-scalar estimate, scaled by the number of 32-bit
// draws needed for the scalar's size.
template <typename Scalar>
struct functor_traits<UniformRandomGenerator<Scalar> > {
  enum {
    // Rough estimate for floating point, multiplied by ceil(sizeof(T) / sizeof(float)).
    Cost = 12 * NumTraits<Scalar>::AddCost *
           ((sizeof(Scalar) + sizeof(float) - 1) / sizeof(float)),
    PacketAccess = UniformRandomGenerator<Scalar>::PacketAccess
  };
};
// Draws a standard normal sample using the ratio-of-uniforms method. See
// for example Numerical Recipes chapter 7.3.9 for the details.
template <typename T> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
T RandomToTypeNormal(uint64_t* state, uint64_t stream) {
  T u, v, q;
  do {
    u = RandomToTypeUniform<T>(state, stream);
    v = T(1.7156) * (RandomToTypeUniform<T>(state, stream) - T(0.5));

    // Cheap quadratic acceptance test first; the exact (log-based, more
    // expensive) rejection test only runs when the quick test is
    // inconclusive.
    const T x = u - T(0.449871);
    const T y = numext::abs(v) + T(0.386595);
    q = x*x + y * (T(0.196)*y - T(0.25472)*x);
  } while (q > T(0.27597) &&
           (q > T(0.27846) || v*v > T(-4) * numext::log(u) * u*u));

  return v/u;
}
// Complex samples: independent normal draws for the real and imaginary
// parts.
template <> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
std::complex<float> RandomToTypeNormal<std::complex<float> >(uint64_t* state, uint64_t stream) {
  return std::complex<float>(RandomToTypeNormal<float>(state, stream),
                             RandomToTypeNormal<float>(state, stream));
}
template <> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
std::complex<double> RandomToTypeNormal<std::complex<double> >(uint64_t* state, uint64_t stream) {
  return std::complex<double>(RandomToTypeNormal<double>(state, stream),
                              RandomToTypeNormal<double>(state, stream));
}
// Functor producing normally distributed samples of T; sample i is drawn
// from PCG stream i, so evaluation order does not affect the result.
template <typename T> class NormalRandomGenerator {
 public:
  static const bool PacketAccess = true;

  // Uses the given "seed" if non-zero, otherwise uses a random seed.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE NormalRandomGenerator(uint64_t seed = 0) {
    m_state = PCG_XSH_RS_state(seed);
#ifdef EIGEN_USE_SYCL
    // In SYCL the PCG_XSH_RS_state cannot be built in one step, so it is
    // initialized in two stages. The functor's constructor runs on the CPU,
    // so the clock seed obtained here is the same for every thread: unlike
    // CUDA, the thread/block ids are only available inside operator()
    // (which runs on the device). There, each thread injects its own id
    // into the seed exactly once, completing a unique per-thread state
    // equivalent to the CUDA one.
    m_exec_once =false;
#endif
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE NormalRandomGenerator(
      const NormalRandomGenerator& other) {
    m_state = other.m_state;
#ifdef EIGEN_USE_SYCL
    m_exec_once=other.m_exec_once;
#endif
  }

  // Returns the i-th normal sample; i also serves as the PCG stream id.
  template<typename Index> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  T operator()(Index i) const {
#ifdef EIGEN_USE_SYCL
    if(!m_exec_once) {
      // Second stage of the seed construction (see the constructor): inject
      // the thread id so each thread gets a unique state.
      m_state += (i * 6364136223846793005ULL);
      m_exec_once =true;
    }
#endif
    T result = RandomToTypeNormal<T>(&m_state, i);
    return result;
  }

  // Fills a packet with consecutive samples drawn from stream i.
  template<typename Packet, typename Index> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  Packet packetOp(Index i) const {
    const int packetSize = internal::unpacket_traits<Packet>::size;
    EIGEN_ALIGN_MAX T values[packetSize];
#ifdef EIGEN_USE_SYCL
    if(!m_exec_once) {
      // Second stage of the seed construction (see the constructor).
      m_state += (i * 6364136223846793005ULL);
      m_exec_once =true;
    }
#endif
    EIGEN_UNROLL_LOOP
    for (int j = 0; j < packetSize; ++j) {
      values[j] = RandomToTypeNormal<T>(&m_state, i);
    }
    return internal::pload<Packet>(values);
  }

 private:
  // Mutable: the PCG state advances on every draw even though the generator
  // is invoked through a const reference.
  mutable uint64_t m_state;
#ifdef EIGEN_USE_SYCL
  mutable bool m_exec_once;
#endif
};
// Cost model for the rejection-sampling normal generator.
template <typename Scalar>
struct functor_traits<NormalRandomGenerator<Scalar> > {
  enum {
    // On average, we need to generate about 3 random numbers
    // 15 mul, 8 add, 1.5 logs
    Cost = 3 * functor_traits<UniformRandomGenerator<Scalar> >::Cost +
           15 * NumTraits<Scalar>::AddCost + 8 * NumTraits<Scalar>::AddCost +
           3 * functor_traits<scalar_log_op<Scalar> >::Cost / 2,
    PacketAccess = NormalRandomGenerator<Scalar>::PacketAccess
  };
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_RANDOM_H
| 12,385
| 37.346749
| 125
|
h
|
null |
LRMI-main/unsupported/Eigen/CXX11/src/Tensor/TensorReverse.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Navdeep Jaitly <ndjaitly@google.com>
// Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_TENSOR_TENSOR_REVERSE_H
#define EIGEN_CXX11_TENSOR_TENSOR_REVERSE_H
namespace Eigen {
/** \class TensorReverse
* \ingroup CXX11_Tensor_Module
*
* \brief Tensor reverse elements class.
*
*/
namespace internal {
// Standard Eigen expression bookkeeping for TensorReverseOp: scalar type,
// index type, dimension count and layout are all forwarded from the wrapped
// expression's traits.
template<typename ReverseDimensions, typename XprType>
struct traits<TensorReverseOp<ReverseDimensions,
                              XprType> > : public traits<XprType>
{
  typedef typename XprType::Scalar Scalar;
  typedef traits<XprType> XprTraits;
  typedef typename XprTraits::StorageKind StorageKind;
  typedef typename XprTraits::Index Index;
  typedef typename XprType::Nested Nested;
  typedef typename remove_reference<Nested>::type _Nested;
  static const int NumDimensions = XprTraits::NumDimensions;
  static const int Layout = XprTraits::Layout;
  typedef typename XprTraits::PointerType PointerType;
};
// A TensorReverseOp is evaluated by (const) reference, not copied.
template<typename ReverseDimensions, typename XprType>
struct eval<TensorReverseOp<ReverseDimensions, XprType>, Eigen::Dense>
{
  typedef const TensorReverseOp<ReverseDimensions, XprType>& type;
};
// Nesting rule: the expression is nested by value.
template<typename ReverseDimensions, typename XprType>
struct nested<TensorReverseOp<ReverseDimensions, XprType>, 1,
            typename eval<TensorReverseOp<ReverseDimensions, XprType> >::type>
{
  typedef TensorReverseOp<ReverseDimensions, XprType> type;
};
} // end namespace internal
/** Expression node that reverses the order of the coefficients of \c XprType
  * along every dimension whose flag is set in \c ReverseDimensions.
  * The node is writable (WriteAccessors), so it can appear on the left-hand
  * side of an assignment. */
template<typename ReverseDimensions, typename XprType>
class TensorReverseOp : public TensorBase<TensorReverseOp<ReverseDimensions,
                                          XprType>, WriteAccessors>
{
  public:
    using Base             = TensorBase<TensorReverseOp<ReverseDimensions, XprType>, WriteAccessors>;
    using Scalar           = typename Eigen::internal::traits<TensorReverseOp>::Scalar;
    using RealScalar       = typename Eigen::NumTraits<Scalar>::Real;
    using CoeffReturnType  = typename XprType::CoeffReturnType;
    using Nested           = typename Eigen::internal::nested<TensorReverseOp>::type;
    using StorageKind      = typename Eigen::internal::traits<TensorReverseOp>::StorageKind;
    using Index            = typename Eigen::internal::traits<TensorReverseOp>::Index;

    /** Wraps \a expr; dimensions flagged in \a reverse_dims are mirrored. */
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorReverseOp(
        const XprType& expr, const ReverseDimensions& reverse_dims)
        : m_xpr(expr), m_reverse_dims(reverse_dims) { }

    /** \returns the per-dimension reverse flags. */
    EIGEN_DEVICE_FUNC
    const ReverseDimensions& reverse() const { return m_reverse_dims; }

    /** \returns the wrapped (nested) expression. */
    EIGEN_DEVICE_FUNC
    const typename internal::remove_all<typename XprType::Nested>::type&
    expression() const { return m_xpr; }

    EIGEN_TENSOR_INHERIT_ASSIGNMENT_OPERATORS(TensorReverseOp)

  protected:
    typename XprType::Nested m_xpr;
    const ReverseDimensions m_reverse_dims;
};
// Eval as rvalue
//
// Read-only evaluator for TensorReverseOp. Coefficient access maps each
// output index to the mirrored input index for every reversed dimension;
// block access materializes a block by copying the inner-most dimension
// forwards or backwards from the input.
template<typename ReverseDimensions, typename ArgType, typename Device>
struct TensorEvaluator<const TensorReverseOp<ReverseDimensions, ArgType>, Device>
{
  typedef TensorReverseOp<ReverseDimensions, ArgType> XprType;
  typedef typename XprType::Index Index;
  static const int NumDims = internal::array_size<ReverseDimensions>::value;
  typedef DSizes<Index, NumDims> Dimensions;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  static const int PacketSize = PacketType<CoeffReturnType, Device>::size;
  typedef StorageMemory<CoeffReturnType, Device> Storage;
  typedef typename Storage::Type EvaluatorPointerType;
  enum {
    IsAligned = false,
    PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
    BlockAccess = NumDims > 0,
    PreferBlockAccess = true,
    Layout = TensorEvaluator<ArgType, Device>::Layout,
    CoordAccess = false,  // to be implemented
    RawAccess = false
  };
  // Fast integer division by a fixed stride.
  typedef internal::TensorIntDivisor<Index> IndexDivisor;
  //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===//
  typedef internal::TensorBlockDescriptor<NumDims, Index> TensorBlockDesc;
  typedef internal::TensorBlockScratchAllocator<Device> TensorBlockScratch;
  typedef typename TensorEvaluator<const ArgType, Device>::TensorBlock
      ArgTensorBlock;
  typedef typename internal::TensorMaterializedBlock<CoeffReturnType, NumDims,
                                                     Layout, Index>
      TensorBlock;
  //===--------------------------------------------------------------------===//
  // Caches input dimensions and precomputes layout-dependent strides (plus
  // fast-division helpers) used by reverseIndex().
  EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : m_impl(op.expression(), device),
        m_reverse(op.reverse()),
        m_device(device)
  {
    // Reversing a scalar isn't supported yet. It would be a no-op anyway.
    EIGEN_STATIC_ASSERT((NumDims > 0), YOU_MADE_A_PROGRAMMING_MISTAKE);
    // Compute strides
    m_dimensions = m_impl.dimensions();
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      m_strides[0] = 1;
      for (int i = 1; i < NumDims; ++i) {
        m_strides[i] = m_strides[i-1] * m_dimensions[i-1];
        if (m_strides[i] > 0) m_fastStrides[i] = IndexDivisor(m_strides[i]);
      }
    } else {
      m_strides[NumDims-1] = 1;
      for (int i = NumDims - 2; i >= 0; --i) {
        m_strides[i] = m_strides[i+1] * m_dimensions[i+1];
        if (m_strides[i] > 0) m_fastStrides[i] = IndexDivisor(m_strides[i]);
      }
    }
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  const Dimensions& dimensions() const { return m_dimensions; }
  EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType) {
    m_impl.evalSubExprsIfNeeded(NULL);
    return true;
  }
#ifdef EIGEN_USE_THREADS
  template <typename EvalSubExprsCallback>
  EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync(
      EvaluatorPointerType, EvalSubExprsCallback done) {
    m_impl.evalSubExprsIfNeededAsync(nullptr, [done](bool) { done(true); });
  }
#endif  // EIGEN_USE_THREADS
  EIGEN_STRONG_INLINE void cleanup() {
    m_impl.cleanup();
  }
  // Maps a flat output index to the flat input index it reads: the index is
  // decomposed into per-dimension coordinates (via the precomputed fast
  // divisors), every reversed dimension's coordinate is mirrored to
  // (dim - idx - 1), and the result is re-linearized with the same strides.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index reverseIndex(
      Index index) const {
    eigen_assert(index < dimensions().TotalSize());
    Index inputIndex = 0;
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      EIGEN_UNROLL_LOOP
      for (int i = NumDims - 1; i > 0; --i) {
        Index idx = index / m_fastStrides[i];
        index -= idx * m_strides[i];
        if (m_reverse[i]) {
          idx = m_dimensions[i] - idx - 1;
        }
        inputIndex += idx * m_strides[i] ;
      }
      if (m_reverse[0]) {
        inputIndex += (m_dimensions[0] - index - 1);
      } else {
        inputIndex += index;
      }
    } else {
      EIGEN_UNROLL_LOOP
      for (int i = 0; i < NumDims - 1; ++i) {
        Index idx = index / m_fastStrides[i];
        index -= idx * m_strides[i];
        if (m_reverse[i]) {
          idx = m_dimensions[i] - idx - 1;
        }
        inputIndex += idx * m_strides[i] ;
      }
      if (m_reverse[NumDims-1]) {
        inputIndex += (m_dimensions[NumDims-1] - index - 1);
      } else {
        inputIndex += index;
      }
    }
    return inputIndex;
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(
      Index index) const {
    return m_impl.coeff(reverseIndex(index));
  }
  // Packet load: mirrored input locations are generally not contiguous, so
  // coefficients are gathered one at a time and packed from a scratch buffer.
  template<int LoadMode>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  PacketReturnType packet(Index index) const
  {
    EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
    eigen_assert(index+PacketSize-1 < dimensions().TotalSize());
    // TODO(ndjaitly): write a better packing routine that uses
    // local structure.
    EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type
        values[PacketSize];
    EIGEN_UNROLL_LOOP
    for (int i = 0; i < PacketSize; ++i) {
      values[i] = coeff(index+i);
    }
    PacketReturnType rslt = internal::pload<PacketReturnType>(values);
    return rslt;
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  internal::TensorBlockResourceRequirements getResourceRequirements() const {
    const size_t target_size = m_device.lastLevelCacheSize();
    // Block evaluation reads underlying memory in reverse order, and default
    // cost model does not properly catch this in bytes stored/loaded.
    return internal::TensorBlockResourceRequirements::skewed<Scalar>(
               target_size)
        .addCostPerCoeff({0, 0, 24});
  }
  // Materializes one output block: iterates over the block's dimensions in
  // inner-most -> outer-most order, copying the (possibly merged) inner-most
  // dimension from the input either forwards or backwards.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBlock
  block(TensorBlockDesc& desc, TensorBlockScratch& scratch,
          bool /*root_of_expr_ast*/ = false) const {
    // TODO(ezhulenev): If underlying tensor expression supports and prefers
    // block evaluation we must use it. Currently we use coeff and packet
    // access into the underlying tensor expression.
    // static const bool useBlockAccessForArgType =
    //     TensorEvaluator<ArgType, Device>::BlockAccess &&
    //     TensorEvaluator<ArgType, Device>::PreferBlockAccess;
    static const bool isColMajor =
        static_cast<int>(Layout) == static_cast<int>(ColMajor);
    static const Index inner_dim_idx = isColMajor ? 0 : NumDims - 1;
    const bool inner_dim_reversed = m_reverse[inner_dim_idx];
    // Offset in the output block.
    Index block_offset = 0;
    // Offset in the input Tensor.
    Index input_offset = reverseIndex(desc.offset());
    // Initialize output block iterator state. Dimension in this array are
    // always in inner_most -> outer_most order (col major layout).
    array<BlockIteratorState, NumDims> it;
    for (int i = 0; i < NumDims; ++i) {
      const int dim = isColMajor ? i : NumDims - 1 - i;
      it[i].size = desc.dimension(dim);
      it[i].count = 0;
      it[i].reverse = m_reverse[dim];
      it[i].block_stride =
          i == 0 ? 1 : (it[i - 1].size * it[i - 1].block_stride);
      it[i].block_span = it[i].block_stride * (it[i].size - 1);
      it[i].input_stride = m_strides[dim];
      it[i].input_span = it[i].input_stride * (it[i].size - 1);
      if (it[i].reverse) {
        // Reversed dimensions are walked backwards through the input.
        it[i].input_stride = -1 * it[i].input_stride;
        it[i].input_span = -1 * it[i].input_span;
      }
    }
    // If multiple inner dimensions have the same reverse flag, check if we can
    // merge them into a single virtual inner dimension.
    int effective_inner_dim = 0;
    for (int i = 1; i < NumDims; ++i) {
      if (it[i].reverse != it[effective_inner_dim].reverse) break;
      if (it[i].block_stride != it[effective_inner_dim].size) break;
      if (it[i].block_stride != numext::abs(it[i].input_stride)) break;
      it[i].size = it[effective_inner_dim].size * it[i].size;
      it[i].block_stride = 1;
      it[i].input_stride = (inner_dim_reversed ? -1 : 1);
      it[i].block_span = it[i].block_stride * (it[i].size - 1);
      it[i].input_span = it[i].input_stride * (it[i].size - 1);
      effective_inner_dim = i;
    }
    eigen_assert(it[effective_inner_dim].block_stride == 1);
    eigen_assert(it[effective_inner_dim].input_stride ==
                 (inner_dim_reversed ? -1 : 1));
    const Index inner_dim_size = it[effective_inner_dim].size;
    // Prepare storage for the materialized reverse result.
    const typename TensorBlock::Storage block_storage =
        TensorBlock::prepareStorage(desc, scratch);
    CoeffReturnType* block_buffer = block_storage.data();
    while (it[NumDims - 1].count < it[NumDims - 1].size) {
      // Copy inner-most dimension data from reversed location in input.
      Index dst = block_offset;
      Index src = input_offset;
      // NOTE(ezhulenev): Adding vectorized path with internal::preverse showed
      // worse results in benchmarks than a simple coefficient loop.
      if (inner_dim_reversed) {
        for (Index i = 0; i < inner_dim_size; ++i) {
          block_buffer[dst] = m_impl.coeff(src);
          ++dst;
          --src;
        }
      } else {
        for (Index i = 0; i < inner_dim_size; ++i) {
          block_buffer[dst] = m_impl.coeff(src);
          ++dst;
          ++src;
        }
      }
      // For the 1d tensor we need to generate only one inner-most dimension.
      if ((NumDims - effective_inner_dim) == 1) break;
      // Update offset.
      for (Index i = effective_inner_dim + 1; i < NumDims; ++i) {
        if (++it[i].count < it[i].size) {
          block_offset += it[i].block_stride;
          input_offset += it[i].input_stride;
          break;
        }
        if (i != NumDims - 1) it[i].count = 0;
        block_offset -= it[i].block_span;
        input_offset -= it[i].input_span;
      }
    }
    return block_storage.AsTensorMaterializedBlock();
  }
  // Per-coefficient cost: index decomposition for every dimension plus the
  // mirroring arithmetic for each reversed dimension.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
    double compute_cost = NumDims * (2 * TensorOpCost::AddCost<Index>() +
                                     2 * TensorOpCost::MulCost<Index>() +
                                     TensorOpCost::DivCost<Index>());
    for (int i = 0; i < NumDims; ++i) {
      if (m_reverse[i]) {
        compute_cost += 2 * TensorOpCost::AddCost<Index>();
      }
    }
    return m_impl.costPerCoeff(vectorized) +
           TensorOpCost(0, 0, compute_cost, false /* vectorized */, PacketSize);
  }
  // No raw buffer: coefficients are computed on demand.
  EIGEN_DEVICE_FUNC typename Storage::Type data() const { return NULL; }
#ifdef EIGEN_USE_SYCL
  // binding placeholder accessors to a command group handler for SYCL
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const {
    m_impl.bind(cgh);
  }
#endif
 protected:
  Dimensions m_dimensions;
  array<Index, NumDims> m_strides;
  array<IndexDivisor, NumDims> m_fastStrides;
  TensorEvaluator<ArgType, Device> m_impl;
  ReverseDimensions m_reverse;
  const Device EIGEN_DEVICE_REF m_device;
 private:
  // Per-dimension iteration state used by block() (sizes, counters and the
  // signed strides/spans for walking the output block and the input).
  struct BlockIteratorState {
    BlockIteratorState()
        : size(0),
          count(0),
          reverse(false),
          block_stride(0),
          block_span(0),
          input_stride(0),
          input_span(0) {}
    Index size;
    Index count;
    bool reverse;
    Index block_stride;
    Index block_span;
    Index input_stride;
    Index input_span;
  };
};
// Eval as lvalue
//
// Writable evaluator for TensorReverseOp: reuses the const evaluator's index
// mapping (reverseIndex) and adds coefficient/packet write access so the
// reversed view can appear on the left-hand side of an assignment.
template <typename ReverseDimensions, typename ArgType, typename Device>
struct TensorEvaluator<TensorReverseOp<ReverseDimensions, ArgType>, Device>
    : public TensorEvaluator<const TensorReverseOp<ReverseDimensions, ArgType>,
                             Device> {
  typedef TensorEvaluator<const TensorReverseOp<ReverseDimensions, ArgType>,
                          Device> Base;
  typedef TensorReverseOp<ReverseDimensions, ArgType> XprType;
  typedef typename XprType::Index Index;
  static const int NumDims = internal::array_size<ReverseDimensions>::value;
  typedef DSizes<Index, NumDims> Dimensions;
  enum {
    IsAligned = false,
    PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
    BlockAccess = false,
    PreferBlockAccess = false,
    Layout = TensorEvaluator<ArgType, Device>::Layout,
    CoordAccess = false,  // to be implemented
    RawAccess = false
  };
  EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : Base(op, device) {}
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  static const int PacketSize = PacketType<CoeffReturnType, Device>::size;
  //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===//
  typedef internal::TensorBlockNotImplemented TensorBlock;
  //===--------------------------------------------------------------------===//
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  const Dimensions& dimensions() const { return this->m_dimensions; }
  // Writable access: the output index is remapped to the mirrored input index.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) {
    return this->m_impl.coeffRef(this->reverseIndex(index));
  }
  // Packet store: scatter the packet one coefficient at a time, since the
  // mirrored destinations are generally not contiguous.
  template <int StoreMode> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  void writePacket(Index index, const PacketReturnType& x) {
    EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
    eigen_assert(index+PacketSize-1 < dimensions().TotalSize());
    // This code is pilfered from TensorMorphing.h
    EIGEN_ALIGN_MAX CoeffReturnType values[PacketSize];
    internal::pstore<CoeffReturnType, PacketReturnType>(values, x);
    EIGEN_UNROLL_LOOP
    for (int i = 0; i < PacketSize; ++i) {
      this->coeffRef(index+i) = values[i];
    }
  }
};
} // end namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_REVERSE_H
| 16,938
| 35.349785
| 90
|
h
|
null |
LRMI-main/unsupported/Eigen/CXX11/src/Tensor/TensorShuffling.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_TENSOR_TENSOR_SHUFFLING_H
#define EIGEN_CXX11_TENSOR_TENSOR_SHUFFLING_H
namespace Eigen {
/** \class TensorShuffling
* \ingroup CXX11_Tensor_Module
*
* \brief Tensor shuffling class.
*
*
*/
namespace internal {
// Standard Eigen expression bookkeeping for TensorShufflingOp: everything is
// forwarded from the wrapped expression's traits.
template<typename Shuffle, typename XprType>
struct traits<TensorShufflingOp<Shuffle, XprType> > : public traits<XprType>
{
  typedef typename XprType::Scalar Scalar;
  typedef traits<XprType> XprTraits;
  typedef typename XprTraits::StorageKind StorageKind;
  typedef typename XprTraits::Index Index;
  typedef typename XprType::Nested Nested;
  typedef typename remove_reference<Nested>::type _Nested;
  static const int NumDimensions = XprTraits::NumDimensions;
  static const int Layout = XprTraits::Layout;
  typedef typename XprTraits::PointerType PointerType;
};
// A TensorShufflingOp is evaluated by (const) reference, not copied.
template<typename Shuffle, typename XprType>
struct eval<TensorShufflingOp<Shuffle, XprType>, Eigen::Dense>
{
  typedef const TensorShufflingOp<Shuffle, XprType>& type;
};
// Nesting rule: the expression is nested by value.
template<typename Shuffle, typename XprType>
struct nested<TensorShufflingOp<Shuffle, XprType>, 1, typename eval<TensorShufflingOp<Shuffle, XprType> >::type>
{
  typedef TensorShufflingOp<Shuffle, XprType> type;
};
} // end namespace internal
/** Expression node that permutes (shuffles) the dimensions of \c XprType
  * according to the permutation stored in \c Shuffle. */
template<typename Shuffle, typename XprType>
class TensorShufflingOp : public TensorBase<TensorShufflingOp<Shuffle, XprType> >
{
  public:
    using Base            = TensorBase<TensorShufflingOp<Shuffle, XprType> >;
    using Scalar          = typename Eigen::internal::traits<TensorShufflingOp>::Scalar;
    using RealScalar      = typename Eigen::NumTraits<Scalar>::Real;
    using CoeffReturnType = typename XprType::CoeffReturnType;
    using Nested          = typename Eigen::internal::nested<TensorShufflingOp>::type;
    using StorageKind     = typename Eigen::internal::traits<TensorShufflingOp>::StorageKind;
    using Index           = typename Eigen::internal::traits<TensorShufflingOp>::Index;

    /** Wraps \a expr; output dimension i is input dimension \a shfl[i]. */
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorShufflingOp(const XprType& expr, const Shuffle& shfl)
        : m_xpr(expr), m_shuffle(shfl) {}

    /** \returns the dimension permutation. */
    EIGEN_DEVICE_FUNC
    const Shuffle& shufflePermutation() const { return m_shuffle; }

    /** \returns the wrapped (nested) expression. */
    EIGEN_DEVICE_FUNC
    const typename internal::remove_all<typename XprType::Nested>::type&
    expression() const { return m_xpr; }

    EIGEN_TENSOR_INHERIT_ASSIGNMENT_OPERATORS(TensorShufflingOp)

  protected:
    typename XprType::Nested m_xpr;
    const Shuffle m_shuffle;
};
// Eval as rvalue
//
// Read-only evaluator for TensorShufflingOp. Precomputes output/input strides
// so that srcCoeff() can remap a flat output index to the corresponding flat
// input index; when the permutation is the identity, access is forwarded to
// the input evaluator unchanged (m_is_identity fast path).
template<typename Shuffle, typename ArgType, typename Device>
struct TensorEvaluator<const TensorShufflingOp<Shuffle, ArgType>, Device>
{
  typedef TensorEvaluator<const TensorShufflingOp<Shuffle, ArgType>, Device> Self;
  typedef TensorShufflingOp<Shuffle, ArgType> XprType;
  typedef typename XprType::Index Index;
  static const int NumDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value;
  typedef DSizes<Index, NumDims> Dimensions;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  static const int PacketSize = PacketType<CoeffReturnType, Device>::size;
  typedef StorageMemory<CoeffReturnType, Device> Storage;
  typedef typename Storage::Type EvaluatorPointerType;
  enum {
    IsAligned = false,
    PacketAccess = (PacketType<CoeffReturnType, Device>::size > 1),
    BlockAccess = TensorEvaluator<ArgType, Device>::RawAccess,
    PreferBlockAccess = true,
    Layout = TensorEvaluator<ArgType, Device>::Layout,
    CoordAccess = false,  // to be implemented
    RawAccess = false
  };
  typedef typename internal::remove_const<Scalar>::type ScalarNoConst;
  //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===//
  typedef internal::TensorBlockDescriptor<NumDims, Index> TensorBlockDesc;
  typedef internal::TensorBlockScratchAllocator<Device> TensorBlockScratch;
  typedef typename internal::TensorMaterializedBlock<ScalarNoConst, NumDims,
                                                     Layout, Index>
      TensorBlock;
  //===--------------------------------------------------------------------===//
  // Builds the shuffled dimensions, the inverse permutation, and all stride
  // tables (output, shuffled input, and unshuffled input).
  EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : m_device(device),
        m_impl(op.expression(), device)
  {
    const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
    const Shuffle& shuffle = op.shufflePermutation();
    m_is_identity = true;
    for (int i = 0; i < NumDims; ++i) {
      m_shuffle[i] = static_cast<int>(shuffle[i]);
      m_dimensions[i] = input_dims[shuffle[i]];
      m_inverseShuffle[shuffle[i]] = i;
      if (m_is_identity && shuffle[i] != i) {
        m_is_identity = false;
      }
    }
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      m_unshuffledInputStrides[0] = 1;
      m_outputStrides[0] = 1;
      for (int i = 1; i < NumDims; ++i) {
        m_unshuffledInputStrides[i] =
            m_unshuffledInputStrides[i - 1] * input_dims[i - 1];
        m_outputStrides[i] = m_outputStrides[i - 1] * m_dimensions[i - 1];
        m_fastOutputStrides[i] = internal::TensorIntDivisor<Index>(
            m_outputStrides[i] > 0 ? m_outputStrides[i] : Index(1));
      }
    } else {
      m_unshuffledInputStrides[NumDims - 1] = 1;
      m_outputStrides[NumDims - 1] = 1;
      for (int i = NumDims - 2; i >= 0; --i) {
        m_unshuffledInputStrides[i] =
            m_unshuffledInputStrides[i + 1] * input_dims[i + 1];
        m_outputStrides[i] = m_outputStrides[i + 1] * m_dimensions[i + 1];
        m_fastOutputStrides[i] = internal::TensorIntDivisor<Index>(
            m_outputStrides[i] > 0 ? m_outputStrides[i] : Index(1));
      }
    }
    // Input stride of output dimension i is the stride of input dimension
    // shuffle[i].
    for (int i = 0; i < NumDims; ++i) {
      m_inputStrides[i] = m_unshuffledInputStrides[shuffle[i]];
    }
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
  EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType /*data*/) {
    m_impl.evalSubExprsIfNeeded(NULL);
    return true;
  }
#ifdef EIGEN_USE_THREADS
  template <typename EvalSubExprsCallback>
  EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync(
      EvaluatorPointerType, EvalSubExprsCallback done) {
    m_impl.evalSubExprsIfNeededAsync(nullptr, [done](bool) { done(true); });
  }
#endif  // EIGEN_USE_THREADS
  EIGEN_STRONG_INLINE void cleanup() {
    m_impl.cleanup();
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
  {
    if (m_is_identity) {
      return m_impl.coeff(index);
    } else {
      return m_impl.coeff(srcCoeff(index));
    }
  }
  // Fallback packet loader (input evaluator has no packet access): gathers
  // coefficients one at a time into an aligned buffer.
  template <int LoadMode, typename Self, bool ImplPacketAccess>
  struct PacketLoader {
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
    static PacketReturnType Run(const Self& self, Index index) {
      EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
      EIGEN_UNROLL_LOOP
      for (int i = 0; i < PacketSize; ++i) {
        values[i] = self.coeff(index + i);
      }
      PacketReturnType rslt = internal::pload<PacketReturnType>(values);
      return rslt;
    }
  };
  // Specialization for inputs with packet access: forwards directly when the
  // shuffle is the identity, otherwise falls back to the gather loop.
  template<int LoadMode, typename Self>
  struct PacketLoader<LoadMode, Self, true> {
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
    static PacketReturnType Run(const Self& self, Index index) {
      if (self.m_is_identity) {
        return self.m_impl.template packet<LoadMode>(index);
      } else {
        EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
        EIGEN_UNROLL_LOOP
        for (int i = 0; i < PacketSize; ++i) {
          values[i] = self.coeff(index + i);
        }
        PacketReturnType rslt = internal::pload<PacketReturnType>(values);
        return rslt;
      }
    }
  };
  template<int LoadMode>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
  {
    EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
    eigen_assert(index + PacketSize - 1 < dimensions().TotalSize());
    return PacketLoader<LoadMode, Self, TensorEvaluator<ArgType, Device>::PacketAccess>::Run(*this, index);
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  internal::TensorBlockResourceRequirements getResourceRequirements() const {
    static const int inner_dim =
        Layout == static_cast<int>(ColMajor) ? 0 : NumDims - 1;
    const size_t target_size = m_device.firstLevelCacheSize();
    const bool inner_dim_shuffled = m_shuffle[inner_dim] != inner_dim;
    // Shuffled inner dimensions leads to a random memory access, which is not
    // captured by default cost model bytes loaded/stored. We add this cost
    // explicitly. The number of cycles picked based on the benchmarks.
    // TODO(ezhulenev): This number was picked based on a very questionable
    // benchmarks, add benchmarks that are representative of real workloads.
    using BlockRequirements = internal::TensorBlockResourceRequirements;
    if (inner_dim_shuffled) {
      return BlockRequirements::uniform<Scalar>(target_size)
          .addCostPerCoeff({0, 0, NumDims * 28});
    } else {
      return BlockRequirements::skewed<Scalar>(target_size);
    }
  }
  // Materializes one output block via TensorBlockIO, reading directly from
  // the input's raw buffer (BlockAccess requires the input's RawAccess).
  // NOTE(review): uses plain assert() here while the rest of the file uses
  // eigen_assert() -- presumably equivalent in practice; confirm upstream.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBlock
  block(TensorBlockDesc& desc, TensorBlockScratch& scratch,
          bool root_of_expr_ast = false) const {
    assert(m_impl.data() != NULL);
    typedef internal::TensorBlockIO<ScalarNoConst, Index, NumDims, Layout>
        TensorBlockIO;
    typedef typename TensorBlockIO::Dst TensorBlockIODst;
    typedef typename TensorBlockIO::Src TensorBlockIOSrc;
    const typename TensorBlock::Storage block_storage =
        TensorBlock::prepareStorage(
            desc, scratch, /*allow_strided_storage=*/root_of_expr_ast);
    typename TensorBlockIO::Dimensions input_strides(m_unshuffledInputStrides);
    TensorBlockIOSrc src(input_strides, m_impl.data(), srcCoeff(desc.offset()));
    TensorBlockIODst dst(block_storage.dimensions(), block_storage.strides(),
                         block_storage.data());
    typename TensorBlockIO::DimensionsMap dst_to_src_dim_map(m_shuffle);
    TensorBlockIO::Copy(dst, src, dst_to_src_dim_map);
    return block_storage.AsTensorMaterializedBlock();
  }
  // Per-coefficient cost: a full index remap unless the shuffle is the
  // identity, in which case only a trivial add is charged.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
    const double compute_cost = m_is_identity ? TensorOpCost::AddCost<Index>() :
                                NumDims * (2 * TensorOpCost::AddCost<Index>() +
                                           2 * TensorOpCost::MulCost<Index>() +
                                           TensorOpCost::DivCost<Index>());
    return m_impl.costPerCoeff(vectorized) +
           TensorOpCost(0, 0, compute_cost, m_is_identity /* vectorized */, PacketSize);
  }
  // No raw buffer: coefficients are computed on demand.
  EIGEN_DEVICE_FUNC typename Storage::Type data() const { return NULL; }
#ifdef EIGEN_USE_SYCL
  // binding placeholder accessors to a command group handler for SYCL
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const {
    m_impl.bind(cgh);
  }
#endif
 protected:
  // Maps a flat index expressed in input-block strides to the corresponding
  // flat index in output-block strides, applying the inverse permutation.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index GetBlockOutputIndex(
      Index input_index,
      const DSizes<Index, NumDims>& input_block_strides,
      const DSizes<Index, NumDims>& output_block_strides,
      const DSizes<internal::TensorIntDivisor<Index>, NumDims>& fast_input_block_strides) const {
    Index output_index = 0;
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      for (int i = NumDims - 1; i > 0; --i) {
        const Index idx = input_index / fast_input_block_strides[i];
        output_index += idx * output_block_strides[m_inverseShuffle[i]];
        input_index -= idx * input_block_strides[i];
      }
      return output_index + input_index *
          output_block_strides[m_inverseShuffle[0]];
    } else {
      for (int i = 0; i < NumDims - 1; ++i) {
        const Index idx = input_index / fast_input_block_strides[i];
        output_index += idx * output_block_strides[m_inverseShuffle[i]];
        input_index -= idx * input_block_strides[i];
      }
      return output_index + input_index *
          output_block_strides[m_inverseShuffle[NumDims - 1]];
    }
  }
  // Maps a flat output index to the flat input index it reads from: the index
  // is decomposed with the output strides and re-linearized with the
  // (shuffled) input strides.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index srcCoeff(Index index) const {
    Index inputIndex = 0;
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      for (int i = NumDims - 1; i > 0; --i) {
        const Index idx = index / m_fastOutputStrides[i];
        inputIndex += idx * m_inputStrides[i];
        index -= idx * m_outputStrides[i];
      }
      return inputIndex + index * m_inputStrides[0];
    } else {
      for (int i = 0; i < NumDims - 1; ++i) {
        const Index idx = index / m_fastOutputStrides[i];
        inputIndex += idx * m_inputStrides[i];
        index -= idx * m_outputStrides[i];
      }
      return inputIndex + index * m_inputStrides[NumDims - 1];
    }
  }
  Dimensions m_dimensions;
  bool m_is_identity;
  array<int, NumDims> m_shuffle;
  array<Index, NumDims> m_inverseShuffle;  // TODO(ezhulenev): Make it int type.
  array<Index, NumDims> m_outputStrides;
  array<internal::TensorIntDivisor<Index>, NumDims> m_fastOutputStrides;
  array<Index, NumDims> m_inputStrides;
  array<Index, NumDims> m_unshuffledInputStrides;
  const Device EIGEN_DEVICE_REF m_device;
  TensorEvaluator<ArgType, Device> m_impl;
};
// Eval as lvalue
//
// Writable evaluator for TensorShufflingOp: reuses the const evaluator's
// srcCoeff() mapping and adds coefficient/packet/block write access so the
// shuffled view can appear on the left-hand side of an assignment.
template<typename Shuffle, typename ArgType, typename Device>
struct TensorEvaluator<TensorShufflingOp<Shuffle, ArgType>, Device>
    : public TensorEvaluator<const TensorShufflingOp<Shuffle, ArgType>, Device>
{
  typedef TensorEvaluator<const TensorShufflingOp<Shuffle, ArgType>, Device> Base;
  typedef TensorShufflingOp<Shuffle, ArgType> XprType;
  typedef typename XprType::Index Index;
  static const int NumDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value;
  typedef DSizes<Index, NumDims> Dimensions;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  static const int PacketSize = PacketType<CoeffReturnType, Device>::size;
  enum {
    IsAligned = false,
    PacketAccess = (PacketType<CoeffReturnType, Device>::size > 1),
    BlockAccess = TensorEvaluator<ArgType, Device>::RawAccess,
    PreferBlockAccess = true,
    Layout = TensorEvaluator<ArgType, Device>::Layout,
    RawAccess = false
  };
  typedef typename internal::remove_const<Scalar>::type ScalarNoConst;
  //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===//
  typedef internal::TensorBlockDescriptor<NumDims, Index> TensorBlockDesc;
  //===--------------------------------------------------------------------===//
  EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : Base(op, device)
  { }
  // Writable access: the output index is remapped through the permutation.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType& coeffRef(Index index)
  {
    return this->m_impl.coeffRef(this->srcCoeff(index));
  }
  // Packet store: scatter coefficient by coefficient, since permuted
  // destinations are generally not contiguous.
  template <int StoreMode> EIGEN_STRONG_INLINE
  void writePacket(Index index, const PacketReturnType& x)
  {
    EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
    EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
    internal::pstore<CoeffReturnType, PacketReturnType>(values, x);
    EIGEN_UNROLL_LOOP
    for (int i = 0; i < PacketSize; ++i) {
      this->coeffRef(index+i) = values[i];
    }
  }
  // Writes a whole materialized block into the (permuted) destination via
  // TensorBlockIO. If the source block has no contiguous buffer, it is first
  // materialized into a temporary allocation.
  template <typename TensorBlock>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void writeBlock(
      const TensorBlockDesc& desc, const TensorBlock& block) {
    eigen_assert(this->m_impl.data() != NULL);
    typedef internal::TensorBlockIO<ScalarNoConst, Index, NumDims, Layout>
        TensorBlockIO;
    typedef typename TensorBlockIO::Dst TensorBlockIODst;
    typedef typename TensorBlockIO::Src TensorBlockIOSrc;
    const Scalar* block_buffer = block.data();
    // TODO(ezhulenev): TensorBlockIO should be able to read from any Eigen
    // expression with coefficient and packet access as `src`.
    void* mem = NULL;
    if (block_buffer == NULL) {
      mem = this->m_device.allocate(desc.size() * sizeof(Scalar));
      ScalarNoConst* buf = static_cast<ScalarNoConst*>(mem);
      typedef internal::TensorBlockAssignment<
          ScalarNoConst, NumDims, typename TensorBlock::XprType, Index>
          TensorBlockAssignment;
      TensorBlockAssignment::Run(
          TensorBlockAssignment::target(
              desc.dimensions(), internal::strides<Layout>(desc.dimensions()),
              buf),
          block.expr());
      block_buffer = buf;
    }
    // Read from block.
    TensorBlockIOSrc src(internal::strides<Layout>(desc.dimensions()),
                         block_buffer);
    // Write to the output buffer.
    typename TensorBlockIO::Dimensions output_strides(
        this->m_unshuffledInputStrides);
    typename TensorBlockIO::Dimensions output_dimensions;
    for (int i = 0; i < NumDims; ++i) {
      output_dimensions[this->m_shuffle[i]] = desc.dimension(i);
    }
    TensorBlockIODst dst(output_dimensions, output_strides, this->m_impl.data(),
                         this->srcCoeff(desc.offset()));
    // Reorder dimensions according to the shuffle.
    typename TensorBlockIO::DimensionsMap dst_to_src_dim_map;
    for (int i = 0; i < NumDims; ++i) {
      dst_to_src_dim_map[i] = static_cast<int>(this->m_inverseShuffle[i]);
    }
    TensorBlockIO::Copy(dst, src, dst_to_src_dim_map);
    // Deallocate temporary buffer used for the block materialization.
    if (mem != NULL) this->m_device.deallocate(mem);
  }
};
} // end namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_SHUFFLING_H
| 18,256
| 37.680085
| 112
|
h
|
null |
LRMI-main/unsupported/Eigen/CXX11/src/Tensor/TensorStriding.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_TENSOR_TENSOR_STRIDING_H
#define EIGEN_CXX11_TENSOR_TENSOR_STRIDING_H
namespace Eigen {
/** \class TensorStriding
* \ingroup CXX11_Tensor_Module
*
* \brief Tensor striding class.
*
*
*/
namespace internal {
// Traits for a striding expression: same scalar/layout/rank as the wrapped
// expression, since striding only subsamples coefficients.
template<typename Strides, typename XprType>
struct traits<TensorStridingOp<Strides, XprType> > : public traits<XprType>
{
  typedef typename XprType::Scalar Scalar;
  typedef traits<XprType> XprTraits;
  typedef typename XprTraits::StorageKind StorageKind;
  typedef typename XprTraits::Index Index;
  typedef typename XprType::Nested Nested;
  typedef typename remove_reference<Nested>::type _Nested;
  static const int NumDimensions = XprTraits::NumDimensions;
  static const int Layout = XprTraits::Layout;
  typedef typename XprTraits::PointerType PointerType;
};
// Striding expressions are evaluated by (device-qualified) const reference.
template<typename Strides, typename XprType>
struct eval<TensorStridingOp<Strides, XprType>, Eigen::Dense>
{
  typedef const TensorStridingOp<Strides, XprType>EIGEN_DEVICE_REF type;
};
// Nesting a striding expression stores it by value inside enclosing expressions.
template<typename Strides, typename XprType>
struct nested<TensorStridingOp<Strides, XprType>, 1, typename eval<TensorStridingOp<Strides, XprType> >::type>
{
  typedef TensorStridingOp<Strides, XprType> type;
};
} // end namespace internal
// Expression node selecting every strides[i]-th coefficient of `expr` along
// each dimension i. Purely a holder: all index math lives in the evaluator.
template<typename Strides, typename XprType>
class TensorStridingOp : public TensorBase<TensorStridingOp<Strides, XprType> >
{
  public:
    typedef TensorBase<TensorStridingOp<Strides, XprType> > Base;
    typedef typename Eigen::internal::traits<TensorStridingOp>::Scalar Scalar;
    typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
    typedef typename XprType::CoeffReturnType CoeffReturnType;
    typedef typename Eigen::internal::nested<TensorStridingOp>::type Nested;
    typedef typename Eigen::internal::traits<TensorStridingOp>::StorageKind StorageKind;
    typedef typename Eigen::internal::traits<TensorStridingOp>::Index Index;
    // \param expr expression to subsample; \param dims per-dimension strides.
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorStridingOp(const XprType& expr, const Strides& dims)
      : m_xpr(expr), m_dims(dims) {}
    // Per-dimension stride factors.
    EIGEN_DEVICE_FUNC
    const Strides& strides() const { return m_dims; }
    // The wrapped expression.
    EIGEN_DEVICE_FUNC
    const typename internal::remove_all<typename XprType::Nested>::type&
    expression() const { return m_xpr; }
    EIGEN_TENSOR_INHERIT_ASSIGNMENT_OPERATORS(TensorStridingOp)
  protected:
    typename XprType::Nested m_xpr;
    const Strides m_dims;
};
// Eval as rvalue
// Read-only evaluator for a striding expression. Output dimension i has
// ceil(input_dim[i] / stride[i]) coefficients; srcCoeff() maps an output
// linear index back to the corresponding input linear index.
template<typename Strides, typename ArgType, typename Device>
struct TensorEvaluator<const TensorStridingOp<Strides, ArgType>, Device>
{
  typedef TensorStridingOp<Strides, ArgType> XprType;
  typedef typename XprType::Index Index;
  static const int NumDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value;
  typedef DSizes<Index, NumDims> Dimensions;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  static const int PacketSize = PacketType<CoeffReturnType, Device>::size;
  typedef StorageMemory<CoeffReturnType, Device> Storage;
  typedef typename Storage::Type EvaluatorPointerType;
  enum {
    IsAligned = /*TensorEvaluator<ArgType, Device>::IsAligned*/false,
    PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
    BlockAccess = false,
    PreferBlockAccess = TensorEvaluator<ArgType, Device>::PreferBlockAccess,
    Layout = TensorEvaluator<ArgType, Device>::Layout,
    CoordAccess = false,  // to be implemented
    RawAccess = false
  };
  //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===//
  typedef internal::TensorBlockNotImplemented TensorBlock;
  //===--------------------------------------------------------------------===//
  // Computes the strided output dimensions and precomputes strides for both
  // index spaces. Note m_inputStrides[i] is pre-multiplied by op.strides()[i],
  // so the output->input mapping is a plain stride inner product.
  EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : m_impl(op.expression(), device)
  {
    m_dimensions = m_impl.dimensions();
    for (int i = 0; i < NumDims; ++i) {
      // ceil(dim / stride): partially covered strides still yield a coefficient.
      m_dimensions[i] =Eigen::numext::ceil(static_cast<float>(m_dimensions[i]) / op.strides()[i]);
    }
    const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      m_outputStrides[0] = 1;
      m_inputStrides[0] = 1;
      for (int i = 1; i < NumDims; ++i) {
        m_outputStrides[i] = m_outputStrides[i-1] * m_dimensions[i-1];
        m_inputStrides[i] = m_inputStrides[i-1] * input_dims[i-1];
        // Fold the stride factor into the previous dimension's input stride.
        m_inputStrides[i-1] *= op.strides()[i-1];
      }
      m_inputStrides[NumDims-1] *= op.strides()[NumDims-1];
    } else {  // RowMajor
      m_outputStrides[NumDims-1] = 1;
      m_inputStrides[NumDims-1] = 1;
      for (int i = NumDims - 2; i >= 0; --i) {
        m_outputStrides[i] = m_outputStrides[i+1] * m_dimensions[i+1];
        m_inputStrides[i] = m_inputStrides[i+1] * input_dims[i+1];
        m_inputStrides[i+1] *= op.strides()[i+1];
      }
      m_inputStrides[0] *= op.strides()[0];
    }
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
  EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType/*data*/) {
    m_impl.evalSubExprsIfNeeded(NULL);
    return true;
  }
  EIGEN_STRONG_INLINE void cleanup() {
    m_impl.cleanup();
  }
  // Scalar access: remap the output index and defer to the wrapped evaluator.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
  {
    return m_impl.coeff(srcCoeff(index));
  }
  // Packet access: map the first and last lane of the packet to input indices.
  // If they happen to be contiguous, do one (unaligned) packet load; otherwise
  // gather the lanes one coefficient at a time.
  template<int LoadMode>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
  {
    EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
    eigen_assert(index+PacketSize-1 < dimensions().TotalSize());
    Index inputIndices[] = {0, 0};
    Index indices[] = {index, index + PacketSize - 1};
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      EIGEN_UNROLL_LOOP
      for (int i = NumDims - 1; i > 0; --i) {
        const Index idx0 = indices[0] / m_outputStrides[i];
        const Index idx1 = indices[1] / m_outputStrides[i];
        inputIndices[0] += idx0 * m_inputStrides[i];
        inputIndices[1] += idx1 * m_inputStrides[i];
        indices[0] -= idx0 * m_outputStrides[i];
        indices[1] -= idx1 * m_outputStrides[i];
      }
      inputIndices[0] += indices[0] * m_inputStrides[0];
      inputIndices[1] += indices[1] * m_inputStrides[0];
    } else {  // RowMajor
      EIGEN_UNROLL_LOOP
      for (int i = 0; i < NumDims - 1; ++i) {
        const Index idx0 = indices[0] / m_outputStrides[i];
        const Index idx1 = indices[1] / m_outputStrides[i];
        inputIndices[0] += idx0 * m_inputStrides[i];
        inputIndices[1] += idx1 * m_inputStrides[i];
        indices[0] -= idx0 * m_outputStrides[i];
        indices[1] -= idx1 * m_outputStrides[i];
      }
      inputIndices[0] += indices[0] * m_inputStrides[NumDims-1];
      inputIndices[1] += indices[1] * m_inputStrides[NumDims-1];
    }
    if (inputIndices[1] - inputIndices[0] == PacketSize - 1) {
      PacketReturnType rslt = m_impl.template packet<Unaligned>(inputIndices[0]);
      return rslt;
    }
    else {
      EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
      values[0] = m_impl.coeff(inputIndices[0]);
      values[PacketSize-1] = m_impl.coeff(inputIndices[1]);
      EIGEN_UNROLL_LOOP
      for (int i = 1; i < PacketSize-1; ++i) {
        // Middle lanes re-run the full index remap via coeff().
        values[i] = coeff(index+i);
      }
      PacketReturnType rslt = internal::pload<PacketReturnType>(values);
      return rslt;
    }
  }
  // Cost model: one div/mul/add per dimension for the index remapping.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
    double compute_cost = (NumDims - 1) * (TensorOpCost::AddCost<Index>() +
                                           TensorOpCost::MulCost<Index>() +
                                           TensorOpCost::DivCost<Index>()) +
        TensorOpCost::MulCost<Index>();
    if (vectorized) {
      compute_cost *= 2;  // packet() computes two indices
    }
    const int innerDim = (static_cast<int>(Layout) == static_cast<int>(ColMajor)) ? 0 : (NumDims - 1);
    return m_impl.costPerCoeff(vectorized && m_inputStrides[innerDim] == 1) +
        // Computation is not vectorized per se, but it is done once per packet.
        TensorOpCost(0, 0, compute_cost, vectorized, PacketSize);
  }
  // No direct buffer: coefficients are produced lazily.
  EIGEN_DEVICE_FUNC typename Storage::Type data() const { return NULL; }
#ifdef EIGEN_USE_SYCL
  // binding placeholder accessors to a command group handler for SYCL
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const {
    m_impl.bind(cgh);
  }
#endif
 protected:
  // Converts an output linear index to the matching input linear index by
  // peeling off one dimension at a time (division by output strides).
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index srcCoeff(Index index) const
  {
    Index inputIndex = 0;
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      EIGEN_UNROLL_LOOP
      for (int i = NumDims - 1; i > 0; --i) {
        const Index idx = index / m_outputStrides[i];
        inputIndex += idx * m_inputStrides[i];
        index -= idx * m_outputStrides[i];
      }
      inputIndex += index * m_inputStrides[0];
    } else {  // RowMajor
      EIGEN_UNROLL_LOOP
      for (int i = 0; i < NumDims - 1; ++i) {
        const Index idx = index / m_outputStrides[i];
        inputIndex += idx * m_inputStrides[i];
        index -= idx * m_outputStrides[i];
      }
      inputIndex += index * m_inputStrides[NumDims-1];
    }
    return inputIndex;
  }
  Dimensions m_dimensions;                   // strided (output) dimensions
  array<Index, NumDims> m_outputStrides;     // strides of the output index space
  array<Index, NumDims> m_inputStrides;      // input strides, stride-premultiplied
  TensorEvaluator<ArgType, Device> m_impl;   // evaluator of the wrapped expression
};
// Eval as lvalue
// Lvalue evaluator: adds write access on top of the rvalue evaluator, using
// the same output->input index remapping (srcCoeff) for stores.
template<typename Strides, typename ArgType, typename Device>
struct TensorEvaluator<TensorStridingOp<Strides, ArgType>, Device>
    : public TensorEvaluator<const TensorStridingOp<Strides, ArgType>, Device>
{
  typedef TensorStridingOp<Strides, ArgType> XprType;
  typedef TensorEvaluator<const XprType, Device> Base;
  //  typedef typename XprType::Index Index;
  static const int NumDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value;
  //  typedef DSizes<Index, NumDims> Dimensions;
  enum {
    IsAligned = /*TensorEvaluator<ArgType, Device>::IsAligned*/false,
    PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
    PreferBlockAccess = false,
    Layout = TensorEvaluator<ArgType, Device>::Layout,
    CoordAccess = false, // to be implemented
    RawAccess = false
  };
  EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : Base(op, device) { }
  typedef typename XprType::Index Index;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  static const int PacketSize = PacketType<CoeffReturnType, Device>::size;
  // Writable scalar access at the remapped input location.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index)
  {
    return this->m_impl.coeffRef(this->srcCoeff(index));
  }
  // Packet store: mirrors the rvalue packet() logic — a single packet store
  // when the endpoints map to contiguous input indices, otherwise scatter the
  // lanes one coefficient at a time.
  template <int StoreMode> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  void writePacket(Index index, const PacketReturnType& x)
  {
    EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
    eigen_assert(index+PacketSize-1 < this->dimensions().TotalSize());
    Index inputIndices[] = {0, 0};
    Index indices[] = {index, index + PacketSize - 1};
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      EIGEN_UNROLL_LOOP
      for (int i = NumDims - 1; i > 0; --i) {
        const Index idx0 = indices[0] / this->m_outputStrides[i];
        const Index idx1 = indices[1] / this->m_outputStrides[i];
        inputIndices[0] += idx0 * this->m_inputStrides[i];
        inputIndices[1] += idx1 * this->m_inputStrides[i];
        indices[0] -= idx0 * this->m_outputStrides[i];
        indices[1] -= idx1 * this->m_outputStrides[i];
      }
      inputIndices[0] += indices[0] * this->m_inputStrides[0];
      inputIndices[1] += indices[1] * this->m_inputStrides[0];
    } else {  // RowMajor
      EIGEN_UNROLL_LOOP
      for (int i = 0; i < NumDims - 1; ++i) {
        const Index idx0 = indices[0] / this->m_outputStrides[i];
        const Index idx1 = indices[1] / this->m_outputStrides[i];
        inputIndices[0] += idx0 * this->m_inputStrides[i];
        inputIndices[1] += idx1 * this->m_inputStrides[i];
        indices[0] -= idx0 * this->m_outputStrides[i];
        indices[1] -= idx1 * this->m_outputStrides[i];
      }
      inputIndices[0] += indices[0] * this->m_inputStrides[NumDims-1];
      inputIndices[1] += indices[1] * this->m_inputStrides[NumDims-1];
    }
    if (inputIndices[1] - inputIndices[0] == PacketSize - 1) {
      this->m_impl.template writePacket<Unaligned>(inputIndices[0], x);
    }
    else {
      EIGEN_ALIGN_MAX Scalar values[PacketSize];
      internal::pstore<Scalar, PacketReturnType>(values, x);
      this->m_impl.coeffRef(inputIndices[0]) = values[0];
      this->m_impl.coeffRef(inputIndices[1]) = values[PacketSize-1];
      EIGEN_UNROLL_LOOP
      for (int i = 1; i < PacketSize-1; ++i) {
        // Middle lanes go through coeffRef(), which redoes the remapping.
        this->coeffRef(index+i) = values[i];
      }
    }
  }
};
} // end namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_STRIDING_H
| 13,513
| 37.945245
| 112
|
h
|
null |
LRMI-main/unsupported/Eigen/CXX11/src/Tensor/TensorTrace.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2017 Gagan Goel <gagan.nith@gmail.com>
// Copyright (C) 2017 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_TENSOR_TENSOR_TRACE_H
#define EIGEN_CXX11_TENSOR_TENSOR_TRACE_H
namespace Eigen {
/** \class TensorTrace
* \ingroup CXX11_Tensor_Module
*
* \brief Tensor Trace class.
*
*
*/
namespace internal {
// Traits for a trace expression: the traced dimensions are reduced away, so
// the result has NumDimensions - array_size<Dims> dimensions.
template<typename Dims, typename XprType>
struct traits<TensorTraceOp<Dims, XprType> > : public traits<XprType>
{
  typedef typename XprType::Scalar Scalar;
  typedef traits<XprType> XprTraits;
  typedef typename XprTraits::StorageKind StorageKind;
  typedef typename XprTraits::Index Index;
  typedef typename XprType::Nested Nested;
  typedef typename remove_reference<Nested>::type _Nested;
  static const int NumDimensions = XprTraits::NumDimensions - array_size<Dims>::value;
  static const int Layout = XprTraits::Layout;
  // Forward the pointer type explicitly, matching the other Tensor op traits
  // (e.g. traits<TensorStridingOp>) instead of relying on inheritance alone.
  typedef typename XprTraits::PointerType PointerType;
};
// Trace expressions are evaluated by const reference.
template<typename Dims, typename XprType>
struct eval<TensorTraceOp<Dims, XprType>, Eigen::Dense>
{
  typedef const TensorTraceOp<Dims, XprType>& type;
};
// Nesting a trace expression stores it by value inside enclosing expressions.
template<typename Dims, typename XprType>
struct nested<TensorTraceOp<Dims, XprType>, 1, typename eval<TensorTraceOp<Dims, XprType> >::type>
{
  typedef TensorTraceOp<Dims, XprType> type;
};
} // end namespace internal
// Expression node computing the trace of `expr` over the dimensions listed in
// `dims` (which must all have the same size). Pure holder; the summation is
// performed by the evaluator.
template<typename Dims, typename XprType>
class TensorTraceOp : public TensorBase<TensorTraceOp<Dims, XprType> >
{
  public:
    typedef typename Eigen::internal::traits<TensorTraceOp>::Scalar Scalar;
    typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
    typedef typename XprType::CoeffReturnType CoeffReturnType;
    typedef typename Eigen::internal::nested<TensorTraceOp>::type Nested;
    typedef typename Eigen::internal::traits<TensorTraceOp>::StorageKind StorageKind;
    typedef typename Eigen::internal::traits<TensorTraceOp>::Index Index;
    // \param expr expression to reduce; \param dims indices of traced dimensions.
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorTraceOp(const XprType& expr, const Dims& dims)
      : m_xpr(expr), m_dims(dims) {
    }
    // Dimensions being traced over.
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
    const Dims& dims() const { return m_dims; }
    // The wrapped expression.
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
    const typename internal::remove_all<typename XprType::Nested>::type& expression() const { return m_xpr; }
  protected:
    typename XprType::Nested m_xpr;
    const Dims m_dims;
};
// Eval as rvalue
// Read-only evaluator for a trace expression. Each output coefficient is the
// sum of input coefficients along the "diagonal" of the traced dimensions:
// starting from firstInput(index), step by the sum of the traced strides.
template<typename Dims, typename ArgType, typename Device>
struct TensorEvaluator<const TensorTraceOp<Dims, ArgType>, Device>
{
  typedef TensorTraceOp<Dims, ArgType> XprType;
  static const int NumInputDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value;
  static const int NumReducedDims = internal::array_size<Dims>::value;
  static const int NumOutputDims = NumInputDims - NumReducedDims;
  typedef typename XprType::Index Index;
  typedef DSizes<Index, NumOutputDims> Dimensions;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
  typedef StorageMemory<CoeffReturnType, Device> Storage;
  typedef typename Storage::Type EvaluatorPointerType;
  enum {
    IsAligned = false,
    PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
    BlockAccess = false,
    PreferBlockAccess = TensorEvaluator<ArgType, Device>::PreferBlockAccess,
    Layout = TensorEvaluator<ArgType, Device>::Layout,
    CoordAccess = false,
    RawAccess = false
  };
  //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===//
  typedef internal::TensorBlockNotImplemented TensorBlock;
  //===--------------------------------------------------------------------===//
  // Validates the traced dimensions (distinct, equal-sized), then computes the
  // output dimensions and three stride sets: output strides, input strides of
  // the preserved dimensions, and input strides of the reduced dimensions.
  EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : m_impl(op.expression(), device), m_traceDim(1), m_device(device)
  {
    EIGEN_STATIC_ASSERT((NumOutputDims >= 0), YOU_MADE_A_PROGRAMMING_MISTAKE);
    EIGEN_STATIC_ASSERT((NumReducedDims >= 2) || ((NumReducedDims == 0) && (NumInputDims == 0)), YOU_MADE_A_PROGRAMMING_MISTAKE);
    for (int i = 0; i < NumInputDims; ++i) {
      m_reduced[i] = false;
    }
    const Dims& op_dims = op.dims();
    for (int i = 0; i < NumReducedDims; ++i) {
      eigen_assert(op_dims[i] >= 0);
      eigen_assert(op_dims[i] < NumInputDims);
      m_reduced[op_dims[i]] = true;
    }
    // All the dimensions should be distinct to compute the trace
    int num_distinct_reduce_dims = 0;
    for (int i = 0; i < NumInputDims; ++i) {
      if (m_reduced[i]) {
        ++num_distinct_reduce_dims;
      }
    }
    eigen_assert(num_distinct_reduce_dims == NumReducedDims);
    // Compute the dimensions of the result.
    const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
    int output_index = 0;
    int reduced_index = 0;
    for (int i = 0; i < NumInputDims; ++i) {
      if (m_reduced[i]) {
        m_reducedDims[reduced_index] = input_dims[i];
        if (reduced_index > 0) {
          // All the trace dimensions must have the same size
          eigen_assert(m_reducedDims[0] == m_reducedDims[reduced_index]);
        }
        ++reduced_index;
      }
      else {
        m_dimensions[output_index] = input_dims[i];
        ++output_index;
      }
    }
    // Number of terms summed per output coefficient.
    if (NumReducedDims != 0) {
      m_traceDim = m_reducedDims[0];
    }
    // Compute the output strides
    if (NumOutputDims > 0) {
      if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
        m_outputStrides[0] = 1;
        for (int i = 1; i < NumOutputDims; ++i) {
          m_outputStrides[i] = m_outputStrides[i - 1] * m_dimensions[i - 1];
        }
      }
      else {
        m_outputStrides.back() = 1;
        for (int i = NumOutputDims - 2; i >= 0; --i) {
          m_outputStrides[i] = m_outputStrides[i + 1] * m_dimensions[i + 1];
        }
      }
    }
    // Compute the input strides
    if (NumInputDims > 0) {
      array<Index, NumInputDims> input_strides;
      if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
        input_strides[0] = 1;
        for (int i = 1; i < NumInputDims; ++i) {
          input_strides[i] = input_strides[i - 1] * input_dims[i - 1];
        }
      }
      else {
        input_strides.back() = 1;
        for (int i = NumInputDims - 2; i >= 0; --i) {
          input_strides[i] = input_strides[i + 1] * input_dims[i + 1];
        }
      }
      // Split the input strides into preserved vs reduced dimensions.
      output_index = 0;
      reduced_index = 0;
      for (int i = 0; i < NumInputDims; ++i) {
        if(m_reduced[i]) {
          m_reducedStrides[reduced_index] = input_strides[i];
          ++reduced_index;
        }
        else {
          m_preservedStrides[output_index] = input_strides[i];
          ++output_index;
        }
      }
    }
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const {
    return m_dimensions;
  }
  EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType /*data*/) {
    m_impl.evalSubExprsIfNeeded(NULL);
    return true;
  }
  EIGEN_STRONG_INLINE void cleanup() {
    m_impl.cleanup();
  }
  // Sums m_traceDim coefficients along the diagonal of the traced dimensions:
  // advancing by the sum of the reduced strides increments every traced
  // dimension's coordinate by one simultaneously.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
  {
    // Initialize the result
    CoeffReturnType result = internal::cast<int, CoeffReturnType>(0);
    Index index_stride = 0;
    for (int i = 0; i < NumReducedDims; ++i) {
      index_stride += m_reducedStrides[i];
    }
    // If trace is requested along all dimensions, starting index would be 0
    Index cur_index = 0;
    if (NumOutputDims != 0)
      cur_index = firstInput(index);
    for (Index i = 0; i < m_traceDim; ++i) {
      result += m_impl.coeff(cur_index);
      cur_index += index_stride;
    }
    return result;
  }
  // Packet access is emulated coefficient-by-coefficient: each lane is an
  // independent trace sum.
  template<int LoadMode>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const {
    EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE);
    eigen_assert(index + PacketSize - 1 < dimensions().TotalSize());
    EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
    for (int i = 0; i < PacketSize; ++i) {
      values[i] = coeff(index + i);
    }
    PacketReturnType result = internal::ploadt<PacketReturnType, LoadMode>(values);
    return result;
  }
#ifdef EIGEN_USE_SYCL
  // binding placeholder accessors to a command group handler for SYCL
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const {
    m_impl.bind(cgh);
  }
#endif
 protected:
  // Given the output index, finds the first index in the input tensor used to compute the trace
  // (i.e. the input index with all traced coordinates at zero).
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index firstInput(Index index) const {
    Index startInput = 0;
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      for (int i = NumOutputDims - 1; i > 0; --i) {
        const Index idx = index / m_outputStrides[i];
        startInput += idx * m_preservedStrides[i];
        index -= idx * m_outputStrides[i];
      }
      startInput += index * m_preservedStrides[0];
    }
    else {
      for (int i = 0; i < NumOutputDims - 1; ++i) {
        const Index idx = index / m_outputStrides[i];
        startInput += idx * m_preservedStrides[i];
        index -= idx * m_outputStrides[i];
      }
      startInput += index * m_preservedStrides[NumOutputDims - 1];
    }
    return startInput;
  }
  Dimensions m_dimensions;                        // output (non-traced) dimensions
  TensorEvaluator<ArgType, Device> m_impl;        // evaluator of the wrapped expression
  // Initialize the size of the trace dimension
  Index m_traceDim;
  const Device EIGEN_DEVICE_REF m_device;
  array<bool, NumInputDims> m_reduced;            // per input dim: is it traced?
  array<Index, NumReducedDims> m_reducedDims;     // sizes of the traced dims
  array<Index, NumOutputDims> m_outputStrides;    // strides of the output index space
  array<Index, NumReducedDims> m_reducedStrides;  // input strides of traced dims
  array<Index, NumOutputDims> m_preservedStrides; // input strides of preserved dims
};
} // End namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_TRACE_H
| 10,152
| 32.398026
| 129
|
h
|
null |
LRMI-main/unsupported/Eigen/CXX11/src/Tensor/TensorTraits.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_TENSOR_TENSOR_TRAITS_H
#define EIGEN_CXX11_TENSOR_TENSOR_TRAITS_H
namespace Eigen {
namespace internal {
// Computes the expression flags for a tensor: PacketAccessBit is granted only
// when the scalar type is vectorizable and the storage alignment guarantees
// (static or dynamic, depending on the configured max-align bytes) hold and
// DontAlign was not requested.
template<typename Scalar, int Options>
class compute_tensor_flags
{
  enum {
    is_dynamic_size_storage = 1,
    is_aligned =
    (
        ((Options&DontAlign)==0) && (
#if EIGEN_MAX_STATIC_ALIGN_BYTES>0
            (!is_dynamic_size_storage)
#else
            0
#endif
            |
#if EIGEN_MAX_ALIGN_BYTES>0
            is_dynamic_size_storage
#else
            0
#endif
      )
     ),
    packet_access_bit = packet_traits<Scalar>::Vectorizable && is_aligned ? PacketAccessBit : 0
  };
  public:
    enum { ret = packet_access_bit };
};
// Traits of a dynamically-sized Tensor: rank comes from NumIndices_, layout
// from the RowMajor bit of Options_; LvalueBit is set unless the scalar is const.
template<typename Scalar_, int NumIndices_, int Options_, typename IndexType_>
struct traits<Tensor<Scalar_, NumIndices_, Options_, IndexType_> >
{
  typedef Scalar_ Scalar;
  typedef Dense StorageKind;
  typedef IndexType_ Index;
  static const int NumDimensions = NumIndices_;
  static const int Layout = Options_ & RowMajor ? RowMajor : ColMajor;
  enum {
    Options = Options_,
    Flags = compute_tensor_flags<Scalar_, Options_>::ret | (is_const<Scalar_>::value ? 0 : LvalueBit)
  };
  template <typename T> struct MakePointer {
    typedef T* Type;
  };
  typedef typename MakePointer<Scalar>::Type PointerType;
};
// Traits of a fixed-size tensor: rank is derived from the Dimensions type.
template<typename Scalar_, typename Dimensions, int Options_, typename IndexType_>
struct traits<TensorFixedSize<Scalar_, Dimensions, Options_, IndexType_> >
{
  typedef Scalar_ Scalar;
  typedef Dense StorageKind;
  typedef IndexType_ Index;
  static const int NumDimensions = array_size<Dimensions>::value;
  static const int Layout = Options_ & RowMajor ? RowMajor : ColMajor;
  enum {
    Options = Options_,
    Flags = compute_tensor_flags<Scalar_, Options_>::ret | (is_const<Scalar_>::value ? 0: LvalueBit)
  };
  template <typename T> struct MakePointer {
    typedef T* Type;
  };
  typedef typename MakePointer<Scalar>::Type PointerType;
};
// Traits of a TensorMap: inherits everything from the mapped plain object and
// forwards the user-supplied MakePointer_ policy (e.g. for device pointers).
template<typename PlainObjectType, int Options_, template <class> class MakePointer_>
struct traits<TensorMap<PlainObjectType, Options_, MakePointer_> >
  : public traits<PlainObjectType>
{
  typedef traits<PlainObjectType> BaseTraits;
  typedef typename BaseTraits::Scalar Scalar;
  typedef typename BaseTraits::StorageKind StorageKind;
  typedef typename BaseTraits::Index Index;
  static const int NumDimensions = BaseTraits::NumDimensions;
  static const int Layout = BaseTraits::Layout;
  enum {
    Options = Options_,
    Flags = BaseTraits::Flags
  };
  template <class T> struct MakePointer {
    // Intermediate typedef to workaround MSVC issue.
    typedef MakePointer_<T> MakePointerT;
    typedef typename MakePointerT::Type Type;
  };
  typedef typename MakePointer<Scalar>::Type PointerType;
};
// Traits of a TensorRef: a thin pass-through of the referenced object's traits.
template<typename PlainObjectType>
struct traits<TensorRef<PlainObjectType> >
  : public traits<PlainObjectType>
{
  typedef traits<PlainObjectType> BaseTraits;
  typedef typename BaseTraits::Scalar Scalar;
  typedef typename BaseTraits::StorageKind StorageKind;
  typedef typename BaseTraits::Index Index;
  static const int NumDimensions = BaseTraits::NumDimensions;
  static const int Layout = BaseTraits::Layout;
  enum {
    Options = BaseTraits::Options,
    Flags = BaseTraits::Flags
  };
  typedef typename BaseTraits::PointerType PointerType;
};
// A plain Tensor evaluates as itself, held by (device-qualified) const reference.
// The same pattern repeats below for const/fixed-size/map/ref variants.
template<typename _Scalar, int NumIndices_, int Options, typename IndexType_>
struct eval<Tensor<_Scalar, NumIndices_, Options, IndexType_>, Eigen::Dense>
{
  typedef const Tensor<_Scalar, NumIndices_, Options, IndexType_>EIGEN_DEVICE_REF type;
};
template<typename _Scalar, int NumIndices_, int Options, typename IndexType_>
struct eval<const Tensor<_Scalar, NumIndices_, Options, IndexType_>, Eigen::Dense>
{
typedef const Tensor<_Scalar, NumIndices_, Options, IndexType_>EIGEN_DEVICE_REF type;
};
template<typename Scalar_, typename Dimensions, int Options, typename IndexType_>
struct eval<TensorFixedSize<Scalar_, Dimensions, Options, IndexType_>, Eigen::Dense>
{
typedef const TensorFixedSize<Scalar_, Dimensions, Options, IndexType_>EIGEN_DEVICE_REF type;
};
template<typename Scalar_, typename Dimensions, int Options, typename IndexType_>
struct eval<const TensorFixedSize<Scalar_, Dimensions, Options, IndexType_>, Eigen::Dense>
{
typedef const TensorFixedSize<Scalar_, Dimensions, Options, IndexType_>EIGEN_DEVICE_REF type;
};
template<typename PlainObjectType, int Options, template <class> class MakePointer>
struct eval<TensorMap<PlainObjectType, Options, MakePointer>, Eigen::Dense>
{
typedef const TensorMap<PlainObjectType, Options, MakePointer>EIGEN_DEVICE_REF type;
};
template<typename PlainObjectType, int Options, template <class> class MakePointer>
struct eval<const TensorMap<PlainObjectType, Options, MakePointer>, Eigen::Dense>
{
typedef const TensorMap<PlainObjectType, Options, MakePointer>EIGEN_DEVICE_REF type;
};
template<typename PlainObjectType>
struct eval<TensorRef<PlainObjectType>, Eigen::Dense>
{
typedef const TensorRef<PlainObjectType>EIGEN_DEVICE_REF type;
};
template<typename PlainObjectType>
struct eval<const TensorRef<PlainObjectType>, Eigen::Dense>
{
typedef const TensorRef<PlainObjectType>EIGEN_DEVICE_REF type;
};
// TODO nested<> does not exist anymore in Eigen/Core, and it thus has to be removed in favor of ref_selector.
// Generic fallback: defer to ref_selector to decide by-value vs by-reference
// nesting. Concrete tensor types are specialized below to nest by const ref.
template<typename T, int n=1, typename PlainObject = void> struct nested
{
  typedef typename ref_selector<T>::type type;
};
template <typename Scalar_, int NumIndices_, int Options_, typename IndexType_>
struct nested<Tensor<Scalar_, NumIndices_, Options_, IndexType_> >
{
typedef const Tensor<Scalar_, NumIndices_, Options_, IndexType_>EIGEN_DEVICE_REF type;
};
template <typename Scalar_, int NumIndices_, int Options_, typename IndexType_>
struct nested<const Tensor<Scalar_, NumIndices_, Options_, IndexType_> >
{
typedef const Tensor<Scalar_, NumIndices_, Options_, IndexType_>EIGEN_DEVICE_REF type;
};
template <typename Scalar_, typename Dimensions, int Options, typename IndexType_>
struct nested<TensorFixedSize<Scalar_, Dimensions, Options, IndexType_> >
{
typedef const TensorFixedSize<Scalar_, Dimensions, Options, IndexType_>EIGEN_DEVICE_REF type;
};
template <typename Scalar_, typename Dimensions, int Options, typename IndexType_>
struct nested<const TensorFixedSize<Scalar_, Dimensions, Options, IndexType_> >
{
typedef const TensorFixedSize<Scalar_, Dimensions, Options, IndexType_>EIGEN_DEVICE_REF type;
};
template <typename PlainObjectType>
struct nested<TensorRef<PlainObjectType> >
{
typedef const TensorRef<PlainObjectType>EIGEN_DEVICE_REF type;
};
template <typename PlainObjectType>
struct nested<const TensorRef<PlainObjectType> >
{
typedef const TensorRef<PlainObjectType>EIGEN_DEVICE_REF type;
};
} // end namespace internal
// Convolutional layers take in an input tensor of shape (D, R, C, B), or (D, C,
// R, B), and convolve it with a set of filters, which can also be presented as
// a tensor (D, K, K, M), where M is the number of filters, K is the filter
// size, and each 3-dimensional tensor of size (D, K, K) is a filter. For
// simplicity we assume that we always use square filters (which is usually the
// case in images), hence the two Ks in the tensor dimension. It also takes in
// a few additional parameters:
// Stride (S): The convolution stride is the offset between locations where we
// apply the filters. A larger stride means that the output will be
// spatially smaller.
// Padding (P): The padding we apply to the input tensor along the R and C
// dimensions. This is usually used to make sure that the spatial
// dimensions of the output matches our intention.
//
// Two types of padding are often used:
// SAME: The pad value is computed so that the output will have size
// R/S and C/S.
// VALID: no padding is carried out.
// When we do padding, the padded values at the padded locations are usually
// zero.
//
// The output dimensions for convolution, when given all the parameters above,
// are as follows:
// When Padding = SAME: the output size is (B, R', C', M), where
// R' = ceil(float(R) / float(S))
// C' = ceil(float(C) / float(S))
// where ceil is the ceiling function. The input tensor is padded with 0 as
// needed. The number of padded rows and columns are computed as:
// Pr = ((R' - 1) * S + K - R) / 2
// Pc = ((C' - 1) * S + K - C) / 2
// when the stride is 1, we have the simplified case R'=R, C'=C, Pr=Pc=(K-1)/2.
// This is where SAME comes from - the output has the same size as the input has.
// When Padding = VALID: the output size is computed as
// R' = ceil(float(R - K + 1) / float(S))
// C' = ceil(float(C - K + 1) / float(S))
// and the number of padded rows and columns are computed in the same way as in
// the SAME case.
// When the stride is 1, we have the simplified case R'=R-K+1, C'=C-K+1, Pr=0,
// Pc=0.
// Padding strategy for convolution-style operations; see the discussion above
// for the resulting output sizes.
typedef enum {
  PADDING_VALID = 1,
  PADDING_SAME = 2
} PaddingType;
} // end namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_TRAITS_H
| 9,432
| 34.596226
| 110
|
h
|
null |
LRMI-main/unsupported/Eigen/CXX11/src/Tensor/TensorUInt128.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2015 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_TENSOR_TENSOR_UINT128_H
#define EIGEN_CXX11_TENSOR_TENSOR_UINT128_H
namespace Eigen {
namespace internal {
// Compile-time uint64_t constant usable as the HIGH/LOW half of TensorUInt128;
// implicitly converts to its value and asserts on mismatching runtime init.
template <uint64_t n>
struct static_val {
  static const uint64_t value = n;
  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE operator uint64_t() const { return n; }
  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE static_val() { }
  template <typename T>
  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE static_val(const T& v) {
    EIGEN_UNUSED_VARIABLE(v);
    eigen_assert(v == n);
  }
};
// Minimal unsigned 128-bit integer built from two 64-bit halves, for devices
// without native __int128. HIGH/LOW may be static_val<> to encode constants.
template <typename HIGH = uint64_t, typename LOW = uint64_t>
struct TensorUInt128
{
  HIGH high;
  LOW low;
  // Widening copy: only allowed from equal-or-narrower half types.
  template<typename OTHER_HIGH, typename OTHER_LOW>
  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
  TensorUInt128(const TensorUInt128<OTHER_HIGH, OTHER_LOW>& other) : high(other.high), low(other.low) {
    EIGEN_STATIC_ASSERT(sizeof(OTHER_HIGH) <= sizeof(HIGH), YOU_MADE_A_PROGRAMMING_MISTAKE);
    EIGEN_STATIC_ASSERT(sizeof(OTHER_LOW) <= sizeof(LOW), YOU_MADE_A_PROGRAMMING_MISTAKE);
  }
  // Widening assignment, same constraints as the copy constructor.
  template<typename OTHER_HIGH, typename OTHER_LOW>
  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
  TensorUInt128& operator = (const TensorUInt128<OTHER_HIGH, OTHER_LOW>& other) {
    EIGEN_STATIC_ASSERT(sizeof(OTHER_HIGH) <= sizeof(HIGH), YOU_MADE_A_PROGRAMMING_MISTAKE);
    EIGEN_STATIC_ASSERT(sizeof(OTHER_LOW) <= sizeof(LOW), YOU_MADE_A_PROGRAMMING_MISTAKE);
    high = other.high;
    low = other.low;
    return *this;
  }
  // Construct from a non-negative scalar: high word is zero.
  template<typename T>
  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
  explicit TensorUInt128(const T& x) : high(0), low(x) {
    eigen_assert((static_cast<typename conditional<sizeof(T) == 8, uint64_t, uint32_t>::type>(x) <= NumTraits<uint64_t>::highest()));
    eigen_assert(x >= 0);
  }
  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
  TensorUInt128(HIGH y, LOW x) : high(y), low(x) { }
  // Truncating conversion: keeps only the low word.
  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE operator LOW() const {
    return low;
  }
  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE LOW lower() const {
    return low;
  }
  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE HIGH upper() const {
    return high;
  }
};
// Equality: both halves must match. Bitwise & (not &&) avoids a branch.
template <typename HL, typename LL, typename HR, typename LR>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
bool operator == (const TensorUInt128<HL, LL>& lhs, const TensorUInt128<HR, LR>& rhs)
{
  return (lhs.high == rhs.high) & (lhs.low == rhs.low);
}
// Inequality: true when either word differs.
template <typename HL, typename LL, typename HR, typename LR>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
bool operator != (const TensorUInt128<HL, LL>& lhs, const TensorUInt128<HR, LR>& rhs)
{
  const bool high_differs = (lhs.high != rhs.high);
  const bool low_differs = (lhs.low != rhs.low);
  return high_differs || low_differs;
}
// Lexicographic greater-or-equal: the high word decides unless it ties,
// in which case the low word is compared.
template <typename HL, typename LL, typename HR, typename LR>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
bool operator >= (const TensorUInt128<HL, LL>& lhs, const TensorUInt128<HR, LR>& rhs)
{
  return (lhs.high == rhs.high) ? (lhs.low >= rhs.low) : (lhs.high > rhs.high);
}
// Lexicographic less-than: the high word decides unless it ties, in which
// case the low word is compared.
template <typename HL, typename LL, typename HR, typename LR>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
bool operator < (const TensorUInt128<HL, LL>& lhs, const TensorUInt128<HR, LR>& rhs)
{
  return (lhs.high == rhs.high) ? (lhs.low < rhs.low) : (lhs.high < rhs.high);
}
// 128-bit addition with wraparound (mod 2^128).
template <typename HL, typename LL, typename HR, typename LR>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
TensorUInt128<uint64_t, uint64_t> operator + (const TensorUInt128<HL, LL>& lhs, const TensorUInt128<HR, LR>& rhs)
{
  // Add the low words first; unsigned wraparound signals a carry into high.
  const uint64_t low_sum = lhs.low + rhs.low;
  uint64_t high_sum = lhs.high + rhs.high;
  if (low_sum < rhs.low) {
    ++high_sum;  // the low addition wrapped around
  }
  return TensorUInt128<uint64_t, uint64_t>(high_sum, low_sum);
}
// 128-bit subtraction with wraparound (mod 2^128).
template <typename HL, typename LL, typename HR, typename LR>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
TensorUInt128<uint64_t, uint64_t> operator - (const TensorUInt128<HL, LL>& lhs, const TensorUInt128<HR, LR>& rhs)
{
  // Subtract the low words first; a result larger than the minuend's low
  // word means the subtraction wrapped and we must borrow from high.
  const uint64_t low_diff = lhs.low - rhs.low;
  uint64_t high_diff = lhs.high - rhs.high;
  if (low_diff > lhs.low) {
    --high_diff;  // borrow
  }
  return TensorUInt128<uint64_t, uint64_t>(high_diff, low_diff);
}
// Full 128x128 -> 128 bit multiply; the upper 128 bits of the 256-bit
// product are discarded, matching native unsigned overflow semantics.
template <typename HL, typename LL, typename HR, typename LR>
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
TensorUInt128<uint64_t, uint64_t> operator * (const TensorUInt128<HL, LL>& lhs, const TensorUInt128<HR, LR>& rhs)
{
  // Split each 128-bit integer into 4 32-bit integers, and then do the
  // multiplications by hand as follows (partial products that would only
  // affect bits >= 128 are omitted):
  //   lhs      a  b  c  d
  //   rhs      e  f  g  h
  //        -----------
  //        ah bh ch dh
  //        bg cg dg
  //        cf df
  //        de
  // The result is stored in 2 64bit integers, high and low.

  const uint64_t LOW = 0x00000000FFFFFFFFLL;
  const uint64_t HIGH = 0xFFFFFFFF00000000LL;

  // 32-bit digits of lhs (d least significant) and rhs (h least significant).
  uint64_t d = lhs.low & LOW;
  uint64_t c = (lhs.low & HIGH) >> 32LL;
  uint64_t b = lhs.high & LOW;
  uint64_t a = (lhs.high & HIGH) >> 32LL;
  uint64_t h = rhs.low & LOW;
  uint64_t g = (rhs.low & HIGH) >> 32LL;
  uint64_t f = rhs.high & LOW;
  uint64_t e = (rhs.high & HIGH) >> 32LL;

  // Compute the low 32 bits of low
  uint64_t acc = d * h;
  uint64_t low = acc & LOW;
  // Compute the high 32 bits of low. Add a carry every time we wrap around
  // (detected by the unsigned sum being smaller than one of its operands).
  acc >>= 32LL;
  uint64_t carry = 0;
  uint64_t acc2 = acc + c * h;
  if (acc2 < acc) {
    carry++;
  }
  acc = acc2 + d * g;
  if (acc < acc2) {
    carry++;
  }
  low |= (acc << 32LL);

  // Carry forward the high bits of acc to initiate the computation of the
  // low 32 bits of high
  acc2 = (acc >> 32LL) | (carry << 32LL);
  carry = 0;
  acc = acc2 + b * h;
  if (acc < acc2) {
    carry++;
  }
  acc2 = acc + c * g;
  if (acc2 < acc) {
    carry++;
  }
  acc = acc2 + d * f;
  if (acc < acc2) {
    carry++;
  }
  uint64_t high = acc & LOW;

  // Start to compute the high 32 bits of high. Carries out of these sums
  // would only affect bits >= 128 and can therefore be ignored.
  acc2 = (acc >> 32LL) | (carry << 32LL);
  acc = acc2 + a * h;
  acc2 = acc + b * g;
  acc = acc2 + c * f;
  acc2 = acc + d * e;
  high |= (acc2 << 32LL);

  return TensorUInt128<uint64_t, uint64_t>(high, low);
}
// 128-bit unsigned division via classic shift-and-subtract long division:
// first find the largest power-of-two multiple of rhs that fits in lhs,
// then walk back down, subtracting and accumulating quotient bits.
// NOTE(review): division by zero is not checked here — presumably callers
// guarantee rhs != 0.
template <typename HL, typename LL, typename HR, typename LR>
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
TensorUInt128<uint64_t, uint64_t> operator / (const TensorUInt128<HL, LL>& lhs, const TensorUInt128<HR, LR>& rhs)
{
  if (rhs == TensorUInt128<static_val<0>, static_val<1> >(1)) {
    // Division by one: return the dividend unchanged.
    return TensorUInt128<uint64_t, uint64_t>(lhs.high, lhs.low);
  } else if (lhs < rhs) {
    // Dividend smaller than divisor: quotient is zero.
    return TensorUInt128<uint64_t, uint64_t>(0);
  } else {
    // calculate the biggest power of 2 times rhs that's less than or equal to lhs
    TensorUInt128<uint64_t, uint64_t> power2(1);
    TensorUInt128<uint64_t, uint64_t> d(rhs);
    TensorUInt128<uint64_t, uint64_t> tmp(lhs - d);
    while (lhs >= d) {
      tmp = tmp - d;
      d = d + d;
      power2 = power2 + power2;
    }

    // Walk the scaled divisor back down, setting one quotient bit per step.
    tmp = TensorUInt128<uint64_t, uint64_t>(lhs.high, lhs.low);
    TensorUInt128<uint64_t, uint64_t> result(0);
    while (power2 != TensorUInt128<static_val<0>, static_val<0> >(0)) {
      if (tmp >= d) {
        tmp = tmp - d;
        result = result + power2;
      }
      // Shift right
      power2 = TensorUInt128<uint64_t, uint64_t>(power2.high >> 1, (power2.low >> 1) | (power2.high << 63));
      d = TensorUInt128<uint64_t, uint64_t>(d.high >> 1, (d.low >> 1) | (d.high << 63));
    }

    return result;
  }
}
} // namespace internal
} // namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_UINT128_H
| 7,552
| 29.212
| 133
|
h
|
null |
LRMI-main/unsupported/Eigen/CXX11/src/ThreadPool/Barrier.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2018 Rasmus Munk Larsen <rmlarsen@google.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
// Barrier is an object that allows one or more threads to wait until
// Notify has been called a specified number of times.
#ifndef EIGEN_CXX11_THREADPOOL_BARRIER_H
#define EIGEN_CXX11_THREADPOOL_BARRIER_H
namespace Eigen {
// Blocks waiters until Notify() has been called `count` times.
// state_ packs the remaining notification count in its upper bits and a
// "someone is waiting" flag in the lowest bit, so Notify() can avoid taking
// the mutex when nobody is blocked in Wait().
class Barrier {
 public:
  Barrier(unsigned int count) : state_(count << 1), notified_(false) {
    // The count is stored shifted left by one; assert it did not overflow.
    eigen_plain_assert(((count << 1) >> 1) == count);
  }
  ~Barrier() { eigen_plain_assert((state_ >> 1) == 0); }

  // Decrements the pending count; when it reaches zero, wakes all waiters.
  void Notify() {
    unsigned int v = state_.fetch_sub(2, std::memory_order_acq_rel) - 2;
    if (v != 1) {
      // Clear the lowest bit (waiter flag) and check that the original state
      // value was not zero. If it was zero, it means that notify was called
      // more times than the original count.
      eigen_plain_assert(((v + 2) & ~1) != 0);
      return;  // either count has not dropped to 0, or waiter is not waiting
    }
    std::unique_lock<std::mutex> l(mu_);
    eigen_plain_assert(!notified_);
    notified_ = true;
    cv_.notify_all();
  }

  // Blocks until the count reaches zero; returns immediately if it already
  // has (fast path: no mutex acquired).
  void Wait() {
    unsigned int v = state_.fetch_or(1, std::memory_order_acq_rel);
    if ((v >> 1) == 0) return;
    std::unique_lock<std::mutex> l(mu_);
    while (!notified_) {
      cv_.wait(l);
    }
  }

 private:
  std::mutex mu_;
  std::condition_variable cv_;
  std::atomic<unsigned int> state_;  // low bit is waiter flag
  bool notified_;                    // set once, under mu_
};
// Notification is an object that allows a user to wait for another
// thread to signal a notification that an event has occurred.
//
// Multiple threads can wait on the same Notification object,
// but only one caller must call Notify() on the object.
// Single-use event: a Barrier with a count of one, so the first (and only)
// call to Notify() releases all current and future waiters.
struct Notification : Barrier {
  // Removed the stray semicolon after the constructor body (an empty member
  // declaration that trips -Wextra-semi).
  Notification() : Barrier(1) {}
};
} // namespace Eigen
#endif // EIGEN_CXX11_THREADPOOL_BARRIER_H
| 2,113
| 30.088235
| 77
|
h
|
null |
LRMI-main/unsupported/Eigen/CXX11/src/ThreadPool/EventCount.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2016 Dmitry Vyukov <dvyukov@google.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_THREADPOOL_EVENTCOUNT_H_
#define EIGEN_CXX11_THREADPOOL_EVENTCOUNT_H_
namespace Eigen {
// EventCount allows to wait for arbitrary predicates in non-blocking
// algorithms. Think of condition variable, but wait predicate does not need to
// be protected by a mutex. Usage:
// Waiting thread does:
//
// if (predicate)
// return act();
// EventCount::Waiter& w = waiters[my_index];
// ec.Prewait(&w);
// if (predicate) {
// ec.CancelWait(&w);
// return act();
// }
// ec.CommitWait(&w);
//
// Notifying thread does:
//
// predicate = true;
// ec.Notify(true);
//
// Notify is cheap if there are no waiting threads. Prewait/CommitWait are not
// cheap, but they are executed only if the preceding predicate check has
// failed.
//
// Algorithm outline:
// There are two main variables: predicate (managed by user) and state_.
// Operation closely resembles Dekker mutual algorithm:
// https://en.wikipedia.org/wiki/Dekker%27s_algorithm
// Waiting thread sets state_ then checks predicate, Notifying thread sets
// predicate then checks state_. Due to seq_cst fences in between these
// operations it is guaranteed than either waiter will see predicate change
// and won't block, or notifying thread will see state_ change and will unblock
// the waiter, or both. But it can't happen that both threads don't see each
// other changes, which would lead to deadlock.
// EventCount: see the usage and algorithm notes above. All bookkeeping is
// packed into one 64-bit atomic `state_` (waiter stack top | pre-wait count
// | pending signal count | ABA epoch); see the layout comment next to the
// constants in the private section.
class EventCount {
 public:
  class Waiter;

  // `waiters` must outlive this EventCount and must never be resized while
  // in use: each blocked thread is identified by its index into it.
  EventCount(MaxSizeVector<Waiter>& waiters)
      : state_(kStackMask), waiters_(waiters) {
    eigen_plain_assert(waiters.size() < (1 << kWaiterBits) - 1);
  }

  ~EventCount() {
    // Ensure there are no waiters.
    eigen_plain_assert(state_.load() == kStackMask)
  }

  // Prewait prepares for waiting.
  // After calling Prewait, the thread must re-check the wait predicate
  // and then call either CancelWait or CommitWait.
  void Prewait() {
    uint64_t state = state_.load(std::memory_order_relaxed);
    for (;;) {
      CheckState(state);
      uint64_t newstate = state + kWaiterInc;
      CheckState(newstate);
      // seq_cst here pairs with the fence in Notify() (Dekker-style).
      if (state_.compare_exchange_weak(state, newstate,
                                       std::memory_order_seq_cst))
        return;
    }
  }

  // CommitWait commits waiting after Prewait.
  void CommitWait(Waiter* w) {
    eigen_plain_assert((w->epoch & ~kEpochMask) == 0);
    w->state = Waiter::kNotSignaled;
    // Stack-node identity: our index in waiters_ tagged with our ABA epoch.
    const uint64_t me = (w - &waiters_[0]) | w->epoch;
    uint64_t state = state_.load(std::memory_order_seq_cst);
    for (;;) {
      CheckState(state, true);
      uint64_t newstate;
      if ((state & kSignalMask) != 0) {
        // Consume the signal and return immediately.
        newstate = state - kWaiterInc - kSignalInc;
      } else {
        // Remove this thread from pre-wait counter and add to the waiter stack.
        newstate = ((state & kWaiterMask) - kWaiterInc) | me;
        w->next.store(state & (kStackMask | kEpochMask),
                      std::memory_order_relaxed);
      }
      CheckState(newstate);
      if (state_.compare_exchange_weak(state, newstate,
                                       std::memory_order_acq_rel)) {
        if ((state & kSignalMask) == 0) {
          w->epoch += kEpochInc;
          Park(w);  // block until Unpark() signals us
        }
        return;
      }
    }
  }

  // CancelWait cancels effects of the previous Prewait call.
  void CancelWait() {
    uint64_t state = state_.load(std::memory_order_relaxed);
    for (;;) {
      CheckState(state, true);
      uint64_t newstate = state - kWaiterInc;
      // We don't know if the thread was also notified or not,
      // so we should not consume a signal unconditionally.
      // Only if number of waiters is equal to number of signals,
      // we know that the thread was notified and we must take away the signal.
      if (((state & kWaiterMask) >> kWaiterShift) ==
          ((state & kSignalMask) >> kSignalShift))
        newstate -= kSignalInc;
      CheckState(newstate);
      if (state_.compare_exchange_weak(state, newstate,
                                       std::memory_order_acq_rel))
        return;
    }
  }

  // Notify wakes one or all waiting threads.
  // Must be called after changing the associated wait predicate.
  void Notify(bool notifyAll) {
    // Full fence: makes the predicate change visible before we read state_
    // (pairs with the seq_cst CAS in Prewait).
    std::atomic_thread_fence(std::memory_order_seq_cst);
    uint64_t state = state_.load(std::memory_order_acquire);
    for (;;) {
      CheckState(state);
      const uint64_t waiters = (state & kWaiterMask) >> kWaiterShift;
      const uint64_t signals = (state & kSignalMask) >> kSignalShift;
      // Easy case: no waiters.
      if ((state & kStackMask) == kStackMask && waiters == signals) return;
      uint64_t newstate;
      if (notifyAll) {
        // Empty wait stack and set signal to number of pre-wait threads.
        newstate =
            (state & kWaiterMask) | (waiters << kSignalShift) | kStackMask;
      } else if (signals < waiters) {
        // There is a thread in pre-wait state, unblock it.
        newstate = state + kSignalInc;
      } else {
        // Pop a waiter from list and unpark it.
        Waiter* w = &waiters_[state & kStackMask];
        uint64_t next = w->next.load(std::memory_order_relaxed);
        newstate = (state & (kWaiterMask | kSignalMask)) | next;
      }
      CheckState(newstate);
      if (state_.compare_exchange_weak(state, newstate,
                                       std::memory_order_acq_rel)) {
        if (!notifyAll && (signals < waiters))
          return;  // unblocked pre-wait thread
        if ((state & kStackMask) == kStackMask) return;
        Waiter* w = &waiters_[state & kStackMask];
        if (!notifyAll) w->next.store(kStackMask, std::memory_order_relaxed);
        Unpark(w);
        return;
      }
    }
  }

  // Per-thread wait slot; lives in the waiters_ vector owned by the caller.
  class Waiter {
    friend class EventCount;
    // Align to 128 byte boundary to prevent false sharing with other Waiter
    // objects in the same vector.
    EIGEN_ALIGN_TO_BOUNDARY(128) std::atomic<uint64_t> next;
    std::mutex mu;
    std::condition_variable cv;
    uint64_t epoch = 0;
    unsigned state = kNotSignaled;
    enum {
      kNotSignaled,
      kWaiting,
      kSignaled,
    };
  };

 private:
  // State_ layout:
  // - low kWaiterBits is a stack of waiters committed wait
  //   (indexes in waiters_ array are used as stack elements,
  //   kStackMask means empty stack).
  // - next kWaiterBits is count of waiters in prewait state.
  // - next kWaiterBits is count of pending signals.
  // - remaining bits are ABA counter for the stack.
  //   (stored in Waiter node and incremented on push).
  static const uint64_t kWaiterBits = 14;
  static const uint64_t kStackMask = (1ull << kWaiterBits) - 1;
  static const uint64_t kWaiterShift = kWaiterBits;
  static const uint64_t kWaiterMask = ((1ull << kWaiterBits) - 1)
                                      << kWaiterShift;
  static const uint64_t kWaiterInc = 1ull << kWaiterShift;
  static const uint64_t kSignalShift = 2 * kWaiterBits;
  static const uint64_t kSignalMask = ((1ull << kWaiterBits) - 1)
                                      << kSignalShift;
  static const uint64_t kSignalInc = 1ull << kSignalShift;
  static const uint64_t kEpochShift = 3 * kWaiterBits;
  static const uint64_t kEpochBits = 64 - kEpochShift;
  static const uint64_t kEpochMask = ((1ull << kEpochBits) - 1) << kEpochShift;
  static const uint64_t kEpochInc = 1ull << kEpochShift;
  std::atomic<uint64_t> state_;
  MaxSizeVector<Waiter>& waiters_;

  // Debug-build sanity checks on a packed state value.
  static void CheckState(uint64_t state, bool waiter = false) {
    static_assert(kEpochBits >= 20, "not enough bits to prevent ABA problem");
    const uint64_t waiters = (state & kWaiterMask) >> kWaiterShift;
    const uint64_t signals = (state & kSignalMask) >> kSignalShift;
    eigen_plain_assert(waiters >= signals);
    eigen_plain_assert(waiters < (1 << kWaiterBits) - 1);
    eigen_plain_assert(!waiter || waiters > 0);
    (void)waiters;
    (void)signals;
  }

  // Blocks the calling thread on its Waiter slot until Unpark() signals it.
  void Park(Waiter* w) {
    std::unique_lock<std::mutex> lock(w->mu);
    while (w->state != Waiter::kSignaled) {
      w->state = Waiter::kWaiting;
      w->cv.wait(lock);
    }
  }

  // Wakes w and every waiter chained after it on the popped stack segment.
  void Unpark(Waiter* w) {
    for (Waiter* next; w; w = next) {
      uint64_t wnext = w->next.load(std::memory_order_relaxed) & kStackMask;
      next = wnext == kStackMask ? nullptr : &waiters_[wnext];
      unsigned state;
      {
        std::unique_lock<std::mutex> lock(w->mu);
        state = w->state;
        w->state = Waiter::kSignaled;
      }
      // Avoid notifying if it wasn't waiting.
      if (state == Waiter::kWaiting) w->cv.notify_one();
    }
  }

  EventCount(const EventCount&) = delete;
  void operator=(const EventCount&) = delete;
};
} // namespace Eigen
#endif // EIGEN_CXX11_THREADPOOL_EVENTCOUNT_H_
| 9,121
| 35.488
| 80
|
h
|
null |
LRMI-main/unsupported/Eigen/CXX11/src/ThreadPool/NonBlockingThreadPool.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2016 Dmitry Vyukov <dvyukov@google.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_THREADPOOL_NONBLOCKING_THREAD_POOL_H
#define EIGEN_CXX11_THREADPOOL_NONBLOCKING_THREAD_POOL_H
namespace Eigen {
// Work-stealing thread pool. Each worker owns a fixed-size RunQueue; idle
// workers steal first within their assigned partition, then globally, and
// finally block on an EventCount until new work arrives or the pool shuts
// down.
template <typename Environment>
class ThreadPoolTempl : public Eigen::ThreadPoolInterface {
 public:
  typedef typename Environment::Task Task;
  typedef RunQueue<Task, 1024> Queue;

  // Convenience constructor: spinning enabled by default.
  ThreadPoolTempl(int num_threads, Environment env = Environment())
      : ThreadPoolTempl(num_threads, true, env) {}

  // allow_spinning: when true, one idle worker busy-polls briefly before
  // blocking, trading CPU for lower scheduling latency.
  ThreadPoolTempl(int num_threads, bool allow_spinning,
                  Environment env = Environment())
      : env_(env),
        num_threads_(num_threads),
        allow_spinning_(allow_spinning),
        thread_data_(num_threads),
        all_coprimes_(num_threads),
        waiters_(num_threads),
        global_steal_partition_(EncodePartition(0, num_threads_)),
        blocked_(0),
        spinning_(0),
        done_(false),
        cancelled_(false),
        ec_(waiters_) {
    waiters_.resize(num_threads_);
    // Calculate coprimes of all numbers [1, num_threads].
    // Coprimes are used for random walks over all threads in Steal
    // and NonEmptyQueueIndex. Iteration is based on the fact that if we take
    // a random starting thread index t and calculate num_threads - 1 subsequent
    // indices as (t + coprime) % num_threads, we will cover all threads without
    // repetitions (effectively getting a pseudo-random permutation of thread
    // indices).
    eigen_plain_assert(num_threads_ < kMaxThreads);
    for (int i = 1; i <= num_threads_; ++i) {
      all_coprimes_.emplace_back(i);
      ComputeCoprimes(i, &all_coprimes_.back());
    }
#ifndef EIGEN_THREAD_LOCAL
    init_barrier_.reset(new Barrier(num_threads_));
#endif
    thread_data_.resize(num_threads_);
    for (int i = 0; i < num_threads_; i++) {
      SetStealPartition(i, EncodePartition(0, num_threads_));
      thread_data_[i].thread.reset(
          env_.CreateThread([this, i]() { WorkerLoop(i); }));
    }
#ifndef EIGEN_THREAD_LOCAL
    // Wait for workers to initialize per_thread_map_. Otherwise we might race
    // with them in Schedule or CurrentThreadId.
    init_barrier_->Wait();
#endif
  }

  ~ThreadPoolTempl() {
    done_ = true;

    // Now if all threads block without work, they will start exiting.
    // But note that threads can continue to work arbitrary long,
    // block, submit new work, unblock and otherwise live full life.
    if (!cancelled_) {
      ec_.Notify(true);
    } else {
      // Since we were cancelled, there might be entries in the queues.
      // Empty them to prevent their destructor from asserting.
      for (size_t i = 0; i < thread_data_.size(); i++) {
        thread_data_[i].queue.Flush();
      }
    }
    // Join threads explicitly (by destroying) to avoid destruction order within
    // this class.
    for (size_t i = 0; i < thread_data_.size(); ++i)
      thread_data_[i].thread.reset();
  }

  // Restricts which queues each worker may steal from, one (start, end)
  // half-open range per worker thread.
  void SetStealPartitions(const std::vector<std::pair<unsigned, unsigned>>& partitions) {
    eigen_plain_assert(partitions.size() == static_cast<std::size_t>(num_threads_));

    // Pass this information to each thread queue.
    for (int i = 0; i < num_threads_; i++) {
      const auto& pair = partitions[i];
      unsigned start = pair.first, end = pair.second;
      AssertBounds(start, end);
      unsigned val = EncodePartition(start, end);
      SetStealPartition(i, val);
    }
  }

  // Submits fn for execution on any worker thread.
  void Schedule(std::function<void()> fn) EIGEN_OVERRIDE {
    ScheduleWithHint(std::move(fn), 0, num_threads_);
  }

  // Submits fn, preferring (for external callers) a random queue in
  // [start, limit). Worker threads always push onto their own queue.
  void ScheduleWithHint(std::function<void()> fn, int start,
                        int limit) override {
    Task t = env_.CreateTask(std::move(fn));
    PerThread* pt = GetPerThread();
    if (pt->pool == this) {
      // Worker thread of this pool, push onto the thread's queue.
      Queue& q = thread_data_[pt->thread_id].queue;
      t = q.PushFront(std::move(t));
    } else {
      // A free-standing thread (or worker of another pool), push onto a random
      // queue.
      eigen_plain_assert(start < limit);
      eigen_plain_assert(limit <= num_threads_);
      int num_queues = limit - start;
      int rnd = Rand(&pt->rand) % num_queues;
      eigen_plain_assert(start + rnd < limit);
      Queue& q = thread_data_[start + rnd].queue;
      t = q.PushBack(std::move(t));
    }
    // Note: below we touch this after making w available to worker threads.
    // Strictly speaking, this can lead to a racy-use-after-free. Consider that
    // Schedule is called from a thread that is neither main thread nor a worker
    // thread of this pool. Then, execution of w directly or indirectly
    // completes overall computations, which in turn leads to destruction of
    // this. We expect that such scenario is prevented by program, that is,
    // this is kept alive while any threads can potentially be in Schedule.
    if (!t.f) {
      ec_.Notify(false);
    } else {
      env_.ExecuteTask(t);  // Push failed, execute directly.
    }
  }

  // Cancels pending work (best effort) and tells workers to exit.
  void Cancel() EIGEN_OVERRIDE {
    cancelled_ = true;
    done_ = true;

    // Let each thread know it's been cancelled.
#ifdef EIGEN_THREAD_ENV_SUPPORTS_CANCELLATION
    for (size_t i = 0; i < thread_data_.size(); i++) {
      thread_data_[i].thread->OnCancel();
    }
#endif

    // Wake up the threads without work to let them exit on their own.
    ec_.Notify(true);
  }

  int NumThreads() const EIGEN_FINAL { return num_threads_; }

  // Returns this worker's index in the pool, or -1 for foreign threads.
  int CurrentThreadId() const EIGEN_FINAL {
    const PerThread* pt = const_cast<ThreadPoolTempl*>(this)->GetPerThread();
    if (pt->pool == this) {
      return pt->thread_id;
    } else {
      return -1;
    }
  }

 private:
  // A steal partition (start and limit indices) is encoded in a single
  // unsigned so it can be stored in one std::atomic<unsigned>.
  // We expect num_threads_ < 65536, so start and limit each fit in 16 bits.
  // NOTE(review): the original comment said these are "exposed publicly as
  // static functions"; here they are private members — callers wanting the
  // same encoding must replicate it.
  static const int kMaxPartitionBits = 16;
  static const int kMaxThreads = 1 << kMaxPartitionBits;

  inline unsigned EncodePartition(unsigned start, unsigned limit) {
    return (start << kMaxPartitionBits) | limit;
  }

  inline void DecodePartition(unsigned val, unsigned* start, unsigned* limit) {
    *limit = val & (kMaxThreads - 1);
    val >>= kMaxPartitionBits;
    *start = val;
  }

  void AssertBounds(int start, int end) {
    eigen_plain_assert(start >= 0);
    eigen_plain_assert(start < end);  // non-zero sized partition
    eigen_plain_assert(end <= num_threads_);
  }

  inline void SetStealPartition(size_t i, unsigned val) {
    thread_data_[i].steal_partition.store(val, std::memory_order_relaxed);
  }

  inline unsigned GetStealPartition(int i) {
    return thread_data_[i].steal_partition.load(std::memory_order_relaxed);
  }

  // Appends to *coprimes every i in [1, N] with gcd(i, N) == 1.
  void ComputeCoprimes(int N, MaxSizeVector<unsigned>* coprimes) {
    for (int i = 1; i <= N; i++) {
      unsigned a = i;
      unsigned b = N;
      // If GCD(a, b) == 1, then a and b are coprimes.
      while (b != 0) {
        unsigned tmp = a;
        a = b;
        b = tmp % b;
      }
      if (a == 1) {
        coprimes->push_back(i);
      }
    }
  }

  typedef typename Environment::EnvThread Thread;

  struct PerThread {
    constexpr PerThread() : pool(NULL), rand(0), thread_id(-1) {}
    ThreadPoolTempl* pool;  // Parent pool, or null for normal threads.
    uint64_t rand;          // Random generator state.
    int thread_id;          // Worker thread index in pool.
#ifndef EIGEN_THREAD_LOCAL
    // Prevent false sharing.
    char pad_[128];
#endif
  };

  struct ThreadData {
    constexpr ThreadData() : thread(), steal_partition(0), queue() {}
    std::unique_ptr<Thread> thread;
    std::atomic<unsigned> steal_partition;
    Queue queue;
  };

  Environment env_;
  const int num_threads_;
  const bool allow_spinning_;
  MaxSizeVector<ThreadData> thread_data_;
  MaxSizeVector<MaxSizeVector<unsigned>> all_coprimes_;
  MaxSizeVector<EventCount::Waiter> waiters_;
  unsigned global_steal_partition_;
  std::atomic<unsigned> blocked_;
  std::atomic<bool> spinning_;
  std::atomic<bool> done_;
  std::atomic<bool> cancelled_;
  EventCount ec_;
#ifndef EIGEN_THREAD_LOCAL
  std::unique_ptr<Barrier> init_barrier_;
  std::mutex per_thread_map_mutex_;  // Protects per_thread_map_.
  std::unordered_map<uint64_t, std::unique_ptr<PerThread>> per_thread_map_;
#endif

  // Main worker thread loop.
  void WorkerLoop(int thread_id) {
#ifndef EIGEN_THREAD_LOCAL
    std::unique_ptr<PerThread> new_pt(new PerThread());
    per_thread_map_mutex_.lock();
    bool insertOK = per_thread_map_.emplace(GlobalThreadIdHash(), std::move(new_pt)).second;
    eigen_plain_assert(insertOK);
    EIGEN_UNUSED_VARIABLE(insertOK);
    per_thread_map_mutex_.unlock();
    init_barrier_->Notify();
    init_barrier_->Wait();
#endif
    PerThread* pt = GetPerThread();
    pt->pool = this;
    pt->rand = GlobalThreadIdHash();
    pt->thread_id = thread_id;
    Queue& q = thread_data_[thread_id].queue;
    EventCount::Waiter* waiter = &waiters_[thread_id];
    // TODO(dvyukov,rmlarsen): The time spent in NonEmptyQueueIndex() is
    // proportional to num_threads_ and we assume that new work is scheduled at
    // a constant rate, so we set spin_count to 5000 / num_threads_. The
    // constant was picked based on a fair dice roll, tune it.
    const int spin_count =
        allow_spinning_ && num_threads_ > 0 ? 5000 / num_threads_ : 0;
    if (num_threads_ == 1) {
      // For num_threads_ == 1 there is no point in going through the expensive
      // steal loop. Moreover, since NonEmptyQueueIndex() calls PopBack() on the
      // victim queues it might reverse the order in which ops are executed
      // compared to the order in which they are scheduled, which tends to be
      // counter-productive for the types of I/O workloads the single thread
      // pools tend to be used for.
      while (!cancelled_) {
        Task t = q.PopFront();
        for (int i = 0; i < spin_count && !t.f; i++) {
          if (!cancelled_.load(std::memory_order_relaxed)) {
            t = q.PopFront();
          }
        }
        if (!t.f) {
          if (!WaitForWork(waiter, &t)) {
            return;
          }
        }
        if (t.f) {
          env_.ExecuteTask(t);
        }
      }
    } else {
      while (!cancelled_) {
        Task t = q.PopFront();
        if (!t.f) {
          t = LocalSteal();
          if (!t.f) {
            t = GlobalSteal();
            if (!t.f) {
              // Leave one thread spinning. This reduces latency.
              if (allow_spinning_ && !spinning_ && !spinning_.exchange(true)) {
                for (int i = 0; i < spin_count && !t.f; i++) {
                  if (!cancelled_.load(std::memory_order_relaxed)) {
                    t = GlobalSteal();
                  } else {
                    return;
                  }
                }
                spinning_ = false;
              }
              if (!t.f) {
                if (!WaitForWork(waiter, &t)) {
                  return;
                }
              }
            }
          }
        }
        if (t.f) {
          env_.ExecuteTask(t);
        }
      }
    }
  }

  // Steal tries to steal work from other worker threads in the range [start,
  // limit) in best-effort manner.
  Task Steal(unsigned start, unsigned limit) {
    PerThread* pt = GetPerThread();
    const size_t size = limit - start;
    unsigned r = Rand(&pt->rand);
    // Reduce r into [0, size) range, this utilizes trick from
    // https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/
    eigen_plain_assert(all_coprimes_[size - 1].size() < (1<<30));
    unsigned victim = ((uint64_t)r * (uint64_t)size) >> 32;
    unsigned index = ((uint64_t) all_coprimes_[size - 1].size() * (uint64_t)r) >> 32;
    unsigned inc = all_coprimes_[size - 1][index];

    for (unsigned i = 0; i < size; i++) {
      eigen_plain_assert(start + victim < limit);
      Task t = thread_data_[start + victim].queue.PopBack();
      if (t.f) {
        return t;
      }
      victim += inc;
      if (victim >= size) {
        victim -= size;
      }
    }
    return Task();
  }

  // Steals work within threads belonging to the partition.
  Task LocalSteal() {
    PerThread* pt = GetPerThread();
    unsigned partition = GetStealPartition(pt->thread_id);
    // If thread steal partition is the same as global partition, there is no
    // need to go through the steal loop twice.
    if (global_steal_partition_ == partition) return Task();
    unsigned start, limit;
    DecodePartition(partition, &start, &limit);
    AssertBounds(start, limit);

    return Steal(start, limit);
  }

  // Steals work from any other thread in the pool.
  Task GlobalSteal() {
    return Steal(0, num_threads_);
  }

  // WaitForWork blocks until new work is available (returns true), or if it is
  // time to exit (returns false). Can optionally return a task to execute in t
  // (in such case t.f != nullptr on return).
  bool WaitForWork(EventCount::Waiter* waiter, Task* t) {
    eigen_plain_assert(!t->f);
    // We already did best-effort emptiness check in Steal, so prepare for
    // blocking.
    ec_.Prewait();
    // Now do a reliable emptiness check.
    int victim = NonEmptyQueueIndex();
    if (victim != -1) {
      ec_.CancelWait();
      if (cancelled_) {
        return false;
      } else {
        *t = thread_data_[victim].queue.PopBack();
        return true;
      }
    }
    // Number of blocked threads is used as termination condition.
    // If we are shutting down and all worker threads blocked without work,
    // that's we are done.
    blocked_++;
    // TODO is blocked_ required to be unsigned?
    if (done_ && blocked_ == static_cast<unsigned>(num_threads_)) {
      ec_.CancelWait();
      // Almost done, but need to re-check queues.
      // Consider that all queues are empty and all worker threads are preempted
      // right after incrementing blocked_ above. Now a free-standing thread
      // submits work and calls destructor (which sets done_). If we don't
      // re-check queues, we will exit leaving the work unexecuted.
      if (NonEmptyQueueIndex() != -1) {
        // Note: we must not pop from queues before we decrement blocked_,
        // otherwise the following scenario is possible. Consider that instead
        // of checking for emptiness we popped the only element from queues.
        // Now other worker threads can start exiting, which is bad if the
        // work item submits other work. So we just check emptiness here,
        // which ensures that all worker threads exit at the same time.
        blocked_--;
        return true;
      }
      // Reached stable termination state.
      ec_.Notify(true);
      return false;
    }
    ec_.CommitWait(waiter);
    blocked_--;
    return true;
  }

  // Returns the index of some thread whose queue is non-empty, or -1.
  int NonEmptyQueueIndex() {
    PerThread* pt = GetPerThread();
    // We intentionally design NonEmptyQueueIndex to steal work from
    // anywhere in the queue so threads don't block in WaitForWork() forever
    // when all threads in their partition go to sleep. Steal is still local.
    const size_t size = thread_data_.size();
    unsigned r = Rand(&pt->rand);
    unsigned inc = all_coprimes_[size - 1][r % all_coprimes_[size - 1].size()];
    unsigned victim = r % size;
    for (unsigned i = 0; i < size; i++) {
      if (!thread_data_[victim].queue.Empty()) {
        return victim;
      }
      victim += inc;
      if (victim >= size) {
        victim -= size;
      }
    }
    return -1;
  }

  static EIGEN_STRONG_INLINE uint64_t GlobalThreadIdHash() {
    return std::hash<std::thread::id>()(std::this_thread::get_id());
  }

  // Returns this thread's PerThread slot (a shared dummy for foreign threads
  // when thread-local storage is unavailable).
  EIGEN_STRONG_INLINE PerThread* GetPerThread() {
#ifndef EIGEN_THREAD_LOCAL
    static PerThread dummy;
    auto it = per_thread_map_.find(GlobalThreadIdHash());
    if (it == per_thread_map_.end()) {
      return &dummy;
    } else {
      return it->second.get();
    }
#else
    EIGEN_THREAD_LOCAL PerThread per_thread_;
    PerThread* pt = &per_thread_;
    return pt;
#endif
  }

  // PCG-XSH-RS pseudo-random generator; state advances on every call.
  static EIGEN_STRONG_INLINE unsigned Rand(uint64_t* state) {
    uint64_t current = *state;
    // Update the internal state
    *state = current * 6364136223846793005ULL + 0xda3e39cb94b95bdbULL;
    // Generate the random output (using the PCG-XSH-RS scheme)
    return static_cast<unsigned>((current ^ (current >> 22)) >>
                                 (22 + (current >> 61)));
  }
};
// Default thread pool type backed by std::thread. Modernized from `typedef`
// to a C++11 alias declaration (the file already requires C++11: std::atomic,
// lambdas, etc.); the resulting type is identical.
using ThreadPool = ThreadPoolTempl<StlThreadEnvironment>;
} // namespace Eigen
#endif // EIGEN_CXX11_THREADPOOL_NONBLOCKING_THREAD_POOL_H
| 17,075
| 34.063655
| 92
|
h
|
null |
LRMI-main/unsupported/Eigen/CXX11/src/ThreadPool/RunQueue.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2016 Dmitry Vyukov <dvyukov@google.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_THREADPOOL_RUNQUEUE_H_
#define EIGEN_CXX11_THREADPOOL_RUNQUEUE_H_
namespace Eigen {
// RunQueue is a fixed-size, partially non-blocking deque of Work items.
// Operations on front of the queue must be done by a single thread (owner),
// operations on back of the queue can be done by multiple threads concurrently.
//
// Algorithm outline:
// All remote threads operating on the queue back are serialized by a mutex.
// This ensures that at most two threads access state: owner and one remote
// thread (Size aside). The algorithm ensures that the occupied region of the
// underlying array is logically continuous (can wraparound, but no stray
// occupied elements). Owner operates on one end of this region, remote thread
// operates on the other end. Synchronization between these threads
// (potential consumption of the last element and take up of the last empty
// element) happens by means of state variable in each element. States are:
// empty, busy (in process of insertion of removal) and ready. Threads claim
// elements (empty->busy and ready->busy transitions) by means of a CAS
// operation. The finishing transition (busy->empty and busy->ready) are done
// with plain store as the element is exclusively owned by the current thread.
//
// Note: we could permit only pointers as elements, then we would not need
// separate state variable as null/non-null pointer value would serve as state,
// but that would require malloc/free per operation for large, complex values
// (and this is designed to store std::function<()>).
template <typename Work, unsigned kSize>
class RunQueue {
 public:
  RunQueue() : front_(0), back_(0) {
    // require power-of-two for fast masking
    eigen_plain_assert((kSize & (kSize - 1)) == 0);
    eigen_plain_assert(kSize > 2); // why would you do this?
    eigen_plain_assert(kSize <= (64 << 10)); // leave enough space for counter
    for (unsigned i = 0; i < kSize; i++)
      array_[i].state.store(kEmpty, std::memory_order_relaxed);
  }

  // The queue must be drained before destruction.
  ~RunQueue() { eigen_plain_assert(Size() == 0); }

  // PushFront inserts w at the beginning of the queue.
  // If queue is full returns w, otherwise returns default-constructed Work.
  // Owner-side operation: per the class contract, must only be called by the
  // single owner thread.
  Work PushFront(Work w) {
    unsigned front = front_.load(std::memory_order_relaxed);
    Elem* e = &array_[front & kMask];
    uint8_t s = e->state.load(std::memory_order_relaxed);
    if (s != kEmpty ||
        !e->state.compare_exchange_strong(s, kBusy, std::memory_order_acquire))
      return w;
    // Advance the rolling index (+1) and bump the modification counter kept in
    // the high bits (+(kSize << 1) is the lowest counter bit, see front_/back_).
    front_.store(front + 1 + (kSize << 1), std::memory_order_relaxed);
    e->w = std::move(w);
    e->state.store(kReady, std::memory_order_release);
    return Work();
  }

  // PopFront removes and returns the first element in the queue.
  // If the queue was empty returns default-constructed Work.
  // Owner-side operation: must only be called by the single owner thread.
  Work PopFront() {
    unsigned front = front_.load(std::memory_order_relaxed);
    Elem* e = &array_[(front - 1) & kMask];
    uint8_t s = e->state.load(std::memory_order_relaxed);
    if (s != kReady ||
        !e->state.compare_exchange_strong(s, kBusy, std::memory_order_acquire))
      return Work();
    Work w = std::move(e->w);
    e->state.store(kEmpty, std::memory_order_release);
    // Decrement only the index bits (kMask2); the modification counter in the
    // high bits is left unchanged.
    front = ((front - 1) & kMask2) | (front & ~kMask2);
    front_.store(front, std::memory_order_relaxed);
    return w;
  }

  // PushBack adds w at the end of the queue.
  // If queue is full returns w, otherwise returns default-constructed Work.
  // Remote-side operation: serialized against other remote threads by mutex_.
  Work PushBack(Work w) {
    std::unique_lock<std::mutex> lock(mutex_);
    unsigned back = back_.load(std::memory_order_relaxed);
    Elem* e = &array_[(back - 1) & kMask];
    uint8_t s = e->state.load(std::memory_order_relaxed);
    if (s != kEmpty ||
        !e->state.compare_exchange_strong(s, kBusy, std::memory_order_acquire))
      return w;
    // Decrement only the index bits, keeping the modification counter.
    back = ((back - 1) & kMask2) | (back & ~kMask2);
    back_.store(back, std::memory_order_relaxed);
    e->w = std::move(w);
    e->state.store(kReady, std::memory_order_release);
    return Work();
  }

  // PopBack removes and returns the last elements in the queue.
  // Remote-side operation: serialized against other remote threads by mutex_.
  Work PopBack() {
    if (Empty()) return Work();
    std::unique_lock<std::mutex> lock(mutex_);
    unsigned back = back_.load(std::memory_order_relaxed);
    Elem* e = &array_[back & kMask];
    uint8_t s = e->state.load(std::memory_order_relaxed);
    if (s != kReady ||
        !e->state.compare_exchange_strong(s, kBusy, std::memory_order_acquire))
      return Work();
    Work w = std::move(e->w);
    e->state.store(kEmpty, std::memory_order_release);
    // Advance the rolling index and bump the modification counter.
    back_.store(back + 1 + (kSize << 1), std::memory_order_relaxed);
    return w;
  }

  // PopBackHalf removes and returns half last elements in the queue.
  // Returns number of elements removed.
  unsigned PopBackHalf(std::vector<Work>* result) {
    if (Empty()) return 0;
    std::unique_lock<std::mutex> lock(mutex_);
    unsigned back = back_.load(std::memory_order_relaxed);
    unsigned size = Size();
    unsigned mid = back;
    if (size > 1) mid = back + (size - 1) / 2;
    unsigned n = 0;
    unsigned start = 0;
    for (; static_cast<int>(mid - back) >= 0; mid--) {
      Elem* e = &array_[mid & kMask];
      uint8_t s = e->state.load(std::memory_order_relaxed);
      if (n == 0) {
        // Claim the first element with a CAS: the owner thread may be
        // concurrently touching this end of the occupied region.
        if (s != kReady || !e->state.compare_exchange_strong(
                               s, kBusy, std::memory_order_acquire))
          continue;
        start = mid;
      } else {
        // Note: no need to store temporal kBusy, we exclusively own these
        // elements.
        eigen_plain_assert(s == kReady);
      }
      result->push_back(std::move(e->w));
      e->state.store(kEmpty, std::memory_order_release);
      n++;
    }
    if (n != 0)
      back_.store(start + 1 + (kSize << 1), std::memory_order_relaxed);
    return n;
  }

  // Size returns current queue size.
  // Can be called by any thread at any time.
  unsigned Size() const { return SizeOrNotEmpty<true>(); }

  // Empty tests whether container is empty.
  // Can be called by any thread at any time.
  bool Empty() const { return SizeOrNotEmpty<false>() == 0; }

  // Delete all the elements from the queue.
  void Flush() {
    while (!Empty()) {
      PopFront();
    }
  }

 private:
  static const unsigned kMask = kSize - 1;
  static const unsigned kMask2 = (kSize << 1) - 1;
  struct Elem {
    std::atomic<uint8_t> state;
    Work w;
  };
  // Per-element states; see the algorithm outline above the class.
  enum {
    kEmpty,
    kBusy,
    kReady,
  };
  std::mutex mutex_;
  // Low log(kSize) + 1 bits in front_ and back_ contain rolling index of
  // front/back, respectively. The remaining bits contain modification counters
  // that are incremented on Push operations. This allows us to (1) distinguish
  // between empty and full conditions (if we would use log(kSize) bits for
  // position, these conditions would be indistinguishable); (2) obtain
  // consistent snapshot of front_/back_ for Size operation using the
  // modification counters.
  std::atomic<unsigned> front_;
  std::atomic<unsigned> back_;
  Elem array_[kSize];

  // SizeOrNotEmpty returns current queue size; if NeedSizeEstimate is false,
  // only whether the size is 0 is guaranteed to be correct.
  // Can be called by any thread at any time.
  template<bool NeedSizeEstimate>
  unsigned SizeOrNotEmpty() const {
    // Emptiness plays critical role in thread pool blocking. So we go to great
    // effort to not produce false positives (claim non-empty queue as empty).
    unsigned front = front_.load(std::memory_order_acquire);
    for (;;) {
      // Capture a consistent snapshot of front/tail.
      unsigned back = back_.load(std::memory_order_acquire);
      unsigned front1 = front_.load(std::memory_order_relaxed);
      if (front != front1) {
        front = front1;
        std::atomic_thread_fence(std::memory_order_acquire);
        continue;
      }
      if (NeedSizeEstimate) {
        return CalculateSize(front, back);
      } else {
        // This value will be 0 if the queue is empty, and undefined otherwise.
        unsigned maybe_zero = ((front ^ back) & kMask2);
        // Queue size estimate must agree with maybe zero check on the queue
        // empty/non-empty state.
        eigen_assert((CalculateSize(front, back) == 0) == (maybe_zero == 0));
        return maybe_zero;
      }
    }
  }

  EIGEN_ALWAYS_INLINE
  unsigned CalculateSize(unsigned front, unsigned back) const {
    int size = (front & kMask2) - (back & kMask2);
    // Fix overflow.
    if (size < 0) size += 2 * kSize;
    // Order of modification in push/pop is crafted to make the queue look
    // larger than it is during concurrent modifications. E.g. push can
    // increment size before the corresponding pop has decremented it.
    // So the computed size can be up to kSize + 1, fix it.
    if (size > static_cast<int>(kSize)) size = kSize;
    return static_cast<unsigned>(size);
  }

  RunQueue(const RunQueue&) = delete;
  void operator=(const RunQueue&) = delete;
};
} // namespace Eigen
#endif // EIGEN_CXX11_THREADPOOL_RUNQUEUE_H_
| 9,366
| 38.523207
| 80
|
h
|
null |
LRMI-main/unsupported/Eigen/CXX11/src/ThreadPool/ThreadCancel.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2016 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_THREADPOOL_THREAD_CANCEL_H
#define EIGEN_CXX11_THREADPOOL_THREAD_CANCEL_H
// Try to come up with a portable way to cancel a thread
#if EIGEN_OS_GNULINUX
// On GNU/Linux, forward to pthread_cancel via the std::thread native handle.
#define EIGEN_THREAD_CANCEL(t) \
    pthread_cancel(t.native_handle());
#define EIGEN_SUPPORTS_THREAD_CANCELLATION 1
#else
// No portable cancellation primitive elsewhere: the macro expands to nothing
// and EIGEN_SUPPORTS_THREAD_CANCELLATION stays undefined.
#define EIGEN_THREAD_CANCEL(t)
#endif
#endif // EIGEN_CXX11_THREADPOOL_THREAD_CANCEL_H
| 774
| 31.291667
| 69
|
h
|
null |
LRMI-main/unsupported/Eigen/CXX11/src/ThreadPool/ThreadEnvironment.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_THREADPOOL_THREAD_ENVIRONMENT_H
#define EIGEN_CXX11_THREADPOOL_THREAD_ENVIRONMENT_H
namespace Eigen {
// Thread environment backed by the C++ standard library: tasks are
// type-erased std::function closures and workers are std::thread instances.
struct StlThreadEnvironment {
  // A unit of work: a nullary closure.
  struct Task {
    std::function<void()> f;
  };

  // EnvThread owns a std::thread. The constructor must start the thread and
  // the destructor must join it.
  class EnvThread {
   public:
    EnvThread(std::function<void()> fn) : thr_(std::move(fn)) {}
    ~EnvThread() { thr_.join(); }
    // Called when the thread pool is cancelled; a std::thread cannot be
    // interrupted from the outside, so nothing is done here.
    void OnCancel() {}

   private:
    std::thread thr_;
  };

  // Spawns a worker running `fn`; the caller assumes ownership of the result.
  EnvThread* CreateThread(std::function<void()> fn) {
    return new EnvThread(std::move(fn));
  }
  // Packages `fn` as a Task without executing it.
  Task CreateTask(std::function<void()> fn) { return Task{std::move(fn)}; }
  // Runs the closure held by `t`.
  void ExecuteTask(const Task& t) { t.f(); }
};
} // namespace Eigen
#endif // EIGEN_CXX11_THREADPOOL_THREAD_ENVIRONMENT_H
| 1,209
| 28.512195
| 90
|
h
|
null |
LRMI-main/unsupported/Eigen/CXX11/src/ThreadPool/ThreadLocal.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2016 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_THREADPOOL_THREAD_LOCAL_H
#define EIGEN_CXX11_THREADPOOL_THREAD_LOCAL_H
#ifdef EIGEN_AVOID_THREAD_LOCAL

// The user explicitly opted out of thread-local storage: make sure the macro
// is left undefined.
#ifdef EIGEN_THREAD_LOCAL
#undef EIGEN_THREAD_LOCAL
#endif

#else

// Define EIGEN_THREAD_LOCAL (as `static thread_local`) only on toolchains
// believed to support the C++11 `thread_local` specifier; the checks below
// then undefine it again for known-broken Apple/Android toolchains.
#if EIGEN_MAX_CPP_VER >= 11 && \
    ((EIGEN_COMP_GNUC && EIGEN_GNUC_AT_LEAST(4, 8)) || \
     __has_feature(cxx_thread_local) || \
     (EIGEN_COMP_MSVC >= 1900) )
#define EIGEN_THREAD_LOCAL static thread_local
#endif

// Disable TLS for Apple and Android builds with older toolchains.
#if defined(__APPLE__)
// Included for TARGET_OS_IPHONE, __IPHONE_OS_VERSION_MIN_REQUIRED,
// __IPHONE_8_0.
#include <Availability.h>
#include <TargetConditionals.h>
#endif
// Checks whether C++11's `thread_local` storage duration specifier is
// supported.
#if defined(__apple_build_version__) && \
    ((__apple_build_version__ < 8000042) || \
     (TARGET_OS_IPHONE && __IPHONE_OS_VERSION_MIN_REQUIRED < __IPHONE_9_0))
// Notes: Xcode's clang did not support `thread_local` until version
// 8, and even then not for all iOS < 9.0.
#undef EIGEN_THREAD_LOCAL

#elif defined(__ANDROID__) && EIGEN_COMP_CLANG
// There are platforms for which TLS should not be used even though the compiler
// makes it seem like it's supported (Android NDK < r12b for example).
// This is primarily because of linker problems and toolchain misconfiguration:
// TLS isn't supported until NDK r12b per
// https://developer.android.com/ndk/downloads/revision_history.html
// Since NDK r16, `__NDK_MAJOR__` and `__NDK_MINOR__` are defined in
// <android/ndk-version.h>. For NDK < r16, users should define these macros,
// e.g. `-D__NDK_MAJOR__=11 -D__NKD_MINOR__=0` for NDK r11.
#if __has_include(<android/ndk-version.h>)
#include <android/ndk-version.h>
#endif  // __has_include(<android/ndk-version.h>)
#if defined(__ANDROID__) && defined(__clang__) && defined(__NDK_MAJOR__) && \
    defined(__NDK_MINOR__) && \
    ((__NDK_MAJOR__ < 12) || ((__NDK_MAJOR__ == 12) && (__NDK_MINOR__ < 1)))
#undef EIGEN_THREAD_LOCAL
#endif
#endif  // defined(__ANDROID__) && defined(__clang__)

#endif  // EIGEN_AVOID_THREAD_LOCAL
namespace Eigen {
namespace internal {
// Default `Initialize` callable for ThreadLocal: leaves the
// default-constructed T untouched.
template <typename T>
struct ThreadLocalNoOpInitialize {
  void operator()(T&) const {}
};

// Default `Release` callable for ThreadLocal: nothing to do before ~T().
template <typename T>
struct ThreadLocalNoOpRelease {
  void operator()(T&) const {}
};
} // namespace internal
// Thread local container for elements of type T, that does not use thread local
// storage. As long as the number of unique threads accessing this storage
// is smaller than `capacity_`, it is lock-free and wait-free. Otherwise it will
// use a mutex for synchronization.
//
// Type `T` has to be default constructible, and by default each thread will get
// a default constructed value. It is possible to specify custom `initialize`
// callable, that will be called lazily from each thread accessing this object,
// and will be passed a default initialized object of type `T`. Also it's
// possible to pass a custom `release` callable, that will be invoked before
// calling ~T().
//
// Example:
//
// struct Counter {
// int value = 0;
// }
//
// Eigen::ThreadLocal<Counter> counter(10);
//
// // Each thread will have access to it's own counter object.
// Counter& cnt = counter.local();
// cnt++;
//
// WARNING: Eigen::ThreadLocal uses the OS-specific value returned by
// std::this_thread::get_id() to identify threads. This value is not guaranteed
// to be unique except for the life of the thread. A newly created thread may
// get an OS-specific ID equal to that of an already destroyed thread.
//
// Somewhat similar to TBB thread local storage, with similar restrictions:
// https://www.threadingbuildingblocks.org/docs/help/reference/thread_local_storage/enumerable_thread_specific_cls.html
//
template <typename T,
          typename Initialize = internal::ThreadLocalNoOpInitialize<T>,
          typename Release = internal::ThreadLocalNoOpRelease<T>>
class ThreadLocal {
  // We preallocate default constructed elements in MaxSizedVector.
  static_assert(std::is_default_constructible<T>::value,
                "ThreadLocal data type must be default constructible");

 public:
  explicit ThreadLocal(int capacity)
      : ThreadLocal(capacity, internal::ThreadLocalNoOpInitialize<T>(),
                    internal::ThreadLocalNoOpRelease<T>()) {}

  ThreadLocal(int capacity, Initialize initialize)
      : ThreadLocal(capacity, std::move(initialize),
                    internal::ThreadLocalNoOpRelease<T>()) {}

  // `capacity` is the number of distinct threads that can be served by the
  // lock-free path; additional threads spill into the mutex-guarded map
  // (see SpilledLocal below).
  ThreadLocal(int capacity, Initialize initialize, Release release)
      : initialize_(std::move(initialize)),
        release_(std::move(release)),
        capacity_(capacity),
        data_(capacity_),
        ptr_(capacity_),
        filled_records_(0) {
    eigen_assert(capacity_ >= 0);
    // Pre-construct all records and null out the lookup table.
    data_.resize(capacity_);
    for (int i = 0; i < capacity_; ++i) {
      ptr_.emplace_back(nullptr);
    }
  }

  // Returns the calling thread's element, creating (and running the
  // `initialize` callable on) it on first access.
  T& local() {
    std::thread::id this_thread = std::this_thread::get_id();
    if (capacity_ == 0) return SpilledLocal(this_thread);

    std::size_t h = std::hash<std::thread::id>()(this_thread);
    const int start_idx = h % capacity_;

    // NOTE: From the definition of `std::this_thread::get_id()` it is
    // guaranteed that we never can have concurrent insertions with the same key
    // to our hash-map like data structure. If we didn't find an element during
    // the initial traversal, it's guaranteed that no one else could have
    // inserted it while we are in this function. This allows to massively
    // simplify out lock-free insert-only hash map.

    // Check if we already have an element for `this_thread`.
    int idx = start_idx;
    while (ptr_[idx].load() != nullptr) {
      ThreadIdAndValue& record = *(ptr_[idx].load());
      if (record.thread_id == this_thread) return record.value;

      idx += 1;
      if (idx >= capacity_) idx -= capacity_;
      if (idx == start_idx) break;
    }

    // If we are here, it means that we found an insertion point in lookup
    // table at `idx`, or we did a full traversal and table is full.

    // If lock-free storage is full, fallback on mutex.
    if (filled_records_.load() >= capacity_) return SpilledLocal(this_thread);

    // We double check that we still have space to insert an element into a lock
    // free storage. If old value in `filled_records_` is larger than the
    // records capacity, it means that some other thread added an element while
    // we were traversing lookup table.
    int insertion_index =
        filled_records_.fetch_add(1, std::memory_order_relaxed);
    if (insertion_index >= capacity_) return SpilledLocal(this_thread);

    // At this point it's guaranteed that we can access to
    // data_[insertion_index_] without a data race.
    data_[insertion_index].thread_id = this_thread;
    initialize_(data_[insertion_index].value);

    // That's the pointer we'll put into the lookup table.
    ThreadIdAndValue* inserted = &data_[insertion_index];

    // We'll use nullptr pointer to ThreadIdAndValue in a compare-and-swap loop.
    ThreadIdAndValue* empty = nullptr;

    // Now we have to find an insertion point into the lookup table. We start
    // from the `idx` that was identified as an insertion point above, it's
    // guaranteed that we will have an empty record somewhere in a lookup table
    // (because we created a record in the `data_`).
    const int insertion_idx = idx;

    do {
      // Always start search from the original insertion candidate.
      idx = insertion_idx;
      while (ptr_[idx].load() != nullptr) {
        idx += 1;
        if (idx >= capacity_) idx -= capacity_;
        // If we did a full loop, it means that we don't have any free entries
        // in the lookup table, and this means that something is terribly wrong.
        eigen_assert(idx != insertion_idx);
      }
      // Atomic CAS of the pointer guarantees that any other thread, that will
      // follow this pointer will see all the mutations in the `data_`.
    } while (!ptr_[idx].compare_exchange_weak(empty, inserted));

    return inserted->value;
  }

  // Invokes `f` on every (thread id, value) pair stored so far.
  // WARN: It's not thread safe to call it concurrently with `local()`.
  void ForEach(std::function<void(std::thread::id, T&)> f) {
    // Reading directly from `data_` is unsafe, because only CAS to the
    // record in `ptr_` makes all changes visible to other threads.
    for (auto& ptr : ptr_) {
      ThreadIdAndValue* record = ptr.load();
      if (record == nullptr) continue;
      f(record->thread_id, record->value);
    }

    // We did not spill into the map based storage.
    if (filled_records_.load(std::memory_order_relaxed) < capacity_) return;

    // Adds a happens before edge from the last call to SpilledLocal().
    std::unique_lock<std::mutex> lock(mu_);
    for (auto& kv : per_thread_map_) {
      f(kv.first, kv.second);
    }
  }

  // Runs the `release` callable on every stored value before destruction.
  // WARN: It's not thread safe to call it concurrently with `local()`.
  ~ThreadLocal() {
    // Reading directly from `data_` is unsafe, because only CAS to the record
    // in `ptr_` makes all changes visible to other threads.
    for (auto& ptr : ptr_) {
      ThreadIdAndValue* record = ptr.load();
      if (record == nullptr) continue;
      release_(record->value);
    }

    // We did not spill into the map based storage.
    if (filled_records_.load(std::memory_order_relaxed) < capacity_) return;

    // Adds a happens before edge from the last call to SpilledLocal().
    std::unique_lock<std::mutex> lock(mu_);
    for (auto& kv : per_thread_map_) {
      release_(kv.second);
    }
  }

 private:
  struct ThreadIdAndValue {
    std::thread::id thread_id;
    T value;
  };

  // Use unordered map guarded by a mutex when lock free storage is full.
  T& SpilledLocal(std::thread::id this_thread) {
    std::unique_lock<std::mutex> lock(mu_);

    auto it = per_thread_map_.find(this_thread);
    if (it == per_thread_map_.end()) {
      auto result = per_thread_map_.emplace(this_thread, T());
      eigen_assert(result.second);
      initialize_((*result.first).second);
      return (*result.first).second;
    } else {
      return it->second;
    }
  }

  Initialize initialize_;
  Release release_;
  const int capacity_;

  // Storage that backs lock-free lookup table `ptr_`. Records stored in this
  // storage contiguously starting from index 0.
  MaxSizeVector<ThreadIdAndValue> data_;

  // Atomic pointers to the data stored in `data_`. Used as a lookup table for
  // linear probing hash map (https://en.wikipedia.org/wiki/Linear_probing).
  MaxSizeVector<std::atomic<ThreadIdAndValue*>> ptr_;

  // Number of records stored in the `data_`.
  std::atomic<int> filled_records_;

  // We fallback on per thread map if lock-free storage is full. In practice
  // this should never happen, if `capacity_` is a reasonable estimate of the
  // number of threads running in a system.
  std::mutex mu_;  // Protects per_thread_map_.
  std::unordered_map<std::thread::id, T> per_thread_map_;
};
} // namespace Eigen
#endif // EIGEN_CXX11_THREADPOOL_THREAD_LOCAL_H
| 11,482
| 37.023179
| 119
|
h
|
null |
LRMI-main/unsupported/Eigen/CXX11/src/ThreadPool/ThreadPoolInterface.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_THREADPOOL_THREAD_POOL_INTERFACE_H
#define EIGEN_CXX11_THREADPOOL_THREAD_POOL_INTERFACE_H
namespace Eigen {
// This defines an interface that ThreadPoolDevice can take to use
// custom thread pools underneath.
class ThreadPoolInterface {
 public:
  // Submits a closure to be run by a thread in the pool.
  virtual void Schedule(std::function<void()> fn) = 0;

  // Submits a closure to be run by threads in the range [start, end) in the
  // pool.
  virtual void ScheduleWithHint(std::function<void()> fn, int /*start*/,
                                int /*end*/) {
    // Just defer to Schedule in case sub-classes aren't interested in
    // overriding this functionality.
    Schedule(fn);
  }

  // If implemented, stop processing the closures that have been enqueued.
  // Currently running closures may still be processed.
  // If not implemented, does nothing.
  virtual void Cancel() {}

  // Returns the number of threads in the pool.
  virtual int NumThreads() const = 0;

  // Returns a logical thread index between 0 and NumThreads() - 1 if called
  // from one of the threads in the pool. Returns -1 otherwise.
  virtual int CurrentThreadId() const = 0;

  // Virtual destructor so implementations can be destroyed through a
  // ThreadPoolInterface pointer.
  virtual ~ThreadPoolInterface() {}
};
} // namespace Eigen
#endif // EIGEN_CXX11_THREADPOOL_THREAD_POOL_INTERFACE_H
| 1,680
| 33.306122
| 76
|
h
|
null |
LRMI-main/unsupported/Eigen/CXX11/src/ThreadPool/ThreadYield.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2016 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_THREADPOOL_THREAD_YIELD_H
#define EIGEN_CXX11_THREADPOOL_THREAD_YIELD_H
// Try to come up with a portable way to yield
#if EIGEN_COMP_GNUC && EIGEN_GNUC_AT_MOST(4, 7)
// Old GCC (<= 4.7): fall back to POSIX sched_yield -- presumably because
// std::this_thread::yield support was incomplete there (TODO confirm).
#define EIGEN_THREAD_YIELD() sched_yield()
#else
#define EIGEN_THREAD_YIELD() std::this_thread::yield()
#endif
#endif // EIGEN_CXX11_THREADPOOL_THREAD_YIELD_H
| 715
| 33.095238
| 69
|
h
|
null |
LRMI-main/unsupported/Eigen/CXX11/src/util/MaxSizeVector.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_FIXEDSIZEVECTOR_H
#define EIGEN_FIXEDSIZEVECTOR_H
namespace Eigen {
/** \class MaxSizeVector
* \ingroup Core
*
* \brief The MaxSizeVector class.
*
* The %MaxSizeVector provides a subset of std::vector functionality.
*
* The goal is to provide basic std::vector operations when using
* std::vector is not an option (e.g. on GPU or when compiling using
* FMA/AVX, as this can cause either compilation failures or illegal
* instruction failures).
*
* Beware: The constructors are not API compatible with these of
* std::vector.
*/
template <typename T>
class MaxSizeVector {
  // Allocations must satisfy both T's alignment and pointer alignment.
  static const size_t alignment = EIGEN_PLAIN_ENUM_MAX(EIGEN_ALIGNOF(T), sizeof(void*));
 public:
  // Construct a new MaxSizeVector, reserve n elements.
  // The storage is raw (uninitialized); size() starts at 0 and elements are
  // constructed in place by resize()/push_back()/emplace_back().
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  explicit MaxSizeVector(size_t n)
      : reserve_(n), size_(0),
        data_(static_cast<T*>(internal::handmade_aligned_malloc(n * sizeof(T), alignment))) {
  }

  // Construct a new MaxSizeVector, reserve and resize to n.
  // Copy the init value to all elements.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  MaxSizeVector(size_t n, const T& init)
      : reserve_(n), size_(n),
        data_(static_cast<T*>(internal::handmade_aligned_malloc(n * sizeof(T), alignment))) {
    size_t i = 0;
    EIGEN_TRY
    {
      for(; i < size_; ++i) { new (&data_[i]) T(init); }
    }
    EIGEN_CATCH(...)
    {
      // Construction of data_[i] threw: destruct the i elements that were
      // successfully constructed (indices i-1 .. 0), in reverse order,
      // mirroring the destructor below.
      // Bug fix: the condition must be `i > 0`. The previous `(i+1) > 0`
      // form ran one extra iteration and invoked data_[-1].~T() (size_t
      // wrap-around), which is undefined behavior.
      for(; i > 0; --i) { data_[i-1].~T(); }
      internal::handmade_aligned_free(data_);
      EIGEN_THROW;
    }
  }

  // Destroys the size() live elements in reverse order, then frees storage.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  ~MaxSizeVector() {
    for (size_t i = size_; i > 0; --i) {
      data_[i-1].~T();
    }
    internal::handmade_aligned_free(data_);
  }

  // Grows by default-constructing, or shrinks by destructing, elements in
  // place. n must not exceed the reserved capacity.
  void resize(size_t n) {
    eigen_assert(n <= reserve_);
    for (; size_ < n; ++size_) {
      new (&data_[size_]) T;
    }
    for (; size_ > n; --size_) {
      data_[size_-1].~T();
    }
    eigen_assert(size_ == n);
  }

  // Append new elements (up to reserved size).
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  void push_back(const T& t) {
    eigen_assert(size_ < reserve_);
    new (&data_[size_++]) T(t);
  }

  // For C++03 compatibility this only takes one argument
  template<class X>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  void emplace_back(const X& x) {
    eigen_assert(size_ < reserve_);
    new (&data_[size_++]) T(x);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  const T& operator[] (size_t i) const {
    eigen_assert(i < size_);
    return data_[i];
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  T& operator[] (size_t i) {
    eigen_assert(i < size_);
    return data_[i];
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  T& back() {
    eigen_assert(size_ > 0);
    return data_[size_ - 1];
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  const T& back() const {
    eigen_assert(size_ > 0);
    return data_[size_ - 1];
  }

  // Destroys the last element. The vector must be non-empty.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  void pop_back() {
    eigen_assert(size_ > 0);
    data_[--size_].~T();
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  size_t size() const { return size_; }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  bool empty() const { return size_ == 0; }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  T* data() { return data_; }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  const T* data() const { return data_; }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  T* begin() { return data_; }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  T* end() { return data_ + size_; }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  const T* begin() const { return data_; }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  const T* end() const { return data_ + size_; }

 private:
  size_t reserve_;   // allocated capacity (in elements)
  size_t size_;      // number of constructed elements
  T* data_;          // aligned raw storage for reserve_ elements
};
} // namespace Eigen
#endif // EIGEN_FIXEDSIZEVECTOR_H
| 4,174
| 25.257862
| 93
|
h
|
null |
LRMI-main/unsupported/Eigen/src/AutoDiff/AutoDiffJacobian.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_AUTODIFF_JACOBIAN_H
#define EIGEN_AUTODIFF_JACOBIAN_H
namespace Eigen
{
// AutoDiffJacobian wraps a functor (providing
// `void operator()(const InputType& x, ValueType* v)` plus the InputType and
// ValueType typedefs) and adds an operator() overload that also evaluates the
// Jacobian of the functor at x via AutoDiffScalar forward-mode automatic
// differentiation.
template<typename Functor> class AutoDiffJacobian : public Functor
{
public:
  AutoDiffJacobian() : Functor() {}
  AutoDiffJacobian(const Functor& f) : Functor(f) {}

  // forward constructors
#if EIGEN_HAS_VARIADIC_TEMPLATES
  template<typename... T>
  AutoDiffJacobian(const T& ...Values) : Functor(Values...) {}
#else
  template<typename T0>
  AutoDiffJacobian(const T0& a0) : Functor(a0) {}
  template<typename T0, typename T1>
  AutoDiffJacobian(const T0& a0, const T1& a1) : Functor(a0, a1) {}
  template<typename T0, typename T1, typename T2>
  AutoDiffJacobian(const T0& a0, const T1& a1, const T2& a2) : Functor(a0, a1, a2) {}
#endif

  typedef typename Functor::InputType InputType;
  typedef typename Functor::ValueType ValueType;
  typedef typename ValueType::Scalar Scalar;

  enum {
    InputsAtCompileTime = InputType::RowsAtCompileTime,
    ValuesAtCompileTime = ValueType::RowsAtCompileTime
  };

  typedef Matrix<Scalar, ValuesAtCompileTime, InputsAtCompileTime> JacobianType;
  typedef typename JacobianType::Index Index;

  // Active scalar: a value together with its vector of partial derivatives
  // with respect to the inputs.
  typedef Matrix<Scalar, InputsAtCompileTime, 1> DerivativeType;
  typedef AutoDiffScalar<DerivativeType> ActiveScalar;

  typedef Matrix<ActiveScalar, InputsAtCompileTime, 1> ActiveInput;
  typedef Matrix<ActiveScalar, ValuesAtCompileTime, 1> ActiveValue;

#if EIGEN_HAS_VARIADIC_TEMPLATES
  // Some compilers don't accept variadic parameters after a default parameter,
  // i.e., we can't just write _jac=0 but we need to overload operator():
  EIGEN_STRONG_INLINE
  void operator() (const InputType& x, ValueType* v) const
  {
    this->operator()(x, v, 0);
  }

  template<typename... ParamsType>
  void operator() (const InputType& x, ValueType* v, JacobianType* _jac,
                   const ParamsType&... Params) const
#else
  void operator() (const InputType& x, ValueType* v, JacobianType* _jac=0) const
#endif
  {
    eigen_assert(v!=0);

    // No Jacobian requested: just forward to the wrapped functor.
    if (!_jac)
    {
#if EIGEN_HAS_VARIADIC_TEMPLATES
      Functor::operator()(x, v, Params...);
#else
      Functor::operator()(x, v);
#endif
      return;
    }

    JacobianType& jac = *_jac;

    // Promote the input to active scalars.
    ActiveInput ax = x.template cast<ActiveScalar>();
    ActiveValue av(jac.rows());

    // For dynamically-sized inputs, size each output's derivative vector.
    if(InputsAtCompileTime==Dynamic)
      for (Index j=0; j<jac.rows(); j++)
        av[j].derivatives().resize(x.rows());

    // Seed input i's derivative with the i-th unit vector.
    for (Index i=0; i<jac.cols(); i++)
      ax[i].derivatives() = DerivativeType::Unit(x.rows(),i);

    // Run the functor on active scalars: values and partial derivatives are
    // propagated simultaneously.
#if EIGEN_HAS_VARIADIC_TEMPLATES
    Functor::operator()(ax, &av, Params...);
#else
    Functor::operator()(ax, &av);
#endif

    // Unpack: row i of the Jacobian is the gradient of output component i.
    for (Index i=0; i<jac.rows(); i++)
    {
      (*v)[i] = av[i].value();
      jac.row(i) = av[i].derivatives();
    }
  }
};
}
#endif // EIGEN_AUTODIFF_JACOBIAN_H
| 3,150
| 27.908257
| 85
|
h
|
null |
LRMI-main/unsupported/Eigen/src/FFT/ei_fftw_impl.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009 Mark Borgerding mark a borgerding net
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
namespace Eigen {
namespace internal {
// FFTW uses non-const arguments
// so we must use ugly const_cast calls for all the args it uses
//
// This should be safe as long as
// 1. we use FFTW_ESTIMATE for all our planning
// see the FFTW docs section 4.3.2 "Planner Flags"
// 2. fftw_complex is compatible with std::complex
// This assumes std::complex<T> layout is array of size 2 with real,imag
// Strips constness from a plain pointer so it can be handed to the FFTW C
// API, which takes non-const arguments even for inputs it does not modify.
template <typename T>
inline T* fftw_cast(const T* p) { return const_cast<T*>(p); }
// Reinterpret std::complex<double>* as FFTW's fftw_complex*, dropping
// constness. Relies on std::complex<T> being layout-compatible with T[2]
// (see the compatibility note at the top of this file).
inline
fftw_complex * fftw_cast( const std::complex<double> * p)
{
    return const_cast<fftw_complex*>( reinterpret_cast<const fftw_complex*>(p) );
}

// Same for single precision (fftwf_complex).
inline
fftwf_complex * fftw_cast( const std::complex<float> * p)
{
    return const_cast<fftwf_complex*>( reinterpret_cast<const fftwf_complex*>(p) );
}

// Same for long double precision (fftwl_complex).
inline
fftwl_complex * fftw_cast( const std::complex<long double> * p)
{
    return const_cast<fftwl_complex*>( reinterpret_cast<const fftwl_complex*>(p) );
}

// Per-scalar-type FFTW plan holder; specialized below for float, double and
// long double. The primary template is intentionally empty.
template <typename T>
struct fftw_plan {};
// Single-precision plan holder wrapping the fftwf_* API. Each member lazily
// creates its plan on first use (with FFTW_ESTIMATE, so planning does not
// overwrite src/dst) and caches it in m_plan for subsequent calls.
template <>
struct fftw_plan<float>
{
    typedef float scalar_type;
    typedef fftwf_complex complex_type;
    fftwf_plan m_plan;
    fftw_plan() :m_plan(NULL) {}
    ~fftw_plan() {if (m_plan) fftwf_destroy_plan(m_plan);}

    // 1D complex-to-complex forward transform.
    inline
    void fwd(complex_type * dst,complex_type * src,int nfft) {
        if (m_plan==NULL) m_plan = fftwf_plan_dft_1d(nfft,src,dst, FFTW_FORWARD, FFTW_ESTIMATE|FFTW_PRESERVE_INPUT);
        fftwf_execute_dft( m_plan, src,dst);
    }
    // 1D complex-to-complex backward (inverse) transform.
    inline
    void inv(complex_type * dst,complex_type * src,int nfft) {
        if (m_plan==NULL) m_plan = fftwf_plan_dft_1d(nfft,src,dst, FFTW_BACKWARD , FFTW_ESTIMATE|FFTW_PRESERVE_INPUT);
        fftwf_execute_dft( m_plan, src,dst);
    }
    // 1D real-to-complex forward transform.
    inline
    void fwd(complex_type * dst,scalar_type * src,int nfft) {
        if (m_plan==NULL) m_plan = fftwf_plan_dft_r2c_1d(nfft,src,dst,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT);
        fftwf_execute_dft_r2c( m_plan,src,dst);
    }
    // 1D complex-to-real backward (inverse) transform.
    inline
    void inv(scalar_type * dst,complex_type * src,int nfft) {
        if (m_plan==NULL)
            m_plan = fftwf_plan_dft_c2r_1d(nfft,src,dst,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT);
        fftwf_execute_dft_c2r( m_plan, src,dst);
    }

    // 2D (n0 x n1) complex-to-complex forward transform.
    inline
    void fwd2( complex_type * dst,complex_type * src,int n0,int n1) {
        if (m_plan==NULL) m_plan = fftwf_plan_dft_2d(n0,n1,src,dst,FFTW_FORWARD,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT);
        fftwf_execute_dft( m_plan, src,dst);
    }
    // 2D (n0 x n1) complex-to-complex backward (inverse) transform.
    inline
    void inv2( complex_type * dst,complex_type * src,int n0,int n1) {
        if (m_plan==NULL) m_plan = fftwf_plan_dft_2d(n0,n1,src,dst,FFTW_BACKWARD,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT);
        fftwf_execute_dft( m_plan, src,dst);
    }
};
// Double-precision plan holder wrapping the fftw_* API. Plans are created
// lazily on first use (with FFTW_ESTIMATE) and cached in m_plan.
template <>
struct fftw_plan<double>
{
    typedef double scalar_type;
    typedef fftw_complex complex_type;
    ::fftw_plan m_plan;
    fftw_plan() :m_plan(NULL) {}
    ~fftw_plan() {if (m_plan) fftw_destroy_plan(m_plan);}

    // 1D complex-to-complex forward transform.
    inline
    void fwd(complex_type * dst,complex_type * src,int nfft) {
        if (m_plan==NULL) m_plan = fftw_plan_dft_1d(nfft,src,dst, FFTW_FORWARD, FFTW_ESTIMATE|FFTW_PRESERVE_INPUT);
        fftw_execute_dft( m_plan, src,dst);
    }
    // 1D complex-to-complex backward (inverse) transform.
    inline
    void inv(complex_type * dst,complex_type * src,int nfft) {
        if (m_plan==NULL) m_plan = fftw_plan_dft_1d(nfft,src,dst, FFTW_BACKWARD , FFTW_ESTIMATE|FFTW_PRESERVE_INPUT);
        fftw_execute_dft( m_plan, src,dst);
    }
    // 1D real-to-complex forward transform.
    inline
    void fwd(complex_type * dst,scalar_type * src,int nfft) {
        if (m_plan==NULL) m_plan = fftw_plan_dft_r2c_1d(nfft,src,dst,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT);
        fftw_execute_dft_r2c( m_plan,src,dst);
    }
    // 1D complex-to-real backward (inverse) transform.
    inline
    void inv(scalar_type * dst,complex_type * src,int nfft) {
        if (m_plan==NULL)
            m_plan = fftw_plan_dft_c2r_1d(nfft,src,dst,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT);
        fftw_execute_dft_c2r( m_plan, src,dst);
    }

    // 2D (n0 x n1) complex-to-complex forward transform.
    inline
    void fwd2( complex_type * dst,complex_type * src,int n0,int n1) {
        if (m_plan==NULL) m_plan = fftw_plan_dft_2d(n0,n1,src,dst,FFTW_FORWARD,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT);
        fftw_execute_dft( m_plan, src,dst);
    }
    // 2D (n0 x n1) complex-to-complex backward (inverse) transform.
    inline
    void inv2( complex_type * dst,complex_type * src,int n0,int n1) {
        if (m_plan==NULL) m_plan = fftw_plan_dft_2d(n0,n1,src,dst,FFTW_BACKWARD,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT);
        fftw_execute_dft( m_plan, src,dst);
    }
};
// Long-double FFTW wrapper; mirrors the float/double specializations with the
// fftwl_* API. Plans are created lazily and destroyed in the dtor.
template <>
struct fftw_plan<long double>
{
  typedef long double scalar_type;
  typedef fftwl_complex complex_type;
  fftwl_plan m_plan;
  fftw_plan() :m_plan(NULL) {}
  ~fftw_plan() {if (m_plan) fftwl_destroy_plan(m_plan);}

  // 1-D complex-to-complex forward transform
  inline
  void fwd(complex_type * dst,complex_type * src,int nfft) {
    if (m_plan==NULL) m_plan = fftwl_plan_dft_1d(nfft,src,dst, FFTW_FORWARD, FFTW_ESTIMATE|FFTW_PRESERVE_INPUT);
    fftwl_execute_dft( m_plan, src,dst);
  }
  // 1-D complex-to-complex inverse transform (unscaled)
  inline
  void inv(complex_type * dst,complex_type * src,int nfft) {
    if (m_plan==NULL) m_plan = fftwl_plan_dft_1d(nfft,src,dst, FFTW_BACKWARD , FFTW_ESTIMATE|FFTW_PRESERVE_INPUT);
    fftwl_execute_dft( m_plan, src,dst);
  }
  // 1-D real-to-complex forward transform (half-spectrum output)
  inline
  void fwd(complex_type * dst,scalar_type * src,int nfft) {
    if (m_plan==NULL) m_plan = fftwl_plan_dft_r2c_1d(nfft,src,dst,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT);
    fftwl_execute_dft_r2c( m_plan,src,dst);
  }
  // 1-D complex(half-spectrum)-to-real inverse transform
  inline
  void inv(scalar_type * dst,complex_type * src,int nfft) {
    if (m_plan==NULL)
      m_plan = fftwl_plan_dft_c2r_1d(nfft,src,dst,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT);
    fftwl_execute_dft_c2r( m_plan, src,dst);
  }
  // 2-D complex-to-complex forward transform
  inline
  void fwd2( complex_type * dst,complex_type * src,int n0,int n1) {
    if (m_plan==NULL) m_plan = fftwl_plan_dft_2d(n0,n1,src,dst,FFTW_FORWARD,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT);
    fftwl_execute_dft( m_plan, src,dst);
  }
  // 2-D complex-to-complex inverse transform
  inline
  void inv2( complex_type * dst,complex_type * src,int n0,int n1) {
    if (m_plan==NULL) m_plan = fftwl_plan_dft_2d(n0,n1,src,dst,FFTW_BACKWARD,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT);
    fftwl_execute_dft( m_plan, src,dst);
  }
};
// FFT backend built on FFTW. Caches one fftw_plan<Scalar> per distinct
// (size, direction, in-place-ness, 16-byte alignment) configuration, since
// FFTW plans are only valid for a fixed transform geometry.
template <typename _Scalar>
struct fftw_impl
{
  typedef _Scalar Scalar;
  typedef std::complex<Scalar> Complex;

  // Drop all cached plans (each fftw_plan dtor destroys its FFTW plan).
  inline
  void clear()
  {
    m_plans.clear();
  }

  // complex-to-complex forward FFT
  inline
  void fwd( Complex * dst,const Complex *src,int nfft)
  {
    get_plan(nfft,false,dst,src).fwd(fftw_cast(dst), fftw_cast(src),nfft );
  }

  // real-to-complex forward FFT
  inline
  void fwd( Complex * dst,const Scalar * src,int nfft)
  {
    get_plan(nfft,false,dst,src).fwd(fftw_cast(dst), fftw_cast(src) ,nfft);
  }

  // 2-d complex-to-complex
  inline
  void fwd2(Complex * dst, const Complex * src, int n0,int n1)
  {
    get_plan(n0,n1,false,dst,src).fwd2(fftw_cast(dst), fftw_cast(src) ,n0,n1);
  }

  // inverse complex-to-complex
  inline
  void inv(Complex * dst,const Complex *src,int nfft)
  {
    get_plan(nfft,true,dst,src).inv(fftw_cast(dst), fftw_cast(src),nfft );
  }

  // half-complex to scalar
  inline
  void inv( Scalar * dst,const Complex * src,int nfft)
  {
    get_plan(nfft,true,dst,src).inv(fftw_cast(dst), fftw_cast(src),nfft );
  }

  // 2-d complex-to-complex
  inline
  void inv2(Complex * dst, const Complex * src, int n0,int n1)
  {
    get_plan(n0,n1,true,dst,src).inv2(fftw_cast(dst), fftw_cast(src) ,n0,n1);
  }

protected:
  typedef fftw_plan<Scalar> PlanData;

  typedef Eigen::numext::int64_t int64_t;

  typedef std::map<int64_t,PlanMap> PlanMap_unused_doc_marker; // (see real typedef below)
  typedef std::map<int64_t,PlanData> PlanMap;

  PlanMap m_plans;

  // 1-D plan lookup. The key packs nfft | direction | in-place | alignment
  // into distinct bit fields; trailing <<1 leaves bit 0 == 0 so 1-D keys can
  // never collide with 2-D keys (which set bit 0 to 1).
  // NOTE(review): nfft<<3 is evaluated in int before widening to int64_t, so
  // the key presumably assumes nfft < 2^27 — confirm for very large sizes.
  inline
  PlanData & get_plan(int nfft,bool inverse,void * dst,const void * src)
  {
    bool inplace = (dst==src);
    bool aligned = ( (reinterpret_cast<size_t>(src)&15) | (reinterpret_cast<size_t>(dst)&15) ) == 0;
    int64_t key = ( (nfft<<3 ) | (inverse<<2) | (inplace<<1) | aligned ) << 1;
    return m_plans[key];
  }

  // 2-D plan lookup: n0 goes in the high bits, and the final +1 sets bit 0
  // to keep these keys disjoint from the 1-D ones above.
  inline
  PlanData & get_plan(int n0,int n1,bool inverse,void * dst,const void * src)
  {
    bool inplace = (dst==src);
    bool aligned = ( (reinterpret_cast<size_t>(src)&15) | (reinterpret_cast<size_t>(dst)&15) ) == 0;
    int64_t key = ( ( (((int64_t)n0) << 30)|(n1<<3 ) | (inverse<<2) | (inplace<<1) | aligned ) << 1 ) + 1;
    return m_plans[key];
  }
};
} // end namespace internal
} // end namespace Eigen
| 9,223
| 34.206107
| 120
|
h
|
null |
LRMI-main/unsupported/Eigen/src/FFT/ei_kissfft_impl.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009 Mark Borgerding mark a borgerding net
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
namespace Eigen {
namespace internal {
// This FFT implementation was derived from kissfft http:sourceforge.net/projects/kissfft
// Copyright 2003-2009 Mark Borgerding
// Mixed-radix complex FFT kernel (radices 2/3/4/5 plus a generic fallback),
// derived from kissfft. A plan consists of the precomputed twiddle factors
// and the factorization of nfft into stage radices.
template <typename _Scalar>
struct kiss_cpx_fft
{
  typedef _Scalar Scalar;
  typedef std::complex<Scalar> Complex;
  std::vector<Complex> m_twiddles;      // exp(-2*pi*i*k/nfft) (conjugated for inverse)
  std::vector<int> m_stageRadix;        // radix p of each recursion stage
  std::vector<int> m_stageRemainder;    // remaining size m = nfft/(p1*...*pk) per stage
  std::vector<Complex> m_scratchBuf;    // only used by bfly_generic (radix > 5)
  bool m_inverse;

  // Fill m_twiddles with the nfft roots of unity. Only angles in the first
  // octant are fed to sin/cos; the other octants are obtained by symmetry,
  // which improves accuracy for large nfft.
  inline void make_twiddles(int nfft, bool inverse)
  {
    using numext::sin;
    using numext::cos;
    m_inverse = inverse;
    m_twiddles.resize(nfft);
    double phinc =  0.25 * double(EIGEN_PI) / nfft; // 2*pi/nfft split into 8 octant steps
    Scalar flip = inverse ? Scalar(1) : Scalar(-1); // forward transform uses e^{-i...}
    m_twiddles[0] = Complex(Scalar(1), Scalar(0));
    if ((nfft&1)==0)
      m_twiddles[nfft/2] = Complex(Scalar(-1), Scalar(0));
    int i=1;
    // octant 1: angle in (0, pi/4)
    for (;i*8<nfft;++i)
    {
      Scalar c = Scalar(cos(i*8*phinc));
      Scalar s = Scalar(sin(i*8*phinc));
      m_twiddles[i] = Complex(c, s*flip);
      m_twiddles[nfft-i] = Complex(c, -s*flip); // conjugate symmetry
    }
    // octant 2: angle in [pi/4, pi/2) — swap sin/cos roles
    for (;i*4<nfft;++i)
    {
      Scalar c = Scalar(cos((2*nfft-8*i)*phinc));
      Scalar s = Scalar(sin((2*nfft-8*i)*phinc));
      m_twiddles[i] = Complex(s, c*flip);
      m_twiddles[nfft-i] = Complex(s, -c*flip);
    }
    // octant 3: angle in [pi/2, 3*pi/4)
    for (;i*8<3*nfft;++i)
    {
      Scalar c = Scalar(cos((8*i-2*nfft)*phinc));
      Scalar s = Scalar(sin((8*i-2*nfft)*phinc));
      m_twiddles[i] = Complex(-s, c*flip);
      m_twiddles[nfft-i] = Complex(-s, -c*flip);
    }
    // octant 4: angle in [3*pi/4, pi)
    for (;i*2<nfft;++i)
    {
      Scalar c = Scalar(cos((4*nfft-8*i)*phinc));
      Scalar s = Scalar(sin((4*nfft-8*i)*phinc));
      m_twiddles[i] = Complex(-c, s*flip);
      m_twiddles[nfft-i] = Complex(-c, -s*flip);
    }
  }

  // Factor nfft into the per-stage radices stored in m_stageRadix /
  // m_stageRemainder. Radix 4 is preferred, then 2, then odd factors.
  void factorize(int nfft)
  {
    //start factoring out 4's, then 2's, then 3,5,7,9,...
    int n= nfft;
    int p=4;
    do {
      while (n % p) {
        switch (p) {
          case 4: p = 2; break;
          case 2: p = 3; break;
          default: p += 2; break;
        }
        if (p*p>n)
          p=n;// impossible to have a factor > sqrt(n)
      }
      n /= p;
      m_stageRadix.push_back(p);
      m_stageRemainder.push_back(n);
      if ( p > 5 )
        m_scratchBuf.resize(p); // scratchbuf will be needed in bfly_generic
    }while(n>1);
  }

  // One stage of the decimation-in-time recursion: compute p interleaved
  // sub-DFTs of size m, then recombine them with the radix-p butterfly.
  template <typename _Src>
  inline
  void work( int stage,Complex * xout, const _Src * xin, size_t fstride,size_t in_stride)
  {
    int p = m_stageRadix[stage];
    int m = m_stageRemainder[stage];
    Complex * Fout_beg = xout;
    Complex * Fout_end = xout + p*m;

    if (m>1) {
      do{
        // recursive call:
        // DFT of size m*p performed by doing
        // p instances of smaller DFTs of size m,
        // each one takes a decimated version of the input
        work(stage+1, xout , xin, fstride*p,in_stride);
        xin += fstride*in_stride;
      }while( (xout += m) != Fout_end );
    }else{
      // base case: size-1 DFT is just a (possibly type-converting) copy
      do{
        *xout = *xin;
        xin += fstride*in_stride;
      }while(++xout != Fout_end );
    }
    xout=Fout_beg;

    // recombine the p smaller DFTs
    switch (p) {
      case 2: bfly2(xout,fstride,m); break;
      case 3: bfly3(xout,fstride,m); break;
      case 4: bfly4(xout,fstride,m); break;
      case 5: bfly5(xout,fstride,m); break;
      default: bfly_generic(xout,fstride,m,p); break;
    }
  }

  // Radix-2 butterfly.
  inline
  void bfly2( Complex * Fout, const size_t fstride, int m)
  {
    for (int k=0;k<m;++k) {
      Complex t = Fout[m+k] * m_twiddles[k*fstride];
      Fout[m+k] = Fout[k] - t;
      Fout[k] += t;
    }
  }

  // Radix-4 butterfly; the direction only affects the sign of the +/- i
  // rotation applied to scratch[4].
  inline
  void bfly4( Complex * Fout, const size_t fstride, const size_t m)
  {
    Complex scratch[6];
    int negative_if_inverse = m_inverse * -2 +1; // -1 for inverse, +1 for forward

    for (size_t k=0;k<m;++k) {
      scratch[0] = Fout[k+m] * m_twiddles[k*fstride];
      scratch[1] = Fout[k+2*m] * m_twiddles[k*fstride*2];
      scratch[2] = Fout[k+3*m] * m_twiddles[k*fstride*3];
      scratch[5] = Fout[k] - scratch[1];

      Fout[k] += scratch[1];
      scratch[3] = scratch[0] + scratch[2];
      scratch[4] = scratch[0] - scratch[2];
      // multiply scratch[4] by -i (forward) or +i (inverse)
      scratch[4] = Complex( scratch[4].imag()*negative_if_inverse , -scratch[4].real()* negative_if_inverse );

      Fout[k+2*m]  = Fout[k] - scratch[3];
      Fout[k] += scratch[3];
      Fout[k+m] = scratch[5] + scratch[4];
      Fout[k+3*m] = scratch[5] - scratch[4];
    }
  }

  // Radix-3 butterfly; epi3 is the primitive cube root of unity twiddle.
  inline
  void bfly3( Complex * Fout, const size_t fstride, const size_t m)
  {
    size_t k=m;
    const size_t m2 = 2*m;
    Complex *tw1,*tw2;
    Complex scratch[5];
    Complex epi3;
    epi3 = m_twiddles[fstride*m];

    tw1=tw2=&m_twiddles[0];

    do{
      scratch[1]=Fout[m]  * *tw1;
      scratch[2]=Fout[m2] * *tw2;

      scratch[3]=scratch[1]+scratch[2];
      scratch[0]=scratch[1]-scratch[2];
      tw1 += fstride;
      tw2 += fstride*2;
      Fout[m] = Complex( Fout->real() - Scalar(.5)*scratch[3].real() , Fout->imag() - Scalar(.5)*scratch[3].imag() );
      scratch[0] *= epi3.imag();
      *Fout += scratch[3];
      Fout[m2] = Complex(  Fout[m].real() + scratch[0].imag() , Fout[m].imag() - scratch[0].real() );
      Fout[m] += Complex( -scratch[0].imag(),scratch[0].real() );
      ++Fout;
    }while(--k);
  }

  // Radix-5 butterfly; ya/yb are the two distinct 5th-root-of-unity twiddles.
  inline
  void bfly5( Complex * Fout, const size_t fstride, const size_t m)
  {
    Complex *Fout0,*Fout1,*Fout2,*Fout3,*Fout4;
    size_t u;
    Complex scratch[13];
    Complex * twiddles = &m_twiddles[0];
    Complex *tw;
    Complex ya,yb;
    ya = twiddles[fstride*m];
    yb = twiddles[fstride*2*m];

    Fout0=Fout;
    Fout1=Fout0+m;
    Fout2=Fout0+2*m;
    Fout3=Fout0+3*m;
    Fout4=Fout0+4*m;

    tw=twiddles;
    for ( u=0; u<m; ++u ) {
      scratch[0] = *Fout0;

      scratch[1]  = *Fout1 * tw[u*fstride];
      scratch[2]  = *Fout2 * tw[2*u*fstride];
      scratch[3]  = *Fout3 * tw[3*u*fstride];
      scratch[4]  = *Fout4 * tw[4*u*fstride];

      scratch[7] = scratch[1] + scratch[4];
      scratch[10] = scratch[1] - scratch[4];
      scratch[8] = scratch[2] + scratch[3];
      scratch[9] = scratch[2] - scratch[3];

      *Fout0 += scratch[7];
      *Fout0 += scratch[8];

      scratch[5] = scratch[0] + Complex(
          (scratch[7].real()*ya.real() ) + (scratch[8].real() *yb.real() ),
          (scratch[7].imag()*ya.real()) + (scratch[8].imag()*yb.real())
          );

      scratch[6] = Complex(
          (scratch[10].imag()*ya.imag()) + (scratch[9].imag()*yb.imag()),
          -(scratch[10].real()*ya.imag()) - (scratch[9].real()*yb.imag())
          );

      *Fout1 = scratch[5] - scratch[6];
      *Fout4 = scratch[5] + scratch[6];

      scratch[11] = scratch[0] +
        Complex(
            (scratch[7].real()*yb.real()) + (scratch[8].real()*ya.real()),
            (scratch[7].imag()*yb.real()) + (scratch[8].imag()*ya.real())
            );

      scratch[12] = Complex(
          -(scratch[10].imag()*yb.imag()) + (scratch[9].imag()*ya.imag()),
          (scratch[10].real()*yb.imag()) - (scratch[9].real()*ya.imag())
          );

      *Fout2=scratch[11]+scratch[12];
      *Fout3=scratch[11]-scratch[12];

      ++Fout0;++Fout1;++Fout2;++Fout3;++Fout4;
    }
  }

  /* perform the butterfly for one stage of a mixed radix FFT */
  // Generic radix-p butterfly (p > 5): a direct O(p^2) DFT of each group of
  // p samples, using m_scratchBuf as temporary storage.
  inline
  void bfly_generic(
      Complex * Fout,
      const size_t fstride,
      int m,
      int p
      )
  {
    int u,k,q1,q;
    Complex * twiddles = &m_twiddles[0];
    Complex t;
    int Norig = static_cast<int>(m_twiddles.size());
    Complex * scratchbuf = &m_scratchBuf[0];

    for ( u=0; u<m; ++u ) {
      k=u;
      // gather the p inputs of this butterfly
      for ( q1=0 ; q1<p ; ++q1 ) {
        scratchbuf[q1] = Fout[ k  ];
        k += m;
      }

      k=u;
      for ( q1=0 ; q1<p ; ++q1 ) {
        int twidx=0;
        Fout[ k ] = scratchbuf[0];
        for (q=1;q<p;++q ) {
          twidx += static_cast<int>(fstride) * k;
          if (twidx>=Norig) twidx-=Norig; // wrap twiddle index mod nfft
          t=scratchbuf[q] * twiddles[twidx];
          Fout[ k ] += t;
        }
        k += m;
      }
    }
  }
};
// kissfft-based FFT backend. Complex transforms are delegated to cached
// kiss_cpx_fft plans; real transforms of length 4k use the classic
// half-size-complex-FFT + twiddle-recombination trick, other lengths fall
// back to a full complex transform through temporary buffers.
template <typename _Scalar>
struct kissfft_impl
{
  typedef _Scalar Scalar;
  typedef std::complex<Scalar> Complex;

  // Drop all cached plans and twiddle tables.
  void clear()
  {
    m_plans.clear();
    m_realTwiddles.clear();
  }

  // complex-to-complex forward FFT
  inline
  void fwd( Complex * dst,const Complex *src,int nfft)
  {
    get_plan(nfft,false).work(0, dst, src, 1,1);
  }

  // 2-D transforms are not implemented by this backend (no-ops).
  inline
  void fwd2( Complex * dst,const Complex *src,int n0,int n1)
  {
    EIGEN_UNUSED_VARIABLE(dst);
    EIGEN_UNUSED_VARIABLE(src);
    EIGEN_UNUSED_VARIABLE(n0);
    EIGEN_UNUSED_VARIABLE(n1);
  }

  inline
  void inv2( Complex * dst,const Complex *src,int n0,int n1)
  {
    EIGEN_UNUSED_VARIABLE(dst);
    EIGEN_UNUSED_VARIABLE(src);
    EIGEN_UNUSED_VARIABLE(n0);
    EIGEN_UNUSED_VARIABLE(n1);
  }

  // real-to-complex forward FFT
  // perform two FFTs of src even and src odd
  // then twiddle to recombine them into the half-spectrum format
  // then fill in the conjugate symmetric half
  inline
  void fwd( Complex * dst,const Scalar * src,int nfft)
  {
    if ( nfft&3  ) {
      // use generic mode for odd
      m_tmpBuf1.resize(nfft);
      get_plan(nfft,false).work(0, &m_tmpBuf1[0], src, 1,1);
      std::copy(m_tmpBuf1.begin(),m_tmpBuf1.begin()+(nfft>>1)+1,dst );
    }else{
      int ncfft = nfft>>1;
      int ncfft2 = nfft>>2;
      Complex * rtw = real_twiddles(ncfft2);

      // use optimized mode for even real
      // treat pairs of reals as a single complex sequence of half the length
      fwd( dst, reinterpret_cast<const Complex*> (src), ncfft);
      Complex dc(dst[0].real() +  dst[0].imag());
      Complex nyquist(dst[0].real() -  dst[0].imag());
      int k;
      // untangle the even/odd sub-spectra into the half spectrum of src
      for ( k=1;k <= ncfft2 ; ++k ) {
        Complex fpk = dst[k];
        Complex fpnk = conj(dst[ncfft-k]);
        Complex f1k = fpk + fpnk;
        Complex f2k = fpk - fpnk;
        Complex tw= f2k * rtw[k-1];
        dst[k] =  (f1k + tw) * Scalar(.5);
        dst[ncfft-k] =  conj(f1k -tw)*Scalar(.5);
      }
      dst[0] = dc;
      dst[ncfft] = nyquist;
    }
  }

  // inverse complex-to-complex
  inline
  void inv(Complex * dst,const Complex  *src,int nfft)
  {
    get_plan(nfft,true).work(0, dst, src, 1,1);
  }

  // half-complex to scalar
  inline
  void inv( Scalar * dst,const Complex * src,int nfft)
  {
    if (nfft&3) {
      // generic path: rebuild the full conjugate-symmetric spectrum,
      // run a full complex inverse, keep the real parts
      m_tmpBuf1.resize(nfft);
      m_tmpBuf2.resize(nfft);
      std::copy(src,src+(nfft>>1)+1,m_tmpBuf1.begin() );
      for (int k=1;k<(nfft>>1)+1;++k)
        m_tmpBuf1[nfft-k] = conj(m_tmpBuf1[k]);
      inv(&m_tmpBuf2[0],&m_tmpBuf1[0],nfft);
      for (int k=0;k<nfft;++k)
        dst[k] = m_tmpBuf2[k].real();
    }else{
      // optimized version for multiple of 4
      int ncfft = nfft>>1;
      int ncfft2 = nfft>>2;
      Complex * rtw = real_twiddles(ncfft2);
      m_tmpBuf1.resize(ncfft);
      // pack DC and Nyquist into bin 0, then re-tangle into even/odd spectra
      m_tmpBuf1[0] = Complex( src[0].real() + src[ncfft].real(), src[0].real() - src[ncfft].real() );
      for (int k = 1; k <= ncfft / 2; ++k) {
        Complex fk = src[k];
        Complex fnkc = conj(src[ncfft-k]);
        Complex fek = fk + fnkc;
        Complex tmp = fk - fnkc;
        Complex fok = tmp * conj(rtw[k-1]);
        m_tmpBuf1[k] = fek + fok;
        m_tmpBuf1[ncfft-k] = conj(fek - fok);
      }
      // the half-length complex inverse writes directly into dst's memory
      get_plan(ncfft,true).work(0, reinterpret_cast<Complex*>(dst), &m_tmpBuf1[0], 1,1);
    }
  }

  protected:
  typedef kiss_cpx_fft<Scalar> PlanData;
  typedef std::map<int,PlanData> PlanMap;

  PlanMap m_plans;                                  // keyed by PlanKey(nfft,inverse)
  std::map<int, std::vector<Complex> > m_realTwiddles; // per-ncfft2 recombination twiddles
  std::vector<Complex> m_tmpBuf1;
  std::vector<Complex> m_tmpBuf2;

  // Encode (nfft, direction) into a single map key.
  inline
  int PlanKey(int nfft, bool isinverse) const { return (nfft<<1) | int(isinverse); }

  // Return the cached plan for (nfft, inverse), building it on first use.
  inline
  PlanData & get_plan(int nfft, bool inverse)
  {
    // TODO look for PlanKey(nfft, ! inverse) and conjugate the twiddles
    PlanData & pd = m_plans[ PlanKey(nfft,inverse) ];
    if ( pd.m_twiddles.size() == 0 ) {
      pd.make_twiddles(nfft,inverse);
      pd.factorize(nfft);
    }
    return pd;
  }

  // Twiddles used to (un)tangle the real-FFT half spectrum; lazily built
  // per quarter-length ncfft2.
  inline
  Complex * real_twiddles(int ncfft2)
  {
    using std::acos;
    std::vector<Complex> & twidref = m_realTwiddles[ncfft2];// creates new if not there
    if ( (int)twidref.size() != ncfft2 ) {
      twidref.resize(ncfft2);
      int ncfft= ncfft2<<1;
      Scalar pi =  acos( Scalar(-1) );
      for (int k=1;k<=ncfft2;++k)
        twidref[k-1] = exp( Complex(0,-pi * (Scalar(k) / ncfft + Scalar(.5)) ) );
    }
    return &twidref[0];
  }
};
} // end namespace internal
} // end namespace Eigen
| 13,231
| 28.404444
| 119
|
h
|
null |
LRMI-main/unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
/* NOTE The functions of this file have been adapted from the GMM++ library */
//========================================================================
//
// Copyright (C) 2002-2007 Yves Renard
//
// This file is a part of GETFEM++
//
// Getfem++ is free software; you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as
// published by the Free Software Foundation; version 2.1 of the License.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
// You should have received a copy of the GNU Lesser General Public
// License along with this program; if not, write to the Free Software
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301,
// USA.
//
//========================================================================
#include "../../../../Eigen/src/Core/util/NonMPL2.h"
#ifndef EIGEN_CONSTRAINEDCG_H
#define EIGEN_CONSTRAINEDCG_H
#include "../../../../Eigen/Core"
namespace Eigen {
namespace internal {
/** \ingroup IterativeLinearSolvers_Module
* Compute the pseudo inverse of the non-square matrix C such that
* \f$ CINV = (C * C^T)^{-1} * C \f$ based on a conjugate gradient method.
*
* This function is internally used by constrained_cg.
*/
/** \internal
  * Builds CINV = (C * C^T)^{-1} * C row by row: for each i, an
  * unpreconditioned conjugate gradient solves (C*C^T) e = e_i, and the
  * resulting row l = C^T e is filtered into the sparse output.
  *
  * \param C    the constraint matrix (rows x cols)
  * \param CINV sparse output, receives the pseudo-inverse rows via
  *             setFromTriplets (any previous content is overwritten)
  */
template <typename CMatrix, typename CINVMatrix>
void pseudo_inverse(const CMatrix &C, CINVMatrix &CINV)
{
  // possible optimization: copy the row, precompute C * trans(C).
  typedef typename CMatrix::Scalar Scalar;
  typedef typename CMatrix::Index Index;
  // FIXME use sparse vectors ?
  typedef Matrix<Scalar,Dynamic,1> TmpVec;

  using std::abs;

  Index rows = C.rows(), cols = C.cols();

  TmpVec d(rows), e(rows), l(cols), p(rows), q(rows), r(rows);
  Scalar rho, rho_1, alpha;
  d.setZero();

  // use the matrix's Scalar type for the triplets: the previous
  // Triplet<double> silently converted through double for any other Scalar
  typedef Triplet<Scalar> T;
  std::vector<T> tripletList;

  for (Index i = 0; i < rows; ++i)
  {
    d[i] = 1.0;     // d is the i-th canonical basis vector
    rho = 1.0;
    e.setZero();
    r = d;
    p = d;

    while (rho >= 1e-38)
    { /* conjugate gradient to compute e             */
      /* which is the i-th row of inv(C * trans(C))  */
      l = C.transpose() * p;
      q = C * l;
      alpha = rho / p.dot(q);
      e +=  alpha * p;
      r += -alpha * q;
      rho_1 = rho;
      rho = r.dot(r);
      p = (rho/rho_1) * p + r;
    }

    l = C.transpose() * e; // l is the i-th row of CINV
    // FIXME add a generic "prune/filter" expression for both dense and sparse object to sparse
    // keep only the significant coefficients; the previous test (l[j] < 1e-15)
    // kept near-zero/negative entries and dropped every significant positive one
    for (Index j=0; j<l.size(); ++j)
      if (abs(l[j]) > 1e-15)
        tripletList.push_back(T(i,j,l(j)));

    d[i] = 0.0;     // restore d for the next basis vector
  }
  CINV.setFromTriplets(tripletList.begin(), tripletList.end());
}
/** \ingroup IterativeLinearSolvers_Module
* Constrained conjugate gradient
*
* Computes the minimum of \f$ 1/2((Ax).x) - bx \f$ under the constraint \f$ Cx \le f \f$
*/
// Projected conjugate gradient: minimizes 1/2 (Ax).x - b.x subject to
// C x <= f. Residuals are projected onto the active ("saturated")
// constraints using the pseudo-inverse CINV of C; iter controls the
// stopping criterion and iteration count.
template<typename TMatrix, typename CMatrix,
         typename VectorX, typename VectorB, typename VectorF>
void constrained_cg(const TMatrix& A, const CMatrix& C, VectorX& x,
                       const VectorB& b, const VectorF& f, IterationController &iter)
{
  using std::sqrt;
  typedef typename TMatrix::Scalar Scalar;
  typedef typename TMatrix::Index Index;
  typedef Matrix<Scalar,Dynamic,1>  TmpVec;

  Scalar rho = 1.0, rho_1, lambda, gamma;
  Index xSize = x.size();
  TmpVec  p(xSize), q(xSize), q2(xSize),
          r(xSize), old_z(xSize), z(xSize),
          memox(xSize);
  std::vector<bool> satured(C.rows()); // i-th constraint currently active?
  p.setZero();
  iter.setRhsNorm(sqrt(b.dot(b))); // gael vect_sp(PS, b, b)
  if (iter.rhsNorm() == 0.0) iter.setRhsNorm(1.0);

  SparseMatrix<Scalar,RowMajor> CINV(C.rows(), C.cols());
  pseudo_inverse(C, CINV);

  while(true)
  {
    // computation of residual
    old_z = z;
    memox = x;
    r = b;
    r += A * -x;
    z = r;
    bool transition = false;
    // project z onto the set of feasible directions w.r.t. active constraints
    for (Index i = 0; i < C.rows(); ++i)
    {
      Scalar al = C.row(i).dot(x) - f.coeff(i);
      if (al >= -1.0E-15)
      {
        if (!satured[i])
        {
          satured[i] = true;
          transition = true; // activation set changed: restart CG direction
        }
        Scalar bb = CINV.row(i).dot(z);
        if (bb > 0.0)
          // FIXME: we should allow that: z += -bb * C.row(i);
          for (typename CMatrix::InnerIterator it(C,i); it; ++it)
            z.coeffRef(it.index()) -= bb*it.value();
      }
      else
        satured[i] = false;
    }

    // descent direction
    rho_1 = rho;
    rho = r.dot(z);

    if (iter.finished(rho)) break;

    if (transition || iter.first()) gamma = 0.0;
    else gamma = (std::max)(0.0, (rho - old_z.dot(z)) / rho_1);
    p = z + gamma*p;

    ++iter;
    // one dimensional optimization
    q = A * p;
    lambda = rho / q.dot(p);
    // clip the step so no inactive constraint becomes violated
    for (Index i = 0; i < C.rows(); ++i)
    {
      if (!satured[i])
      {
        Scalar bb = C.row(i).dot(p) - f[i];
        if (bb > 0.0)
          lambda = (std::min)(lambda, (f.coeff(i)-C.row(i).dot(x)) / bb);
      }
    }
    x += lambda * p;
    memox -= x;
  }
}
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_CONSTRAINEDCG_H
| 5,324
| 27.324468
| 95
|
h
|
null |
LRMI-main/unsupported/Eigen/src/IterativeSolvers/GMRES.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2011 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2012, 2014 Kolja Brix <brix@igpm.rwth-aaachen.de>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_GMRES_H
#define EIGEN_GMRES_H
namespace Eigen {
namespace internal {
/**
* Generalized Minimal Residual Algorithm based on the
* Arnoldi algorithm implemented with Householder reflections.
*
* Parameters:
* \param mat matrix of linear system of equations
* \param rhs right hand side vector of linear system of equations
* \param x on input: initial guess, on output: solution
* \param precond preconditioner used
* \param iters on input: maximum number of iterations to perform
* on output: number of iterations performed
* \param restart number of iterations for a restart
* \param tol_error on input: relative residual tolerance
* on output: residuum achieved
*
* \sa IterativeMethods::bicgstab()
*
*
* For references, please see:
*
* Saad, Y. and Schultz, M. H.
* GMRES: A Generalized Minimal Residual Algorithm for Solving Nonsymmetric Linear Systems.
* SIAM J.Sci.Stat.Comp. 7, 1986, pp. 856 - 869.
*
* Saad, Y.
* Iterative Methods for Sparse Linear Systems.
* Society for Industrial and Applied Mathematics, Philadelphia, 2003.
*
* Walker, H. F.
* Implementations of the GMRES method.
* Comput.Phys.Comm. 53, 1989, pp. 311 - 320.
*
* Walker, H. F.
* Implementation of the GMRES Method using Householder Transformations.
* SIAM J.Sci.Stat.Comp. 9, 1988, pp. 152 - 163.
*
*/
// Restarted GMRES with the Arnoldi process implemented via Householder
// reflections (Walker's variant). Returns true on (numerical) success;
// iters and tol_error are updated in place with the work done and the
// relative residual achieved.
template<typename MatrixType, typename Rhs, typename Dest, typename Preconditioner>
bool gmres(const MatrixType & mat, const Rhs & rhs, Dest & x, const Preconditioner & precond,
    Index &iters, const Index &restart, typename Dest::RealScalar & tol_error) {

  using std::sqrt;
  using std::abs;

  typedef typename Dest::RealScalar RealScalar;
  typedef typename Dest::Scalar Scalar;
  typedef Matrix < Scalar, Dynamic, 1 > VectorType;
  typedef Matrix < Scalar, Dynamic, Dynamic, ColMajor> FMatrixType;

  const RealScalar considerAsZero = (std::numeric_limits<RealScalar>::min)();

  // trivial right-hand side: the zero vector is the exact solution
  if(rhs.norm() <= considerAsZero)
  {
    x.setZero();
    tol_error = 0;
    return true;
  }

  RealScalar tol = tol_error;
  const Index maxIters = iters;
  iters = 0;

  const Index m = mat.rows();

  // residual and preconditioned residual
  VectorType p0 = rhs - mat*x;
  VectorType r0 = precond.solve(p0);

  const RealScalar r0Norm = r0.norm();

  // is initial guess already good enough?
  if(r0Norm == 0)
  {
    tol_error = 0;
    return true;
  }

  // storage for Hessenberg matrix and Householder data
  FMatrixType H   = FMatrixType::Zero(m, restart + 1);
  VectorType w    = VectorType::Zero(restart + 1);
  VectorType tau  = VectorType::Zero(restart + 1);

  // storage for Jacobi rotations
  std::vector < JacobiRotation < Scalar > > G(restart);

  // storage for temporaries
  VectorType t(m), v(m), workspace(m), x_new(m);

  // generate first Householder vector
  Ref<VectorType> H0_tail = H.col(0).tail(m - 1);
  RealScalar beta;
  r0.makeHouseholder(H0_tail, tau.coeffRef(0), beta);
  w(0) = Scalar(beta);

  for (Index k = 1; k <= restart; ++k)
  {
    ++iters;

    // k-th Krylov basis vector, represented implicitly by the reflections
    v = VectorType::Unit(m, k - 1);

    // apply Householder reflections H_{1} ... H_{k-1} to v
    // TODO: use a HouseholderSequence
    for (Index i = k - 1; i >= 0; --i) {
      v.tail(m - i).applyHouseholderOnTheLeft(H.col(i).tail(m - i - 1), tau.coeffRef(i), workspace.data());
    }

    // apply matrix M to v:  v = mat * v;
    t.noalias() = mat * v;
    v = precond.solve(t);

    // apply Householder reflections H_{k-1} ... H_{1} to v
    // TODO: use a HouseholderSequence
    for (Index i = 0; i < k; ++i) {
      v.tail(m - i).applyHouseholderOnTheLeft(H.col(i).tail(m - i - 1), tau.coeffRef(i), workspace.data());
    }

    if (v.tail(m - k).norm() != 0.0)
    {
      if (k <= restart)
      {
        // generate new Householder vector
        Ref<VectorType> Hk_tail = H.col(k).tail(m - k - 1);
        v.tail(m - k).makeHouseholder(Hk_tail, tau.coeffRef(k), beta);

        // apply Householder reflection H_{k} to v
        v.tail(m - k).applyHouseholderOnTheLeft(Hk_tail, tau.coeffRef(k), workspace.data());
      }
    }

    if (k > 1)
    {
      for (Index i = 0; i < k - 1; ++i)
      {
        // apply old Givens rotations to v
        v.applyOnTheLeft(i, i + 1, G[i].adjoint());
      }
    }

    if (k<m && v(k) != (Scalar) 0)
    {
      // determine next Givens rotation
      G[k - 1].makeGivens(v(k - 1), v(k));

      // apply Givens rotation to v and w
      v.applyOnTheLeft(k - 1, k, G[k - 1].adjoint());
      w.applyOnTheLeft(k - 1, k, G[k - 1].adjoint());
    }

    // insert coefficients into upper matrix triangle
    H.col(k-1).head(k) = v.head(k);

    // |w(k)| is the current residual norm of the least-squares problem
    tol_error = abs(w(k)) / r0Norm;
    bool stop = (k==m || tol_error < tol || iters == maxIters);

    if (stop || k == restart)
    {
      // solve upper triangular system
      Ref<VectorType> y = w.head(k);
      H.topLeftCorner(k, k).template triangularView <Upper>().solveInPlace(y);

      // use Horner-like scheme to calculate solution vector
      x_new.setZero();
      for (Index i = k - 1; i >= 0; --i)
      {
        x_new(i) += y(i);
        // apply Householder reflection H_{i} to x_new
        x_new.tail(m - i).applyHouseholderOnTheLeft(H.col(i).tail(m - i - 1), tau.coeffRef(i), workspace.data());
      }

      x += x_new;

      if(stop)
      {
        return true;
      }
      else
      {
        k=0;

        // reset data for restart
        p0.noalias() = rhs - mat*x;
        r0 = precond.solve(p0);

        // clear Hessenberg matrix and Householder data
        H.setZero();
        w.setZero();
        tau.setZero();

        // generate first Householder vector
        r0.makeHouseholder(H0_tail, tau.coeffRef(0), beta);
        w(0) = Scalar(beta);
      }
    }
  }

  return false;
}
}
template< typename _MatrixType,
typename _Preconditioner = DiagonalPreconditioner<typename _MatrixType::Scalar> >
class GMRES;
namespace internal {
// Traits required by IterativeSolverBase: expose the matrix and
// preconditioner types of a GMRES instantiation.
template< typename _MatrixType, typename _Preconditioner>
struct traits<GMRES<_MatrixType,_Preconditioner> >
{
  typedef _MatrixType MatrixType;
  typedef _Preconditioner Preconditioner;
};
}
/** \ingroup IterativeLinearSolvers_Module
* \brief A GMRES solver for sparse square problems
*
* This class allows to solve for A.x = b sparse linear problems using a generalized minimal
* residual method. The vectors x and b can be either dense or sparse.
*
* \tparam _MatrixType the type of the sparse matrix A, can be a dense or a sparse matrix.
* \tparam _Preconditioner the type of the preconditioner. Default is DiagonalPreconditioner
*
* The maximal number of iterations and tolerance value can be controlled via the setMaxIterations()
* and setTolerance() methods. The defaults are the size of the problem for the maximal number of iterations
* and NumTraits<Scalar>::epsilon() for the tolerance.
*
* This class can be used as the direct solver classes. Here is a typical usage example:
* \code
* int n = 10000;
* VectorXd x(n), b(n);
* SparseMatrix<double> A(n,n);
* // fill A and b
* GMRES<SparseMatrix<double> > solver(A);
* x = solver.solve(b);
* std::cout << "#iterations: " << solver.iterations() << std::endl;
* std::cout << "estimated error: " << solver.error() << std::endl;
* // update b, and solve again
* x = solver.solve(b);
* \endcode
*
* By default the iterations start with x=0 as an initial guess of the solution.
* One can control the start using the solveWithGuess() method.
*
* GMRES can also be used in a matrix-free context, see the following \link MatrixfreeSolverExample example \endlink.
*
* \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
*/
// Solver front-end: wires the free function internal::gmres into the
// IterativeSolverBase machinery and stores the restart parameter.
template< typename _MatrixType, typename _Preconditioner>
class GMRES : public IterativeSolverBase<GMRES<_MatrixType,_Preconditioner> >
{
  typedef IterativeSolverBase<GMRES> Base;
  using Base::matrix;
  using Base::m_error;
  using Base::m_iterations;
  using Base::m_info;
  using Base::m_isInitialized;

private:
  Index m_restart; // number of iterations between restarts

public:
  using Base::_solve_impl;
  typedef _MatrixType MatrixType;
  typedef typename MatrixType::Scalar Scalar;
  typedef typename MatrixType::RealScalar RealScalar;
  typedef _Preconditioner Preconditioner;

public:

  /** Default constructor. */
  GMRES() : Base(), m_restart(30) {}

  /** Initialize the solver with matrix \a A for further \c Ax=b solving.
    *
    * This constructor is a shortcut for the default constructor followed
    * by a call to compute().
    *
    * \warning this class stores a reference to the matrix A as well as some
    * precomputed values that depend on it. Therefore, if \a A is changed
    * this class becomes invalid. Call compute() to update it with the new
    * matrix A, or modify a copy of A.
    */
  template<typename MatrixDerived>
  explicit GMRES(const EigenBase<MatrixDerived>& A) : Base(A.derived()), m_restart(30) {}

  ~GMRES() {}

  /** Get the number of iterations after that a restart is performed.
    */
  Index get_restart() { return m_restart; }

  /** Set the number of iterations after that a restart is performed.
    *  \param restart   number of iterations for a restart, default is 30.
    */
  void set_restart(const Index restart) { m_restart=restart; }

  /** \internal */
  // Entry point used by IterativeSolverBase::solveWithGuess/solve.
  template<typename Rhs,typename Dest>
  void _solve_vector_with_guess_impl(const Rhs& b, Dest& x) const
  {
    m_iterations = Base::maxIterations();
    m_error = Base::m_tolerance;
    bool ret = internal::gmres(matrix(), b, x, Base::m_preconditioner, m_iterations, m_restart, m_error);
    m_info = (!ret) ? NumericalIssue
          : m_error <= Base::m_tolerance ? Success
          : NoConvergence;
  }

protected:

};
} // end namespace Eigen
#endif // EIGEN_GMRES_H
| 10,209
| 29.386905
| 118
|
h
|
null |
LRMI-main/unsupported/Eigen/src/IterativeSolvers/IncompleteLU.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2011 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_INCOMPLETE_LU_H
#define EIGEN_INCOMPLETE_LU_H
namespace Eigen {
// Incomplete LU preconditioner: performs an in-place ILU(0)-style
// factorization restricted to the sparsity pattern of the input matrix,
// storing unit-lower L and upper U together in m_lu (row-major).
template <typename _Scalar>
class IncompleteLU : public SparseSolverBase<IncompleteLU<_Scalar> >
{
  protected:
    typedef SparseSolverBase<IncompleteLU<_Scalar> > Base;
    using Base::m_isInitialized;

    typedef _Scalar Scalar;
    typedef Matrix<Scalar,Dynamic,1> Vector;
    typedef typename Vector::Index Index;
    typedef SparseMatrix<Scalar,RowMajor> FactorType;

  public:
    typedef Matrix<Scalar,Dynamic,Dynamic> MatrixType;

    IncompleteLU() {}

    template<typename MatrixType>
    IncompleteLU(const MatrixType& mat)
    {
      compute(mat);
    }

    Index rows() const { return m_lu.rows(); }
    Index cols() const { return m_lu.cols(); }

    // IKJ-ordered incomplete factorization: for each row i, eliminate the
    // entries left of the diagonal using the already-factorized rows above,
    // touching only positions that exist in the sparsity pattern.
    template<typename MatrixType>
    IncompleteLU& compute(const MatrixType& mat)
    {
      m_lu = mat;
      int size = mat.cols();
      Vector diag(size);       // diag(k) caches U(k,k) of processed rows
      for(int i=0; i<size; ++i)
      {
        typename FactorType::InnerIterator k_it(m_lu,i);
        // process strictly-lower entries (k < i) of row i
        for(; k_it && k_it.index()<i; ++k_it)
        {
          int k = k_it.index();
          k_it.valueRef() /= diag(k);    // L(i,k) = A(i,k) / U(k,k)

          // subtract L(i,k) * U(k,*) from the remainder of row i,
          // walking both rows in lockstep over their common pattern
          typename FactorType::InnerIterator j_it(k_it);
          typename FactorType::InnerIterator kj_it(m_lu, k);
          while(kj_it && kj_it.index()<=k) ++kj_it;
          for(++j_it; j_it; )
          {
            if(kj_it.index()==j_it.index())
            {
              j_it.valueRef() -= k_it.value() * kj_it.value();
              ++j_it;
              ++kj_it;
            }
            else if(kj_it.index()<j_it.index()) ++kj_it;
            else                                ++j_it;
          }
        }
        // record the pivot; a missing diagonal entry defaults to 1
        if(k_it && k_it.index()==i) diag(i) = k_it.value();
        else                        diag(i) = 1;
      }
      m_isInitialized = true;
      return *this;
    }

    // Apply the preconditioner: forward solve with unit-lower L,
    // then backward solve with upper U.
    template<typename Rhs, typename Dest>
    void _solve_impl(const Rhs& b, Dest& x) const
    {
      x = m_lu.template triangularView<UnitLower>().solve(b);
      x = m_lu.template triangularView<Upper>().solve(x);
    }

  protected:
    FactorType m_lu;
};
#endif // EIGEN_INCOMPLETE_LU_H
| 2,520
| 26.703297
| 69
|
h
|
null |
LRMI-main/unsupported/Eigen/src/IterativeSolvers/IterationController.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
/* NOTE The class IterationController has been adapted from the iteration
* class of the GMM++ and ITL libraries.
*/
//=======================================================================
// Copyright (C) 1997-2001
// Authors: Andrew Lumsdaine <lums@osl.iu.edu>
// Lie-Quan Lee <llee@osl.iu.edu>
//
// This file is part of the Iterative Template Library
//
// You should have received a copy of the License Agreement for the
// Iterative Template Library along with the software; see the
// file LICENSE.
//
// Permission to modify the code and to distribute modified code is
// granted, provided the text of this NOTICE is retained, a notice that
// the code was modified is included with the above COPYRIGHT NOTICE and
// with the COPYRIGHT NOTICE in the LICENSE file, and that the LICENSE
// file is distributed with the modified code.
//
// LICENSOR MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED.
// By way of example, but not limitation, Licensor MAKES NO
// REPRESENTATIONS OR WARRANTIES OF MERCHANTABILITY OR FITNESS FOR ANY
// PARTICULAR PURPOSE OR THAT THE USE OF THE LICENSED SOFTWARE COMPONENTS
// OR DOCUMENTATION WILL NOT INFRINGE ANY PATENTS, COPYRIGHTS, TRADEMARKS
// OR OTHER RIGHTS.
//=======================================================================
//========================================================================
//
// Copyright (C) 2002-2007 Yves Renard
//
// This file is a part of GETFEM++
//
// Getfem++ is free software; you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as
// published by the Free Software Foundation; version 2.1 of the License.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
// You should have received a copy of the GNU Lesser General Public
// License along with this program; if not, write to the Free Software
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301,
// USA.
//
//========================================================================
#include "../../../../Eigen/src/Core/util/NonMPL2.h"
#ifndef EIGEN_ITERATION_CONTROLLER_H
#define EIGEN_ITERATION_CONTROLLER_H
namespace Eigen {
/** \ingroup IterativeLinearSolvers_Module
* \class IterationController
*
* \brief Controls the iterations of the iterative solvers
*
* This class has been adapted from the iteration class of GMM++ and ITL libraries.
*
*/
class IterationController
{
  protected :
    double m_rhsn;        ///< Right hand side norm
    size_t m_maxiter;     ///< Max. number of iterations
    int    m_noise;       ///< if noise > 0 iterations are printed
    double m_resmax;      ///< maximum residual
    double m_resminreach, m_resadd;
    size_t m_nit;         ///< iteration number
    double m_res;         ///< last computed residual
    bool   m_written;
    void (*m_callback)(const IterationController&);
  public :
    /** Resets counters, residual bookkeeping, and the user callback. */
    void init()
    {
      m_nit = 0; m_res = 0.0; m_written = false;
      m_resminreach = 1E50; m_resadd = 0.0;
      m_callback = 0;
    }

    /** \param r   relative residual tolerance
      * \param noi verbosity level (0 = silent)
      * \param mit maximum number of iterations (default: unbounded) */
    IterationController(double r = 1.0E-8, int noi = 0, size_t mit = size_t(-1))
      : m_rhsn(1.0), m_maxiter(mit), m_noise(noi), m_resmax(r) { init(); }

    /** Advances the iteration count and accumulates the residual history. */
    void operator ++(int) { m_nit++; m_written = false; m_resadd += m_res; }
    void operator ++() { (*this)++; }

    bool first() { return m_nit == 0; }

    /* get/set the "noisyness" (verbosity) of the solvers */
    int noiseLevel() const { return m_noise; }
    void setNoiseLevel(int n) { m_noise = n; }
    void reduceNoiseLevel() { if (m_noise > 0) m_noise--; }

    double maxResidual() const { return m_resmax; }
    void setMaxResidual(double r) { m_resmax = r; }
    double residual() const { return m_res; }

    /* change the user-definable callback, called after each iteration */
    void setCallback(void (*t)(const IterationController&))
    {
      m_callback = t;
    }

    size_t iteration() const { return m_nit; }
    void setIteration(size_t i) { m_nit = i; }

    /** \deprecated Misspelled accessor kept for backward compatibility;
      * prefer maxIterations(). */
    size_t maxIterarions() const { return m_maxiter; }
    /** Returns the maximum number of iterations. */
    size_t maxIterations() const { return m_maxiter; }
    void setMaxIterations(size_t i) { m_maxiter = i; }

    double rhsNorm() const { return m_rhsn; }
    void setRhsNorm(double r) { m_rhsn = r; }

    /** True when the last recorded residual meets the relative tolerance. */
    bool converged() const { return m_res <= m_rhsn * m_resmax; }
    /** Records \a nr as the current residual and tests convergence.
      * NOTE(review): the vector overload below feeds squaredNorm() here,
      * so callers passing vectors are effectively tested on squared norms. */
    bool converged(double nr)
    {
      using std::abs;
      m_res = abs(nr);
      m_resminreach = (std::min)(m_resminreach, m_res);
      return converged();
    }
    template<typename VectorType> bool converged(const VectorType &v)
    { return converged(v.squaredNorm()); }

    /** Invokes the callback, optionally prints, and returns true when the
      * solver should stop (converged or iteration budget exhausted). */
    bool finished(double nr)
    {
      if (m_callback) m_callback(*this);
      if (m_noise > 0 && !m_written)
      {
        converged(nr);
        m_written = true;
      }
      return (m_nit >= m_maxiter || converged(nr));
    }
    template <typename VectorType>
    bool finished(const MatrixBase<VectorType> &v)
    { return finished(double(v.squaredNorm())); }

};
} // end namespace Eigen
#endif // EIGEN_ITERATION_CONTROLLER_H
| 5,360
| 33.587097
| 84
|
h
|
null |
LRMI-main/unsupported/Eigen/src/IterativeSolvers/Scaling.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2012 Desire NUENTSA WAKAM <desire.nuentsa_wakam@inria.fr
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_ITERSCALING_H
#define EIGEN_ITERSCALING_H
namespace Eigen {
/**
* \ingroup IterativeSolvers_Module
* \brief iterative scaling algorithm to equilibrate rows and column norms in matrices
*
* This class can be used as a preprocessing tool to accelerate the convergence of iterative methods
*
* This feature is useful to limit the pivoting amount during LU/ILU factorization
* The scaling strategy as presented here preserves the symmetry of the problem
* NOTE It is assumed that the matrix does not have empty row or column,
*
* Example with key steps
* \code
* VectorXd x(n), b(n);
* SparseMatrix<double> A;
* // fill A and b;
* IterScaling<SparseMatrix<double> > scal;
* // Compute the left and right scaling vectors. The matrix is equilibrated at output
* scal.computeRef(A);
* // Scale the right hand side
* b = scal.LeftScaling().cwiseProduct(b);
* // Now, solve the equilibrated linear system with any available solver
*
* // Scale back the computed solution
* x = scal.RightScaling().cwiseProduct(x);
* \endcode
*
* \tparam _MatrixType the type of the matrix. It should be a real square sparsematrix
*
* References : D. Ruiz and B. Ucar, A Symmetry Preserving Algorithm for Matrix Scaling, INRIA Research report RR-7552
*
* \sa \ref IncompleteLUT
*/
template<typename _MatrixType>
class IterScaling
{
  public:
    typedef _MatrixType MatrixType;
    typedef typename MatrixType::Scalar Scalar;
    typedef typename MatrixType::Index Index;

  public:
    IterScaling() { init(); }

    IterScaling(const MatrixType& matrix)
    {
      init();
      compute(matrix);
    }

    ~IterScaling() { }

    /**
     * Compute the left and right diagonal matrices to scale the input matrix @p mat
     *
     * FIXME This algorithm will be modified such that the diagonal elements are permuted on the diagonal.
     *
     * \sa LeftScaling() RightScaling()
     */
    void compute (const MatrixType& mat)
    {
      using std::abs;
      int m = mat.rows();
      int n = mat.cols();
      eigen_assert((m>0 && m == n) && "Please give a non - empty matrix");
      m_left.resize(m);
      m_right.resize(n);
      m_left.setOnes();
      m_right.setOnes();
      m_matrix = mat;
      VectorXd Dr, Dc, DrRes, DcRes; // Temporary Left and right scaling vectors
      Dr.resize(m); Dc.resize(n);
      DrRes.resize(m); DcRes.resize(n);
      double EpsRow = 1.0, EpsCol = 1.0;
      int its = 0;
      do
      { // Iterate until the infinite norm of each row and column is approximately 1
        // Get the maximum value in each row and column
        Dr.setZero(); Dc.setZero();
        for (int k=0; k<m_matrix.outerSize(); ++k)
        {
          for (typename MatrixType::InnerIterator it(m_matrix, k); it; ++it)
          {
            if ( Dr(it.row()) < abs(it.value()) )
              Dr(it.row()) = abs(it.value());
            if ( Dc(it.col()) < abs(it.value()) )
              Dc(it.col()) = abs(it.value());
          }
        }
        // Square roots preserve symmetry: each entry is divided by
        // sqrt(row max) * sqrt(col max) below.
        for (int i = 0; i < m; ++i)
        {
          Dr(i) = std::sqrt(Dr(i));
        }
        for (int i = 0; i < n; ++i)
        {
          Dc(i) = std::sqrt(Dc(i));
        }
        // Save the scaling factors
        for (int i = 0; i < m; ++i)
        {
          m_left(i) /= Dr(i);
        }
        for (int i = 0; i < n; ++i)
        {
          m_right(i) /= Dc(i);
        }
        // Scale the rows and the columns of the matrix
        DrRes.setZero(); DcRes.setZero();
        for (int k=0; k<m_matrix.outerSize(); ++k)
        {
          for (typename MatrixType::InnerIterator it(m_matrix, k); it; ++it)
          {
            it.valueRef() = it.value()/( Dr(it.row()) * Dc(it.col()) );
            // Accumulate the norms of the row and column vectors
            if ( DrRes(it.row()) < abs(it.value()) )
              DrRes(it.row()) = abs(it.value());
            if ( DcRes(it.col()) < abs(it.value()) )
              DcRes(it.col()) = abs(it.value());
          }
        }
        // Convergence measure: how far each row/column infinity norm is from 1.
        DrRes.array() = (1-DrRes.array()).abs();
        EpsRow = DrRes.maxCoeff();
        DcRes.array() = (1-DcRes.array()).abs();
        EpsCol = DcRes.maxCoeff();
        its++;
      }while ( (EpsRow >m_tol || EpsCol > m_tol) && (its < m_maxits) );
      m_isInitialized = true;
    }
    /** Compute the left and right vectors to scale the vectors
     * the input matrix is scaled with the computed vectors at output
     *
     * \sa compute()
     */
    void computeRef (MatrixType& mat)
    {
      compute (mat);
      mat = m_matrix;
    }
    /** Get the vector to scale the rows of the matrix
     */
    VectorXd& LeftScaling()
    {
      return m_left;
    }

    /** Get the vector to scale the columns of the matrix
     */
    VectorXd& RightScaling()
    {
      return m_right;
    }

    /** Set the tolerance for the convergence of the iterative scaling algorithm
     */
    void setTolerance(double tol)
    {
      m_tol = tol;
    }

  protected:

    void init()
    {
      m_tol = 1e-10;
      m_maxits = 5;
      m_isInitialized = false;
    }

    MatrixType m_matrix;             // Scaled copy of the input matrix
    mutable ComputationInfo m_info;
    bool m_isInitialized;
    VectorXd m_left; // Left scaling vector
    VectorXd m_right; // m_right scaling vector
    double m_tol;
    int m_maxits; // Maximum number of iterations allowed
};
}
#endif
| 5,853
| 29.175258
| 119
|
h
|
null |
LRMI-main/unsupported/Eigen/src/LevenbergMarquardt/LMcovar.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// This code initially comes from MINPACK whose original authors are:
// Copyright Jorge More - Argonne National Laboratory
// Copyright Burt Garbow - Argonne National Laboratory
// Copyright Ken Hillstrom - Argonne National Laboratory
//
// This Source Code Form is subject to the terms of the Minpack license
// (a BSD-like license) described in the campaigned CopyrightMINPACK.txt file.
#ifndef EIGEN_LMCOVAR_H
#define EIGEN_LMCOVAR_H
namespace Eigen {
namespace internal {
template <typename Scalar>
// Computes the covariance matrix of the least-squares solution from the
// (column-pivoted) QR factor \a r and the pivot indices \a ipvt, overwriting
// \a r with the covariance.  Columns whose diagonal falls below
// tol*|r(0,0)| are treated as rank-deficient (their rows/cols are zeroed).
// Direct port of MINPACK's covar routine; the triangular update order below
// must be preserved exactly.
void covar(
    Matrix< Scalar, Dynamic, Dynamic > &r,
    const VectorXi& ipvt,
    Scalar tol = std::sqrt(NumTraits<Scalar>::epsilon()) )
{
    using std::abs;
    /* Local variables */
    Index i, j, k, l, ii, jj;
    bool sing;
    Scalar temp;

    /* Function Body */
    const Index n = r.cols();
    const Scalar tolr = tol * abs(r(0,0));   // rank-detection threshold
    Matrix< Scalar, Dynamic, 1 > wa(n);
    eigen_assert(ipvt.size()==n);

    /* form the inverse of r in the full upper triangle of r. */
    l = -1;                                  // index of last numerically nonsingular column
    for (k = 0; k < n; ++k)
        if (abs(r(k,k)) > tolr) {
            r(k,k) = 1. / r(k,k);
            for (j = 0; j <= k-1; ++j) {
                temp = r(k,k) * r(j,k);
                r(j,k) = 0.;
                r.col(k).head(j+1) -= r.col(j).head(j+1) * temp;
            }
            l = k;
        }

    /* form the full upper triangle of the inverse of (r transpose)*r */
    /* in the full upper triangle of r. */
    for (k = 0; k <= l; ++k) {
        for (j = 0; j <= k-1; ++j)
            r.col(j).head(j+1) += r.col(k).head(j+1) * r(j,k);
        r.col(k).head(k+1) *= r(k,k);
    }

    /* form the full lower triangle of the covariance matrix */
    /* in the strict lower triangle of r and in wa. */
    for (j = 0; j < n; ++j) {
        jj = ipvt[j];                        // un-permute: column j maps back to jj
        sing = j > l;                        // beyond the detected rank: zero out
        for (i = 0; i <= j; ++i) {
            if (sing)
                r(i,j) = 0.;
            ii = ipvt[i];
            if (ii > jj)
                r(ii,jj) = r(i,j);
            if (ii < jj)
                r(jj,ii) = r(i,j);
        }
        wa[jj] = r(j,j);                     // stash the diagonal; restored below
    }

    /* symmetrize the covariance matrix in r. */
    r.topLeftCorner(n,n).template triangularView<StrictlyUpper>() = r.topLeftCorner(n,n).transpose();
    r.diagonal() = wa;
}
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_LMCOVAR_H
| 2,443
| 27.752941
| 101
|
h
|
null |
LRMI-main/unsupported/Eigen/src/LevenbergMarquardt/LMpar.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// This code initially comes from MINPACK whose original authors are:
// Copyright Jorge More - Argonne National Laboratory
// Copyright Burt Garbow - Argonne National Laboratory
// Copyright Ken Hillstrom - Argonne National Laboratory
//
// This Source Code Form is subject to the terms of the Minpack license
// (a BSD-like license) described in the campaigned CopyrightMINPACK.txt file.
#ifndef EIGEN_LMPAR_H
#define EIGEN_LMPAR_H
namespace Eigen {
namespace internal {
template <typename QRSolver, typename VectorType>
// Determines the Levenberg-Marquardt parameter \a par and the corresponding
// step \a x such that ||diag.cwiseProduct(x)|| is approximately m_delta,
// given the QR factorization \a qr of the Jacobian and \a qtb = Q^T * b.
// Port of MINPACK's lmpar (variant working on the R factor only); the
// bracketing iteration on [parl, paru] must keep its exact update order.
void lmpar2(
    const QRSolver &qr,
    const VectorType  &diag,
    const VectorType  &qtb,
    typename VectorType::Scalar m_delta,
    typename VectorType::Scalar &par,
    VectorType  &x)
{
  using std::sqrt;
  using std::abs;
  typedef typename QRSolver::MatrixType MatrixType;
  typedef typename QRSolver::Scalar Scalar;
//  typedef typename QRSolver::StorageIndex StorageIndex;

  /* Local variables */
  Index j;
  Scalar fp;
  Scalar parc, parl;
  Index iter;
  Scalar temp, paru;
  Scalar gnorm;
  Scalar dxnorm;

  // Make a copy of the triangular factor.
  // This copy is modified during call the qrsolv
  MatrixType s;
  s = qr.matrixR();

  /* Function Body */
  const Scalar dwarf = (std::numeric_limits<Scalar>::min)();  // smallest positive normal
  const Index n = qr.matrixR().cols();
  eigen_assert(n==diag.size());
  eigen_assert(n==qtb.size());

  VectorType  wa1, wa2;

  /* compute and store in x the gauss-newton direction. if the */
  /* jacobian is rank-deficient, obtain a least squares solution. */

  //  const Index rank = qr.nonzeroPivots(); // exactly double(0.)
  const Index rank = qr.rank(); // use a threshold
  wa1 = qtb;
  wa1.tail(n-rank).setZero();
  //FIXME There is no solve in place for sparse triangularView
  wa1.head(rank) = s.topLeftCorner(rank,rank).template triangularView<Upper>().solve(qtb.head(rank));

  x = qr.colsPermutation()*wa1;

  /* initialize the iteration counter. */
  /* evaluate the function at the origin, and test */
  /* for acceptance of the gauss-newton direction. */
  iter = 0;
  wa2 = diag.cwiseProduct(x);
  dxnorm = wa2.blueNorm();
  fp = dxnorm - m_delta;
  // Gauss-Newton step already inside (a 10% relaxation of) the trust region:
  // no damping needed.
  if (fp <= Scalar(0.1) * m_delta) {
    par = 0;
    return;
  }

  /* if the jacobian is not rank deficient, the newton */
  /* step provides a lower bound, parl, for the zero of */
  /* the function. otherwise set this bound to zero. */
  parl = 0.;
  if (rank==n) {
    wa1 = qr.colsPermutation().inverse() *  diag.cwiseProduct(wa2)/dxnorm;
    s.topLeftCorner(n,n).transpose().template triangularView<Lower>().solveInPlace(wa1);
    temp = wa1.blueNorm();
    parl = fp / m_delta / temp / temp;
  }

  /* calculate an upper bound, paru, for the zero of the function. */
  for (j = 0; j < n; ++j)
    wa1[j] = s.col(j).head(j+1).dot(qtb.head(j+1)) / diag[qr.colsPermutation().indices()(j)];

  gnorm = wa1.stableNorm();
  paru = gnorm / m_delta;
  if (paru == 0.)
    paru = dwarf / (std::min)(m_delta,Scalar(0.1));

  /* if the input par lies outside of the interval (parl,paru), */
  /* set par to the closer endpoint. */
  par = (std::max)(par,parl);
  par = (std::min)(par,paru);
  if (par == 0.)
    par = gnorm / dxnorm;

  /* beginning of an iteration. */
  while (true) {
    ++iter;

    /* evaluate the function at the current value of par. */
    if (par == 0.)
      par = (std::max)(dwarf,Scalar(.001) * paru); /* Computing MAX */
    wa1 = sqrt(par)* diag;

    VectorType sdiag(n);
    lmqrsolv(s, qr.colsPermutation(), wa1, qtb, x, sdiag);

    wa2 = diag.cwiseProduct(x);
    dxnorm = wa2.blueNorm();
    temp = fp;
    fp = dxnorm - m_delta;

    /* if the function is small enough, accept the current value */
    /* of par. also test for the exceptional cases where parl */
    /* is zero or the number of iterations has reached 10. */
    if (abs(fp) <= Scalar(0.1) * m_delta || (parl == 0. && fp <= temp && temp < 0.) || iter == 10)
      break;

    /* compute the newton correction. */
    wa1 = qr.colsPermutation().inverse() * diag.cwiseProduct(wa2/dxnorm);
    // we could almost use this here, but the diagonal is outside qr, in sdiag[]
    for (j = 0; j < n; ++j) {
      wa1[j] /= sdiag[j];
      temp = wa1[j];
      for (Index i = j+1; i < n; ++i)
        wa1[i] -= s.coeff(i,j) * temp;
    }
    temp = wa1.blueNorm();
    parc = fp / m_delta / temp / temp;

    /* depending on the sign of the function, update parl or paru. */
    if (fp > 0.)
      parl = (std::max)(parl,par);
    if (fp < 0.)
      paru = (std::min)(paru,par);

    /* compute an improved estimate for par. */
    par = (std::max)(parl,par+parc);
  }
  if (iter == 0)
    par = 0.;
  return;
}
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_LMPAR_H
| 5,039
| 30.304348
| 103
|
h
|
null |
LRMI-main/unsupported/Eigen/src/LevenbergMarquardt/LMqrsolv.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009 Thomas Capricelli <orzel@freehackers.org>
// Copyright (C) 2012 Desire Nuentsa <desire.nuentsa_wakam@inria.fr>
//
// This code initially comes from MINPACK whose original authors are:
// Copyright Jorge More - Argonne National Laboratory
// Copyright Burt Garbow - Argonne National Laboratory
// Copyright Ken Hillstrom - Argonne National Laboratory
//
// This Source Code Form is subject to the terms of the Minpack license
// (a BSD-like license) described in the campaigned CopyrightMINPACK.txt file.
#ifndef EIGEN_LMQRSOLV_H
#define EIGEN_LMQRSOLV_H
namespace Eigen {
namespace internal {
template <typename Scalar,int Rows, int Cols, typename PermIndex>
// Solves the augmented least-squares system arising in lmpar2: given the
// upper-triangular factor \a s, the permutation \a iPerm, the damping
// diagonal \a diag and \a qtb = Q^T * b, computes \a x and leaves the
// modified diagonal in \a sdiag.  Dense MINPACK qrsolv port using Givens
// rotations; the rotation/update order is significant.
void lmqrsolv(
  Matrix<Scalar,Rows,Cols> &s,
  const PermutationMatrix<Dynamic,Dynamic,PermIndex> &iPerm,
  const Matrix<Scalar,Dynamic,1> &diag,
  const Matrix<Scalar,Dynamic,1> &qtb,
  Matrix<Scalar,Dynamic,1> &x,
  Matrix<Scalar,Dynamic,1> &sdiag)
{
    /* Local variables */
    Index i, j, k;
    Scalar temp;
    Index n = s.cols();
    Matrix<Scalar,Dynamic,1>  wa(n);
    JacobiRotation<Scalar> givens;

    /* Function Body */
    // the following will only change the lower triangular part of s, including
    // the diagonal, though the diagonal is restored afterward

    /* copy r and (q transpose)*b to preserve input and initialize s. */
    /* in particular, save the diagonal elements of r in x. */
    x = s.diagonal();
    wa = qtb;

    s.topLeftCorner(n,n).template triangularView<StrictlyLower>() = s.topLeftCorner(n,n).transpose();
    /* eliminate the diagonal matrix d using a givens rotation. */
    for (j = 0; j < n; ++j) {

        /* prepare the row of d to be eliminated, locating the */
        /* diagonal element using p from the qr factorization. */
        const PermIndex l = iPerm.indices()(j);
        if (diag[l] == 0.)
            break;
        sdiag.tail(n-j).setZero();
        sdiag[j] = diag[l];

        /* the transformations to eliminate the row of d */
        /* modify only a single element of (q transpose)*b */
        /* beyond the first n, which is initially zero. */
        Scalar qtbpj = 0.;
        for (k = j; k < n; ++k) {
            /* determine a givens rotation which eliminates the */
            /* appropriate element in the current row of d. */
            givens.makeGivens(-s(k,k), sdiag[k]);

            /* compute the modified diagonal element of r and */
            /* the modified element of ((q transpose)*b,0). */
            s(k,k) = givens.c() * s(k,k) + givens.s() * sdiag[k];
            temp = givens.c() * wa[k] + givens.s() * qtbpj;
            qtbpj = -givens.s() * wa[k] + givens.c() * qtbpj;
            wa[k] = temp;

            /* accumulate the transformation in the row of s. */
            for (i = k+1; i<n; ++i) {
                temp = givens.c() * s(i,k) + givens.s() * sdiag[i];
                sdiag[i] = -givens.s() * s(i,k) + givens.c() * sdiag[i];
                s(i,k) = temp;
            }
        }
    }

    /* solve the triangular system for z. if the system is */
    /* singular, then obtain a least squares solution. */
    Index nsing;
    for(nsing=0; nsing<n && sdiag[nsing]!=0; nsing++) {}

    wa.tail(n-nsing).setZero();
    s.topLeftCorner(nsing, nsing).transpose().template triangularView<Upper>().solveInPlace(wa.head(nsing));

    // restore
    sdiag = s.diagonal();
    s.diagonal() = x;

    /* permute the components of z back to components of x. */
    x = iPerm * wa;
}
template <typename Scalar, int _Options, typename Index>
// Sparse counterpart of the dense lmqrsolv above: same augmented solve, but
// the Givens updates are applied in place on a row-major copy of \a s,
// restricted to its existing sparsity pattern.  Iterator positions are
// mutated in place; the traversal order must be preserved.
void lmqrsolv(
  SparseMatrix<Scalar,_Options,Index> &s,
  const PermutationMatrix<Dynamic,Dynamic> &iPerm,
  const Matrix<Scalar,Dynamic,1> &diag,
  const Matrix<Scalar,Dynamic,1> &qtb,
  Matrix<Scalar,Dynamic,1> &x,
  Matrix<Scalar,Dynamic,1> &sdiag)
{
  /* Local variables */
  typedef SparseMatrix<Scalar,RowMajor,Index> FactorType;
    Index i, j, k, l;
    Scalar temp;
    Index n = s.cols();
    Matrix<Scalar,Dynamic,1>  wa(n);
    JacobiRotation<Scalar> givens;

    /* Function Body */
    // the following will only change the lower triangular part of s, including
    // the diagonal, though the diagonal is restored afterward

    /* copy r and (q transpose)*b to preserve input and initialize R. */
    wa = qtb;
    FactorType R(s);
    // Eliminate the diagonal matrix d using a givens rotation
    for (j = 0; j < n; ++j)
    {
      // Prepare the row of d to be eliminated, locating the
      // diagonal element using p from the qr factorization
      l = iPerm.indices()(j);
      if (diag(l) == Scalar(0))
        break;
      sdiag.tail(n-j).setZero();
      sdiag[j] = diag[l];
      // the transformations to eliminate the row of d
      // modify only a single element of (q transpose)*b
      // beyond the first n, which is initially zero.

      Scalar qtbpj = 0;
      // Browse the nonzero elements of row j of the upper triangular s
      for (k = j; k < n; ++k)
      {
        typename FactorType::InnerIterator itk(R,k);
        for (; itk; ++itk){
          if (itk.index() < k) continue;
          else break;
        }
        //At this point, we have the diagonal element R(k,k)
        // Determine a givens rotation which eliminates
        // the appropriate element in the current row of d
        givens.makeGivens(-itk.value(), sdiag(k));

        // Compute the modified diagonal element of r and
        // the modified element of ((q transpose)*b,0).
        itk.valueRef() = givens.c() * itk.value() + givens.s() * sdiag(k);
        temp = givens.c() * wa(k) + givens.s() * qtbpj;
        qtbpj = -givens.s() * wa(k) + givens.c() * qtbpj;
        wa(k) = temp;

        // Accumulate the transformation in the remaining k row/column of R
        for (++itk; itk; ++itk)
        {
          i = itk.index();
          temp = givens.c() *  itk.value() + givens.s() * sdiag(i);
          sdiag(i) = -givens.s() * itk.value() + givens.c() * sdiag(i);
          itk.valueRef() = temp;
        }
      }
    }

    // Solve the triangular system for z. If the system is
    // singular, then obtain a least squares solution
    Index nsing;
    for(nsing = 0; nsing<n && sdiag(nsing) !=0; nsing++) {}

    wa.tail(n-nsing).setZero();
//     x = wa;
    wa.head(nsing) = R.topLeftCorner(nsing,nsing).template triangularView<Upper>().solve/*InPlace*/(wa.head(nsing));

    sdiag = R.diagonal();
    // Permute the components of z back to components of x
    x = iPerm * wa;
}
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_LMQRSOLV_H
| 6,805
| 35.010582
| 116
|
h
|
null |
LRMI-main/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009, 2010, 2013 Jitse Niesen <jitse@maths.leeds.ac.uk>
// Copyright (C) 2011, 2013 Chen-Pang He <jdh8@ms63.hinet.net>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_MATRIX_EXPONENTIAL
#define EIGEN_MATRIX_EXPONENTIAL
#include "StemFunction.h"
namespace Eigen {
namespace internal {
/** \brief Scaling operator.
*
* This struct is used by CwiseUnaryOp to scale a matrix by \f$ 2^{-s} \f$.
*/
/** \brief Scaling operator.
 *
 * Functor applied through CwiseUnaryOp to multiply every matrix
 * coefficient by \f$ 2^{-s} \f$, where \c s is the squaring count.
 */
template <typename RealScalar>
struct MatrixExponentialScalingOp
{
  /** \brief Constructor.
   *
   * \param[in] squarings The integer \f$ s \f$ in this document.
   */
  MatrixExponentialScalingOp(int squarings) : m_squarings(squarings) { }

  /** \brief Scale a real coefficient, returning \f$ 2^{-s} x \f$. */
  inline const RealScalar operator() (const RealScalar& x) const
  {
    using std::ldexp;
    const RealScalar scaled = ldexp(x, -m_squarings);
    return scaled;
  }

  typedef std::complex<RealScalar> ComplexScalar;

  /** \brief Scale a complex coefficient: both parts become \f$ 2^{-s} \f$ times themselves. */
  inline const ComplexScalar operator() (const ComplexScalar& x) const
  {
    using std::ldexp;
    const RealScalar re = ldexp(x.real(), -m_squarings);
    const RealScalar im = ldexp(x.imag(), -m_squarings);
    return ComplexScalar(re, im);
  }

  private:
    int m_squarings;  // number of halvings applied to the exponent
};
/** \brief Compute the (3,3)-Padé approximant to the exponential.
*
* After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Padé
* approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$.
*/
template <typename MatA, typename MatU, typename MatV>
void matrix_exp_pade3(const MatA& A, MatU& U, MatV& V)
{
typedef typename MatA::PlainObject MatrixType;
typedef typename NumTraits<typename traits<MatA>::Scalar>::Real RealScalar;
const RealScalar b[] = {120.L, 60.L, 12.L, 1.L};
const MatrixType A2 = A * A;
const MatrixType tmp = b[3] * A2 + b[1] * MatrixType::Identity(A.rows(), A.cols());
U.noalias() = A * tmp;
V = b[2] * A2 + b[0] * MatrixType::Identity(A.rows(), A.cols());
}
/** \brief Compute the (5,5)-Padé approximant to the exponential.
*
* After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Padé
* approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$.
*/
template <typename MatA, typename MatU, typename MatV>
void matrix_exp_pade5(const MatA& A, MatU& U, MatV& V)
{
typedef typename MatA::PlainObject MatrixType;
typedef typename NumTraits<typename traits<MatrixType>::Scalar>::Real RealScalar;
const RealScalar b[] = {30240.L, 15120.L, 3360.L, 420.L, 30.L, 1.L};
const MatrixType A2 = A * A;
const MatrixType A4 = A2 * A2;
const MatrixType tmp = b[5] * A4 + b[3] * A2 + b[1] * MatrixType::Identity(A.rows(), A.cols());
U.noalias() = A * tmp;
V = b[4] * A4 + b[2] * A2 + b[0] * MatrixType::Identity(A.rows(), A.cols());
}
/** \brief Compute the (7,7)-Padé approximant to the exponential.
*
* After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Padé
* approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$.
*/
template <typename MatA, typename MatU, typename MatV>
void matrix_exp_pade7(const MatA& A, MatU& U, MatV& V)
{
typedef typename MatA::PlainObject MatrixType;
typedef typename NumTraits<typename traits<MatrixType>::Scalar>::Real RealScalar;
const RealScalar b[] = {17297280.L, 8648640.L, 1995840.L, 277200.L, 25200.L, 1512.L, 56.L, 1.L};
const MatrixType A2 = A * A;
const MatrixType A4 = A2 * A2;
const MatrixType A6 = A4 * A2;
const MatrixType tmp = b[7] * A6 + b[5] * A4 + b[3] * A2
+ b[1] * MatrixType::Identity(A.rows(), A.cols());
U.noalias() = A * tmp;
V = b[6] * A6 + b[4] * A4 + b[2] * A2 + b[0] * MatrixType::Identity(A.rows(), A.cols());
}
/** \brief Compute the (9,9)-Padé approximant to the exponential.
*
* After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Padé
* approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$.
*/
template <typename MatA, typename MatU, typename MatV>
void matrix_exp_pade9(const MatA& A, MatU& U, MatV& V)
{
typedef typename MatA::PlainObject MatrixType;
typedef typename NumTraits<typename traits<MatrixType>::Scalar>::Real RealScalar;
const RealScalar b[] = {17643225600.L, 8821612800.L, 2075673600.L, 302702400.L, 30270240.L,
2162160.L, 110880.L, 3960.L, 90.L, 1.L};
const MatrixType A2 = A * A;
const MatrixType A4 = A2 * A2;
const MatrixType A6 = A4 * A2;
const MatrixType A8 = A6 * A2;
const MatrixType tmp = b[9] * A8 + b[7] * A6 + b[5] * A4 + b[3] * A2
+ b[1] * MatrixType::Identity(A.rows(), A.cols());
U.noalias() = A * tmp;
V = b[8] * A8 + b[6] * A6 + b[4] * A4 + b[2] * A2 + b[0] * MatrixType::Identity(A.rows(), A.cols());
}
/** \brief Compute the (13,13)-Padé approximant to the exponential.
*
* After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Padé
* approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$.
*/
/** \brief Compute the (13,13)-Padé approximant to the exponential.
 *
 * After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Padé
 * approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$.
 *
 * Unlike the lower-degree variants, the degree-13 polynomial is
 * evaluated with an extra multiplication by A6 (Paterson-Stockmeyer
 * style) to avoid forming A^8 and higher powers explicitly; note that
 * V is reused as scratch storage before receiving its final value.
 */
template <typename MatA, typename MatU, typename MatV>
void matrix_exp_pade13(const MatA& A, MatU& U, MatV& V)
{
  typedef typename MatA::PlainObject MatrixType;
  typedef typename NumTraits<typename traits<MatrixType>::Scalar>::Real RealScalar;
  const RealScalar b[] = {64764752532480000.L, 32382376266240000.L, 7771770303897600.L,
                          1187353796428800.L, 129060195264000.L, 10559470521600.L, 670442572800.L,
                          33522128640.L, 1323241920.L, 40840800.L, 960960.L, 16380.L, 182.L, 1.L};
  const MatrixType A2 = A * A;
  const MatrixType A4 = A2 * A2;
  const MatrixType A6 = A4 * A2;
  V = b[13] * A6 + b[11] * A4 + b[9] * A2; // used for temporary storage
  MatrixType tmp = A6 * V;
  tmp += b[7] * A6 + b[5] * A4 + b[3] * A2 + b[1] * MatrixType::Identity(A.rows(), A.cols());
  U.noalias() = A * tmp;
  tmp = b[12] * A6 + b[10] * A4 + b[8] * A2;
  V.noalias() = A6 * tmp;
  V += b[6] * A6 + b[4] * A4 + b[2] * A2 + b[0] * MatrixType::Identity(A.rows(), A.cols());
}
/** \brief Compute the (17,17)-Padé approximant to the exponential.
*
* After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Padé
* approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$.
*
* This function activates only if your long double is double-double or quadruple.
*/
#if LDBL_MANT_DIG > 64
/** \brief Compute the (17,17)-Padé approximant to the exponential.
 *
 * After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Padé
 * approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$.
 *
 * This function activates only if your long double is double-double or quadruple.
 * As in the degree-13 case, the polynomial is split around A^8 and V is
 * reused as scratch storage before receiving its final value.
 */
template <typename MatA, typename MatU, typename MatV>
void matrix_exp_pade17(const MatA& A, MatU& U, MatV& V)
{
  typedef typename MatA::PlainObject MatrixType;
  typedef typename NumTraits<typename traits<MatrixType>::Scalar>::Real RealScalar;
  const RealScalar b[] = {830034394580628357120000.L, 415017197290314178560000.L,
                          100610229646136770560000.L, 15720348382208870400000.L,
                          1774878043152614400000.L, 153822763739893248000.L, 10608466464820224000.L,
                          595373117923584000.L, 27563570274240000.L, 1060137318240000.L,
                          33924394183680.L, 899510451840.L, 19554575040.L, 341863200.L, 4651200.L,
                          46512.L, 306.L, 1.L};
  const MatrixType A2 = A * A;
  const MatrixType A4 = A2 * A2;
  const MatrixType A6 = A4 * A2;
  const MatrixType A8 = A4 * A4;
  V = b[17] * A8 + b[15] * A6 + b[13] * A4 + b[11] * A2; // used for temporary storage
  MatrixType tmp = A8 * V;
  tmp += b[9] * A8 + b[7] * A6 + b[5] * A4 + b[3] * A2
       + b[1] * MatrixType::Identity(A.rows(), A.cols());
  U.noalias() = A * tmp;
  tmp = b[16] * A8 + b[14] * A6 + b[12] * A4 + b[10] * A2;
  V.noalias() = tmp * A8;
  V += b[8] * A8 + b[6] * A6 + b[4] * A4 + b[2] * A2
     + b[0] * MatrixType::Identity(A.rows(), A.cols());
}
#endif
// Primary template: declaration only.  Selection of the Padé degree and the
// scaling amount depends on the arithmetic precision, so only the explicit
// specializations below (float, double, long double) provide definitions.
template <typename MatrixType, typename RealScalar = typename NumTraits<typename traits<MatrixType>::Scalar>::Real>
struct matrix_exp_computeUV
{
  /** \brief Compute Padé approximant to the exponential.
    *
    * Computes \c U, \c V and \c squarings such that \f$ (V+U)(V-U)^{-1} \f$ is a Padé
    * approximant of \f$ \exp(2^{-\mbox{squarings}}M) \f$ around \f$ M = 0 \f$, where \f$ M \f$
    * denotes the matrix \c arg. The degree of the Padé approximant and the value of squarings
    * are chosen such that the approximation error is no more than the round-off error.
    */
  static void run(const MatrixType& arg, MatrixType& U, MatrixType& V, int& squarings);
};
// Single-precision selection: the l1-norm thresholds below are precomputed
// bounds under which each Padé degree reaches float round-off accuracy
// (presumably derived per Higham's scaling-and-squaring analysis — see the
// module's references).
template <typename MatrixType>
struct matrix_exp_computeUV<MatrixType, float>
{
  template <typename ArgType>
  static void run(const ArgType& arg, MatrixType& U, MatrixType& V, int& squarings)
  {
    using std::frexp;
    using std::pow;
    const float l1norm = arg.cwiseAbs().colwise().sum().maxCoeff();
    squarings = 0;
    if (l1norm < 4.258730016922831e-001f) {
      matrix_exp_pade3(arg, U, V);
    } else if (l1norm < 1.880152677804762e+000f) {
      matrix_exp_pade5(arg, U, V);
    } else {
      // Norm too large for a direct approximant: scale arg by 2^-squarings
      // so its norm falls below maxnorm, then use the degree-7 approximant.
      const float maxnorm = 3.925724783138660f;
      frexp(l1norm / maxnorm, &squarings);
      if (squarings < 0) squarings = 0;
      MatrixType A = arg.unaryExpr(MatrixExponentialScalingOp<float>(squarings));
      matrix_exp_pade7(A, U, V);
    }
  }
};
// Double-precision selection: same scheme as the float specialization, with
// thresholds tuned for double round-off and the degree-13 approximant as the
// scaled fallback.
template <typename MatrixType>
struct matrix_exp_computeUV<MatrixType, double>
{
  typedef typename NumTraits<typename traits<MatrixType>::Scalar>::Real RealScalar;
  template <typename ArgType>
  static void run(const ArgType& arg, MatrixType& U, MatrixType& V, int& squarings)
  {
    using std::frexp;
    using std::pow;
    const RealScalar l1norm = arg.cwiseAbs().colwise().sum().maxCoeff();
    squarings = 0;
    if (l1norm < 1.495585217958292e-002) {
      matrix_exp_pade3(arg, U, V);
    } else if (l1norm < 2.539398330063230e-001) {
      matrix_exp_pade5(arg, U, V);
    } else if (l1norm < 9.504178996162932e-001) {
      matrix_exp_pade7(arg, U, V);
    } else if (l1norm < 2.097847961257068e+000) {
      matrix_exp_pade9(arg, U, V);
    } else {
      // Norm too large: scale arg by 2^-squarings below maxnorm, then apply
      // the degree-13 approximant (caller undoes scaling by squaring).
      const RealScalar maxnorm = 5.371920351148152;
      frexp(l1norm / maxnorm, &squarings);
      if (squarings < 0) squarings = 0;
      MatrixType A = arg.unaryExpr(MatrixExponentialScalingOp<RealScalar>(squarings));
      matrix_exp_pade13(A, U, V);
    }
  }
};
template <typename MatrixType>
struct matrix_exp_computeUV<MatrixType, long double>
{
  /** \brief long-double Padé/scaling selection for the matrix exponential.
    *
    * The width of long double is platform dependent; the preprocessor
    * branches below pick thresholds matching the actual mantissa size
    * (LDBL_MANT_DIG) of this build.
    */
  template <typename ArgType>
  static void run(const ArgType& arg, MatrixType& U, MatrixType& V, int& squarings)
  {
#if   LDBL_MANT_DIG == 53   // double precision
    // long double is plain double on this platform: reuse the double code.
    matrix_exp_computeUV<MatrixType, double>::run(arg, U, V, squarings);
#else
    using std::frexp;
    using std::pow;
    // L1 norm (maximum absolute column sum) drives the degree selection.
    const long double l1norm = arg.cwiseAbs().colwise().sum().maxCoeff();
    squarings = 0;
#if LDBL_MANT_DIG <= 64   // extended precision
    if (l1norm < 4.1968497232266989671e-003L) {
      matrix_exp_pade3(arg, U, V);
    } else if (l1norm < 1.1848116734693823091e-001L) {
      matrix_exp_pade5(arg, U, V);
    } else if (l1norm < 5.5170388480686700274e-001L) {
      matrix_exp_pade7(arg, U, V);
    } else if (l1norm < 1.3759868875587845383e+000L) {
      matrix_exp_pade9(arg, U, V);
    } else {
      const long double maxnorm = 4.0246098906697353063L;
      // frexp yields the binary exponent needed to bring l1norm/maxnorm below 1.
      frexp(l1norm / maxnorm, &squarings);
      if (squarings < 0) squarings = 0;
      MatrixType A = arg.unaryExpr(MatrixExponentialScalingOp<long double>(squarings));
      matrix_exp_pade13(A, U, V);
    }
#elif LDBL_MANT_DIG <= 106  // double-double
    if (l1norm < 3.2787892205607026992947488108213e-005L) {
      matrix_exp_pade3(arg, U, V);
    } else if (l1norm < 6.4467025060072760084130906076332e-003L) {
      matrix_exp_pade5(arg, U, V);
    } else if (l1norm < 6.8988028496595374751374122881143e-002L) {
      matrix_exp_pade7(arg, U, V);
    } else if (l1norm < 2.7339737518502231741495857201670e-001L) {
      matrix_exp_pade9(arg, U, V);
    } else if (l1norm < 1.3203382096514474905666448850278e+000L) {
      matrix_exp_pade13(arg, U, V);
    } else {
      const long double maxnorm = 3.2579440895405400856599663723517L;
      frexp(l1norm / maxnorm, &squarings);
      if (squarings < 0) squarings = 0;
      MatrixType A = arg.unaryExpr(MatrixExponentialScalingOp<long double>(squarings));
      matrix_exp_pade17(A, U, V);
    }
#elif LDBL_MANT_DIG <= 113  // quadruple precision
    if (l1norm < 1.639394610288918690547467954466970e-005L) {
      matrix_exp_pade3(arg, U, V);
    } else if (l1norm < 4.253237712165275566025884344433009e-003L) {
      matrix_exp_pade5(arg, U, V);
    } else if (l1norm < 5.125804063165764409885122032933142e-002L) {
      matrix_exp_pade7(arg, U, V);
    } else if (l1norm < 2.170000765161155195453205651889853e-001L) {
      matrix_exp_pade9(arg, U, V);
    } else if (l1norm < 1.125358383453143065081397882891878e+000L) {
      matrix_exp_pade13(arg, U, V);
    } else {
      const long double maxnorm = 2.884233277829519311757165057717815L;
      frexp(l1norm / maxnorm, &squarings);
      if (squarings < 0) squarings = 0;
      MatrixType A = arg.unaryExpr(MatrixExponentialScalingOp<long double>(squarings));
      matrix_exp_pade17(A, U, V);
    }
#else
    // this case should be handled in compute()
    eigen_assert(false && "Bug in MatrixExponential");
#endif
#endif  // LDBL_MANT_DIG
  }
};
// Trait: true for scalar types that have a dedicated matrix_exp_computeUV
// specialization above; all other scalars take the generic matrix-function
// fallback in matrix_exp_compute(..., false_type).
template<typename T> struct is_exp_known_type : false_type {};
template<> struct is_exp_known_type<float> : true_type {};
template<> struct is_exp_known_type<double> : true_type {};
#if LDBL_MANT_DIG <= 113
// long double is only supported up to quadruple precision.
template<> struct is_exp_known_type<long double> : true_type {};
#endif
/** \internal Matrix exponential for natively supported scalar types.
  * Builds the Padé approximant \f$ (V+U)(V-U)^{-1} \f$ of the (possibly
  * scaled) argument, then squares the result \c squarings times to undo
  * the scaling.
  */
template <typename ArgType, typename ResultType>
void matrix_exp_compute(const ArgType& arg, ResultType &result, true_type) // natively supported scalar type
{
  typedef typename ArgType::PlainObject MatrixType;
  MatrixType padeU, padeV;
  int squarings;
  matrix_exp_computeUV<MatrixType>::run(arg, padeU, padeV, squarings);
  // Padé approximant of the scaled argument: (V+U) (V-U)^{-1}.
  const MatrixType numerator   = padeV + padeU;
  const MatrixType denominator = padeV - padeU;
  result = denominator.partialPivLu().solve(numerator);
  // Undo the 2^-squarings scaling by repeated squaring.
  while (squarings-- > 0)
    result *= result;
}
/* Computes the matrix exponential
*
* \param arg argument of matrix exponential (should be plain object)
* \param result variable in which result will be stored
*/
template <typename ArgType, typename ResultType>
void matrix_exp_compute(const ArgType& arg, ResultType &result, false_type) // default
{
typedef typename ArgType::PlainObject MatrixType;
typedef typename traits<MatrixType>::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef typename std::complex<RealScalar> ComplexScalar;
result = arg.matrixFunction(internal::stem_function_exp<ComplexScalar>);
}
} // end namespace Eigen::internal
/** \ingroup MatrixFunctions_Module
  *
  * \brief Proxy for the matrix exponential of some matrix (expression).
  *
  * \tparam Derived  Type of the argument to the matrix exponential.
  *
  * This class holds the argument to the matrix exponential until it is assigned or evaluated for
  * some other reason (so the argument should not be changed in the meantime). It is the return type
  * of MatrixBase::exp() and most of the time this is the only way it is used.
  */
template<typename Derived> struct MatrixExponentialReturnValue
: public ReturnByValue<MatrixExponentialReturnValue<Derived> >
{
  public:
    /** \brief Constructor.
      *
      * \param src %Matrix (expression) forming the argument of the matrix exponential.
      */
    MatrixExponentialReturnValue(const Derived& src) : m_src(src) { }

    /** \brief Compute the matrix exponential.
      *
      * \param result  the matrix exponential of \p src in the constructor.
      */
    template <typename ResultType>
    inline void evalTo(ResultType& result) const
    {
      // Evaluate the (possibly lazy) expression once before the algorithm runs.
      const typename internal::nested_eval<Derived, 10>::type tmp(m_src);
      // Dispatch on the scalar type: dedicated Padé code vs. generic fallback.
      internal::matrix_exp_compute(tmp, result, internal::is_exp_known_type<typename Derived::RealScalar>());
    }

    Index rows() const { return m_src.rows(); }  ///< \returns row count of the argument
    Index cols() const { return m_src.cols(); }  ///< \returns column count of the argument

  protected:
    // Stored (reference to the) argument expression.
    const typename internal::ref_selector<Derived>::type m_src;
};
namespace internal {
// ReturnByValue plumbing: the evaluated result of exp() is a plain matrix
// of the same type as the argument expression.
template<typename Derived>
struct traits<MatrixExponentialReturnValue<Derived> >
{
  typedef typename Derived::PlainObject ReturnType;
};
}
// Entry point: returns a lazy proxy; the exponential is computed on assignment.
template <typename Derived>
const MatrixExponentialReturnValue<Derived> MatrixBase<Derived>::exp() const
{
  eigen_assert(rows() == cols());  // the matrix exponential is only defined for square matrices
  return MatrixExponentialReturnValue<Derived>(derived());
}
} // end namespace Eigen
#endif // EIGEN_MATRIX_EXPONENTIAL
| 16,624
| 36.613122
| 115
|
h
|
null |
LRMI-main/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2011, 2013 Jitse Niesen <jitse@maths.leeds.ac.uk>
// Copyright (C) 2011 Chen-Pang He <jdh8@ms63.hinet.net>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_MATRIX_LOGARITHM
#define EIGEN_MATRIX_LOGARITHM
namespace Eigen {
namespace internal {
// Minimum Padé approximant degree used by the matrix logarithm
// (identical for all scalar types).
template <typename Scalar>
struct matrix_log_min_pade_degree
{
  static const int value = 3;
};
// Maximum Padé approximant degree used by the matrix logarithm, chosen per
// scalar precision: more mantissa bits allow/require a higher degree.
template <typename Scalar>
struct matrix_log_max_pade_degree
{
  typedef typename NumTraits<Scalar>::Real RealScalar;
  static const int value = std::numeric_limits<RealScalar>::digits<= 24?  5:  // single precision
                           std::numeric_limits<RealScalar>::digits<= 53?  7:  // double precision
                           std::numeric_limits<RealScalar>::digits<= 64?  8:  // extended precision
                           std::numeric_limits<RealScalar>::digits<=106? 10:  // double-double
                                                                         11;  // quadruple precision
};
/** \brief Compute logarithm of 2x2 triangular matrix.
  *
  * The diagonal of \p result holds scalar logarithms; the (0,1) entry is a
  * divided difference of log between the two diagonal entries, computed by
  * one of three formulas depending on how close those entries are.
  */
template <typename MatrixType>
void matrix_log_compute_2x2(const MatrixType& A, MatrixType& result)
{
  typedef typename MatrixType::Scalar Scalar;
  typedef typename MatrixType::RealScalar RealScalar;
  using std::abs;
  using std::ceil;
  using std::imag;
  using std::log;

  Scalar logA00 = log(A(0,0));
  Scalar logA11 = log(A(1,1));

  result(0,0) = logA00;
  result(1,0) = Scalar(0);   // input is upper triangular; so is the logarithm
  result(1,1) = logA11;

  Scalar y = A(1,1) - A(0,0);
  if (y==Scalar(0))
  {
    // Equal diagonal entries: divided difference degenerates to log'(x) = 1/x.
    result(0,1) = A(0,1) / A(0,0);
  }
  else if ((abs(A(0,0)) < RealScalar(0.5)*abs(A(1,1))) || (abs(A(0,0)) > 2*abs(A(1,1))))
  {
    // Well-separated entries: plain divided-difference formula.
    result(0,1) = A(0,1) * (logA11 - logA00) / y;
  }
  else
  {
    // computation in previous branch is inaccurate if A(1,1) \approx A(0,0)
    // Use log1p plus an unwinding-number term to stay on the correct branch
    // of the complex logarithm.
    RealScalar unwindingNumber = ceil((imag(logA11 - logA00) - RealScalar(EIGEN_PI)) / RealScalar(2*EIGEN_PI));
    result(0,1) = A(0,1) * (numext::log1p(y/A(0,0)) + Scalar(0,RealScalar(2*EIGEN_PI)*unwindingNumber)) / y;
  }
}
/* \brief Get suitable degree for Pade approximation. (specialized for RealScalar = float) */
inline int matrix_log_get_pade_degree(float normTminusI)
{
  // Error-bound thresholds for degrees 3, 4 and 5 in single precision.
  const float maxNormForPade[] = { 2.5111573934555054e-1 /* degree = 3 */ , 4.0535837411880493e-1,
                                   5.3149729967117310e-1 };
  const int minPadeDegree = matrix_log_min_pade_degree<float>::value;
  const int maxPadeDegree = matrix_log_max_pade_degree<float>::value;
  // Return the smallest degree whose threshold covers normTminusI.
  int degree = minPadeDegree;
  while (degree <= maxPadeDegree && normTminusI > maxNormForPade[degree - minPadeDegree])
    ++degree;
  return degree;
}
/* \brief Get suitable degree for Pade approximation. (specialized for RealScalar = double) */
inline int matrix_log_get_pade_degree(double normTminusI)
{
  // Error-bound thresholds for degrees 3..7 in double precision.
  const double maxNormForPade[] = { 1.6206284795015624e-2 /* degree = 3 */ , 5.3873532631381171e-2,
                                    1.1352802267628681e-1, 1.8662860613541288e-1, 2.642960831111435e-1 };
  const int minPadeDegree = matrix_log_min_pade_degree<double>::value;
  const int maxPadeDegree = matrix_log_max_pade_degree<double>::value;
  // Return the smallest degree whose threshold covers normTminusI.
  int degree = minPadeDegree;
  while (degree <= maxPadeDegree && normTminusI > maxNormForPade[degree - minPadeDegree])
    ++degree;
  return degree;
}
/* \brief Get suitable degree for Pade approximation. (specialized for RealScalar = long double)
 *
 * The threshold table depends on the platform's long double width
 * (LDBL_MANT_DIG); the smallest degree whose threshold covers normTminusI
 * is returned.
 */
inline int matrix_log_get_pade_degree(long double normTminusI)
{
#if   LDBL_MANT_DIG == 53         // double precision
  const long double maxNormForPade[] = { 1.6206284795015624e-2L /* degree = 3 */ , 5.3873532631381171e-2L,
                                         1.1352802267628681e-1L, 1.8662860613541288e-1L, 2.642960831111435e-1L };
#elif LDBL_MANT_DIG <= 64         // extended precision
  const long double maxNormForPade[] = { 5.48256690357782863103e-3L /* degree = 3 */, 2.34559162387971167321e-2L,
                                         5.84603923897347449857e-2L, 1.08486423756725170223e-1L, 1.68385767881294446649e-1L,
                                         2.32777776523703892094e-1L };
#elif LDBL_MANT_DIG <= 106        // double-double
  const long double maxNormForPade[] = { 8.58970550342939562202529664318890e-5L /* degree = 3 */,
                                         9.34074328446359654039446552677759e-4L, 4.26117194647672175773064114582860e-3L,
                                         1.21546224740281848743149666560464e-2L, 2.61100544998339436713088248557444e-2L,
                                         4.66170074627052749243018566390567e-2L, 7.32585144444135027565872014932387e-2L,
                                         1.05026503471351080481093652651105e-1L };
#else                             // quadruple precision
  const long double maxNormForPade[] = { 4.7419931187193005048501568167858103e-5L /* degree = 3 */,
                                         5.8853168473544560470387769480192666e-4L, 2.9216120366601315391789493628113520e-3L,
                                         8.8415758124319434347116734705174308e-3L, 1.9850836029449446668518049562565291e-2L,
                                         3.6688019729653446926585242192447447e-2L, 5.9290962294020186998954055264528393e-2L,
                                         8.6998436081634343903250580992127677e-2L, 1.1880960220216759245467951592883642e-1L };
#endif
  const int minPadeDegree = matrix_log_min_pade_degree<long double>::value;
  const int maxPadeDegree = matrix_log_max_pade_degree<long double>::value;
  int degree = minPadeDegree;
  for (; degree <= maxPadeDegree; ++degree)
    if (normTminusI <= maxNormForPade[degree - minPadeDegree])
      break;
  return degree;
}
/* \brief Compute Pade approximation to matrix logarithm
 *
 * Evaluates the degree-'degree' approximant of log(T) in partial-fraction
 * form:  result = sum_k  weight_k * (I + node_k*(T-I))^{-1} (T-I),
 * where each term costs one triangular solve (see the loop at the end).
 */
template <typename MatrixType>
void matrix_log_compute_pade(MatrixType& result, const MatrixType& T, int degree)
{
  typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
  const int minPadeDegree = 3;
  const int maxPadeDegree = 11;
  assert(degree >= minPadeDegree && degree <= maxPadeDegree);
  // FIXME this creates float-conversion-warnings if these are enabled.
  // Either manually convert each value, or disable the warning locally

  // Quadrature nodes per degree (rows indexed by degree - minPadeDegree).
  const RealScalar nodes[][maxPadeDegree] = {
    { 0.1127016653792583114820734600217600L, 0.5000000000000000000000000000000000L,  // degree 3
      0.8872983346207416885179265399782400L },
    { 0.0694318442029737123880267555535953L, 0.3300094782075718675986671204483777L,  // degree 4
      0.6699905217924281324013328795516223L, 0.9305681557970262876119732444464048L },
    { 0.0469100770306680036011865608503035L, 0.2307653449471584544818427896498956L,  // degree 5
      0.5000000000000000000000000000000000L, 0.7692346550528415455181572103501044L,
      0.9530899229693319963988134391496965L },
    { 0.0337652428984239860938492227530027L, 0.1693953067668677431693002024900473L,  // degree 6
      0.3806904069584015456847491391596440L, 0.6193095930415984543152508608403560L,
      0.8306046932331322568306997975099527L, 0.9662347571015760139061507772469973L },
    { 0.0254460438286207377369051579760744L, 0.1292344072003027800680676133596058L,  // degree 7
      0.2970774243113014165466967939615193L, 0.5000000000000000000000000000000000L,
      0.7029225756886985834533032060384807L, 0.8707655927996972199319323866403942L,
      0.9745539561713792622630948420239256L },
    { 0.0198550717512318841582195657152635L, 0.1016667612931866302042230317620848L,  // degree 8
      0.2372337950418355070911304754053768L, 0.4082826787521750975302619288199080L,
      0.5917173212478249024697380711800920L, 0.7627662049581644929088695245946232L,
      0.8983332387068133697957769682379152L, 0.9801449282487681158417804342847365L },
    { 0.0159198802461869550822118985481636L, 0.0819844463366821028502851059651326L,  // degree 9
      0.1933142836497048013456489803292629L, 0.3378732882980955354807309926783317L,
      0.5000000000000000000000000000000000L, 0.6621267117019044645192690073216683L,
      0.8066857163502951986543510196707371L, 0.9180155536633178971497148940348674L,
      0.9840801197538130449177881014518364L },
    { 0.0130467357414141399610179939577740L, 0.0674683166555077446339516557882535L,  // degree 10
      0.1602952158504877968828363174425632L, 0.2833023029353764046003670284171079L,
      0.4255628305091843945575869994351400L, 0.5744371694908156054424130005648600L,
      0.7166976970646235953996329715828921L, 0.8397047841495122031171636825574368L,
      0.9325316833444922553660483442117465L, 0.9869532642585858600389820060422260L },
    { 0.0108856709269715035980309994385713L, 0.0564687001159523504624211153480364L,  // degree 11
      0.1349239972129753379532918739844233L, 0.2404519353965940920371371652706952L,
      0.3652284220238275138342340072995692L, 0.5000000000000000000000000000000000L,
      0.6347715779761724861657659927004308L, 0.7595480646034059079628628347293048L,
      0.8650760027870246620467081260155767L, 0.9435312998840476495375788846519636L,
      0.9891143290730284964019690005614287L } };

  // Quadrature weights, matching the node table above row for row.
  const RealScalar weights[][maxPadeDegree] = {
    { 0.2777777777777777777777777777777778L, 0.4444444444444444444444444444444444L,  // degree 3
      0.2777777777777777777777777777777778L },
    { 0.1739274225687269286865319746109997L, 0.3260725774312730713134680253890003L,  // degree 4
      0.3260725774312730713134680253890003L, 0.1739274225687269286865319746109997L },
    { 0.1184634425280945437571320203599587L, 0.2393143352496832340206457574178191L,  // degree 5
      0.2844444444444444444444444444444444L, 0.2393143352496832340206457574178191L,
      0.1184634425280945437571320203599587L },
    { 0.0856622461895851725201480710863665L, 0.1803807865240693037849167569188581L,  // degree 6
      0.2339569672863455236949351719947755L, 0.2339569672863455236949351719947755L,
      0.1803807865240693037849167569188581L, 0.0856622461895851725201480710863665L },
    { 0.0647424830844348466353057163395410L, 0.1398526957446383339507338857118898L,  // degree 7
      0.1909150252525594724751848877444876L, 0.2089795918367346938775510204081633L,
      0.1909150252525594724751848877444876L, 0.1398526957446383339507338857118898L,
      0.0647424830844348466353057163395410L },
    { 0.0506142681451881295762656771549811L, 0.1111905172266872352721779972131204L,  // degree 8
      0.1568533229389436436689811009933007L, 0.1813418916891809914825752246385978L,
      0.1813418916891809914825752246385978L, 0.1568533229389436436689811009933007L,
      0.1111905172266872352721779972131204L, 0.0506142681451881295762656771549811L },
    { 0.0406371941807872059859460790552618L, 0.0903240803474287020292360156214564L,  // degree 9
      0.1303053482014677311593714347093164L, 0.1561735385200014200343152032922218L,
      0.1651196775006298815822625346434870L, 0.1561735385200014200343152032922218L,
      0.1303053482014677311593714347093164L, 0.0903240803474287020292360156214564L,
      0.0406371941807872059859460790552618L },
    { 0.0333356721543440687967844049466659L, 0.0747256745752902965728881698288487L,  // degree 10
      0.1095431812579910219977674671140816L, 0.1346333596549981775456134607847347L,
      0.1477621123573764350869464973256692L, 0.1477621123573764350869464973256692L,
      0.1346333596549981775456134607847347L, 0.1095431812579910219977674671140816L,
      0.0747256745752902965728881698288487L, 0.0333356721543440687967844049466659L },
    { 0.0278342835580868332413768602212743L, 0.0627901847324523123173471496119701L,  // degree 11
      0.0931451054638671257130488207158280L, 0.1165968822959952399592618524215876L,
      0.1314022722551233310903444349452546L, 0.1364625433889503153572417641681711L,
      0.1314022722551233310903444349452546L, 0.1165968822959952399592618524215876L,
      0.0931451054638671257130488207158280L, 0.0627901847324523123173471496119701L,
      0.0278342835580868332413768602212743L } };

  MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
  result.setZero(T.rows(), T.rows());
  // Accumulate one triangular solve per quadrature point.
  for (int k = 0; k < degree; ++k) {
    RealScalar weight = weights[degree-minPadeDegree][k];
    RealScalar node = nodes[degree-minPadeDegree][k];
    result += weight * (MatrixType::Identity(T.rows(), T.rows()) + node * TminusI)
                       .template triangularView<Upper>().solve(TminusI);
  }
}
/** \brief Compute logarithm of triangular matrices with size > 2.
  * \details This uses an inverse scale-and-square algorithm: square roots of
  * \p A are taken until the result is close enough to the identity for a
  * Padé approximant, whose output is then scaled back by a power of two. */
template <typename MatrixType>
void matrix_log_compute_big(const MatrixType& A, MatrixType& result)
{
  typedef typename MatrixType::Scalar Scalar;
  typedef typename NumTraits<Scalar>::Real RealScalar;
  using std::pow;
  int numberOfSquareRoots = 0;
  int numberOfExtraSquareRoots = 0;
  int degree;
  MatrixType T = A, sqrtT;

  // Largest ||T - I|| for which the highest Padé degree of this precision
  // is accurate enough (see matrix_log_max_pade_degree).
  const int maxPadeDegree = matrix_log_max_pade_degree<Scalar>::value;
  const RealScalar maxNormForPade = RealScalar(
                                    maxPadeDegree<= 5? 5.3149729967117310e-1L:                    // single precision
                                    maxPadeDegree<= 7? 2.6429608311114350e-1L:                    // double precision
                                    maxPadeDegree<= 8? 2.32777776523703892094e-1L:                // extended precision
                                    maxPadeDegree<=10? 1.05026503471351080481093652651105e-1L:    // double-double
                                                       1.1880960220216759245467951592883642e-1L); // quadruple precision

  // Take square roots until T is close enough to I; allow at most one
  // "extra" square root once the degree gain per root has dropped to <= 1.
  while (true) {
    RealScalar normTminusI = (T - MatrixType::Identity(T.rows(), T.rows())).cwiseAbs().colwise().sum().maxCoeff();
    if (normTminusI < maxNormForPade) {
      degree = matrix_log_get_pade_degree(normTminusI);
      int degree2 = matrix_log_get_pade_degree(normTminusI / RealScalar(2));
      if ((degree - degree2 <= 1) || (numberOfExtraSquareRoots == 1))
        break;
      ++numberOfExtraSquareRoots;
    }
    matrix_sqrt_triangular(T, sqrtT);
    T = sqrtT.template triangularView<Upper>();
    ++numberOfSquareRoots;
  }

  matrix_log_compute_pade(result, T, degree);
  // log(A) = 2^s * log(A^(2^-s)): undo the square roots.
  result *= pow(RealScalar(2), RealScalar(numberOfSquareRoots)); // TODO replace by bitshift if possible
}
/** \ingroup MatrixFunctions_Module
  * \class MatrixLogarithmAtomic
  * \brief Helper class for computing matrix logarithm of atomic matrices.
  *
  * Here, an atomic matrix is a triangular matrix whose diagonal entries are close to each other.
  * This functor is handed to matrix_function_compute by MatrixLogarithmReturnValue::evalTo.
  *
  * \sa class MatrixFunctionAtomic, MatrixBase::log()
  */
template <typename MatrixType>
class MatrixLogarithmAtomic
{
public:
  /** \brief Compute matrix logarithm of atomic matrix
    * \param[in] A  argument of matrix logarithm, should be upper triangular and atomic
    * \returns  The logarithm of \p A.
    */
  MatrixType compute(const MatrixType& A);
};
/** \internal Dispatch on the matrix size: scalar log for 1x1, the direct
  * 2x2 formula, or the general inverse scale-and-square algorithm. */
template <typename MatrixType>
MatrixType MatrixLogarithmAtomic<MatrixType>::compute(const MatrixType& A)
{
  using std::log;
  MatrixType logA(A.rows(), A.rows());
  switch (A.rows()) {
    case 1:
      logA(0,0) = log(A(0,0));          // scalar logarithm
      break;
    case 2:
      matrix_log_compute_2x2(A, logA);  // closed-form 2x2 formula
      break;
    default:
      matrix_log_compute_big(A, logA);  // inverse scale-and-square
  }
  return logA;
}
} // end of namespace internal
/** \ingroup MatrixFunctions_Module
  *
  * \brief Proxy for the matrix logarithm of some matrix (expression).
  *
  * \tparam Derived  Type of the argument to the matrix function.
  *
  * This class holds the argument to the matrix function until it is
  * assigned or evaluated for some other reason (so the argument
  * should not be changed in the meantime). It is the return type of
  * MatrixBase::log() and most of the time this is the only way it
  * is used.
  */
template<typename Derived> class MatrixLogarithmReturnValue
: public ReturnByValue<MatrixLogarithmReturnValue<Derived> >
{
public:
  typedef typename Derived::Scalar Scalar;
  typedef typename Derived::Index Index;

protected:
  typedef typename internal::ref_selector<Derived>::type DerivedNested;

public:
  /** \brief Constructor.
    *
    * \param[in] A  %Matrix (expression) forming the argument of the matrix logarithm.
    */
  explicit MatrixLogarithmReturnValue(const Derived& A) : m_A(A) { }

  /** \brief Compute the matrix logarithm.
    *
    * \param[out] result  Logarithm of \c A, where \c A is as specified in the constructor.
    */
  template <typename ResultType>
  inline void evalTo(ResultType& result) const
  {
    typedef typename internal::nested_eval<Derived, 10>::type DerivedEvalType;
    typedef typename internal::remove_all<DerivedEvalType>::type DerivedEvalTypeClean;
    typedef internal::traits<DerivedEvalTypeClean> Traits;
    // The atomic solver operates on a complex dynamic-size matrix type.
    typedef std::complex<typename NumTraits<Scalar>::Real> ComplexScalar;
    typedef Matrix<ComplexScalar, Dynamic, Dynamic, 0, Traits::RowsAtCompileTime, Traits::ColsAtCompileTime> DynMatrixType;
    typedef internal::MatrixLogarithmAtomic<DynMatrixType> AtomicType;
    AtomicType atomic;

    internal::matrix_function_compute<typename DerivedEvalTypeClean::PlainObject>::run(m_A, atomic, result);
  }

  Index rows() const { return m_A.rows(); }  ///< \returns row count of the argument
  Index cols() const { return m_A.cols(); }  ///< \returns column count of the argument

private:
  const DerivedNested m_A;  // stored (reference to the) argument expression
};
namespace internal {
// ReturnByValue plumbing: evaluating log() yields a plain matrix of the
// argument's type.
template<typename Derived>
struct traits<MatrixLogarithmReturnValue<Derived> >
{
  typedef typename Derived::PlainObject ReturnType;
};
}
/********** MatrixBase method **********/

// Entry point: returns a lazy proxy; the logarithm is computed on assignment.
template <typename Derived>
const MatrixLogarithmReturnValue<Derived> MatrixBase<Derived>::log() const
{
  eigen_assert(rows() == cols());  // the matrix logarithm is only defined for square matrices
  return MatrixLogarithmReturnValue<Derived>(derived());
}
} // end namespace Eigen
#endif // EIGEN_MATRIX_LOGARITHM
| 17,557
| 45.946524
| 123
|
h
|
null |
LRMI-main/unsupported/Eigen/src/MatrixFunctions/MatrixPower.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2012, 2013 Chen-Pang He <jdh8@ms63.hinet.net>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_MATRIX_POWER
#define EIGEN_MATRIX_POWER
namespace Eigen {
template<typename MatrixType> class MatrixPower;
/**
 * \ingroup MatrixFunctions_Module
 *
 * \brief Proxy for the matrix power of some matrix.
 *
 * \tparam MatrixType  type of the base, a matrix.
 *
 * This class holds the arguments to the matrix power until it is
 * assigned or evaluated for some other reason (so the argument
 * should not be changed in the meantime). It is the return type of
 * MatrixPower::operator() and related functions and most of the
 * time this is the only way it is used.
 */
/* TODO This class is only used by MatrixPower, so it should be nested
 * into MatrixPower, like MatrixPower::ReturnValue. However, my
 * compiler complained about unused template parameter in the
 * following declaration in namespace internal.
 *
 * template<typename MatrixType>
 * struct traits<MatrixPower<MatrixType>::ReturnValue>;
 */
template<typename MatrixType>
class MatrixPowerParenthesesReturnValue : public ReturnByValue< MatrixPowerParenthesesReturnValue<MatrixType> >
{
  public:
    typedef typename MatrixType::RealScalar RealScalar;

    /**
     * \brief Constructor.
     *
     * \param[in] pow  %MatrixPower storing the base.
     * \param[in] p    scalar, the exponent of the matrix power.
     */
    MatrixPowerParenthesesReturnValue(MatrixPower<MatrixType>& pow, RealScalar p) : m_pow(pow), m_p(p)
    { }

    /**
     * \brief Compute the matrix power.
     *
     * \param[out] result  the base raised to the exponent given in the constructor.
     */
    template<typename ResultType>
    inline void evalTo(ResultType& result) const
    { m_pow.compute(result, m_p); }

    Index rows() const { return m_pow.rows(); }  ///< \returns row count of the base
    Index cols() const { return m_pow.cols(); }  ///< \returns column count of the base

  private:
    MatrixPower<MatrixType>& m_pow;  // non-owning reference: must outlive this proxy
    const RealScalar m_p;            // the exponent
};
/**
 * \ingroup MatrixFunctions_Module
 *
 * \brief Class for computing matrix powers.
 *
 * \tparam MatrixType  type of the base, expected to be an instantiation
 * of the Matrix class template.
 *
 * This class is capable of computing triangular real/complex matrices
 * raised to a power in the interval \f$ (-1, 1) \f$.
 *
 * \note Currently this class is only used by MatrixPower. One may
 * insist that this be nested into MatrixPower. This class is here to
 * facilitate future development of triangular matrix functions.
 */
template<typename MatrixType>
class MatrixPowerAtomic : internal::noncopyable
{
  private:
    enum {
      RowsAtCompileTime = MatrixType::RowsAtCompileTime,
      MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime
    };
    typedef typename MatrixType::Scalar Scalar;
    typedef typename MatrixType::RealScalar RealScalar;
    typedef std::complex<RealScalar> ComplexScalar;
    // Results are written into a block of the caller's matrix.
    typedef Block<MatrixType,Dynamic,Dynamic> ResultType;

    const MatrixType& m_A;  // the (triangular) base, referenced not copied
    RealScalar m_p;         // the exponent, in (-1, 1)

    // Padé approximant of the fractional power, expressed in IminusT = I - T.
    void computePade(int degree, const MatrixType& IminusT, ResultType& res) const;
    // Direct formulas for the diagonal and first superdiagonal of the power.
    void compute2x2(ResultType& res, RealScalar p) const;
    // General algorithm (square roots + Padé + repeated squaring), size > 2.
    void computeBig(ResultType& res) const;
    // Smallest Padé degree accurate for the given ||I-T||, per precision.
    static int getPadeDegree(float normIminusT);
    static int getPadeDegree(double normIminusT);
    static int getPadeDegree(long double normIminusT);
    // Accurate superdiagonal entry for two close eigenvalues.
    static ComplexScalar computeSuperDiag(const ComplexScalar&, const ComplexScalar&, RealScalar p);
    static RealScalar computeSuperDiag(RealScalar, RealScalar, RealScalar p);

  public:
    /**
     * \brief Constructor.
     *
     * \param[in] T  the base of the matrix power.
     * \param[in] p  the exponent of the matrix power, should be in
     * \f$ (-1, 1) \f$.
     *
     * The class stores a reference to T, so it should not be changed
     * (or destroyed) before evaluation. Only the upper triangular
     * part of T is read.
     */
    MatrixPowerAtomic(const MatrixType& T, RealScalar p);

    /**
     * \brief Compute the matrix power.
     *
     * \param[out] res  \f$ A^p \f$ where A and p are specified in the
     * constructor.
     */
    void compute(ResultType& res) const;
};
template<typename MatrixType>
MatrixPowerAtomic<MatrixType>::MatrixPowerAtomic(const MatrixType& T, RealScalar p) :
  m_A(T), m_p(p)
{
  eigen_assert(T.rows() == T.cols());  // only square matrices have powers
  eigen_assert(p > -1 && p < 1);       // this class only handles exponents in (-1, 1)
}
/** \internal Dispatch on the matrix size: empty matrices need no work,
  * 1x1 reduces to a scalar power, 2x2 uses the direct formula, and larger
  * sizes run the general triangular algorithm. */
template<typename MatrixType>
void MatrixPowerAtomic<MatrixType>::compute(ResultType& res) const
{
  using std::pow;
  const Index size = m_A.rows();
  if (size == 0) {
    // nothing to compute for an empty matrix
  } else if (size == 1) {
    res(0,0) = pow(m_A(0,0), m_p);   // scalar power
  } else if (size == 2) {
    compute2x2(res, m_p);            // direct 2x2 formula
  } else {
    computeBig(res);                 // square roots + Padé + squaring
  }
}
// Evaluate the Padé approximant of the fractional power (in terms of
// IminusT = I - T) in continued-fraction form, from the innermost term
// outwards: each loop iteration performs one upper-triangular solve.
// The coefficient for term i is -p for i==1, (-p - i/2)/(2i) for odd i,
// and (p - i/2)/(2i - 2) for even i, as encoded in the nested ternaries.
template<typename MatrixType>
void MatrixPowerAtomic<MatrixType>::computePade(int degree, const MatrixType& IminusT, ResultType& res) const
{
  int i = 2*degree;
  res = (m_p-RealScalar(degree)) / RealScalar(2*i-2) * IminusT;
  for (--i; i; --i) {
    res = (MatrixType::Identity(IminusT.rows(), IminusT.cols()) + res).template triangularView<Upper>()
        .solve((i==1 ? -m_p : i&1 ? (-m_p-RealScalar(i/2))/RealScalar(2*i) : (m_p-RealScalar(i/2))/RealScalar(2*i-2)) * IminusT).eval();
  }
  res += MatrixType::Identity(IminusT.rows(), IminusT.cols());
}
// This function assumes that res has the correct size (see bug 614)
// Despite its name, this fills the diagonal and first superdiagonal of an
// upper-triangular matrix of any size; computeBig calls it repeatedly to
// refresh those entries with direct formulas for accuracy.
template<typename MatrixType>
void MatrixPowerAtomic<MatrixType>::compute2x2(ResultType& res, RealScalar p) const
{
  using std::abs;
  using std::pow;
  res.coeffRef(0,0) = pow(m_A.coeff(0,0), p);

  for (Index i=1; i < m_A.cols(); ++i) {
    res.coeffRef(i,i) = pow(m_A.coeff(i,i), p);
    if (m_A.coeff(i-1,i-1) == m_A.coeff(i,i))
      // Equal diagonal entries: divided difference degenerates to p*x^(p-1).
      res.coeffRef(i-1,i) = p * pow(m_A.coeff(i,i), p-1);
    else if (2*abs(m_A.coeff(i-1,i-1)) < abs(m_A.coeff(i,i)) || 2*abs(m_A.coeff(i,i)) < abs(m_A.coeff(i-1,i-1)))
      // Well-separated entries: plain divided difference of x^p.
      res.coeffRef(i-1,i) = (res.coeff(i,i)-res.coeff(i-1,i-1)) / (m_A.coeff(i,i)-m_A.coeff(i-1,i-1));
    else
      // Close (but unequal) entries: use the cancellation-safe formula.
      res.coeffRef(i-1,i) = computeSuperDiag(m_A.coeff(i,i), m_A.coeff(i-1,i-1), p);
    res.coeffRef(i-1,i) *= m_A.coeff(i-1,i);
  }
}
// General algorithm for triangular matrices larger than 2x2: take square
// roots of T until I-T is small enough for a Padé approximant, evaluate it,
// then undo the square roots by repeated squaring (refreshing diagonal and
// superdiagonal entries via compute2x2 for accuracy).
template<typename MatrixType>
void MatrixPowerAtomic<MatrixType>::computeBig(ResultType& res) const
{
  using std::ldexp;
  const int digits = std::numeric_limits<RealScalar>::digits;
  // Largest ||I-T|| for which the highest Padé degree of this precision is
  // accurate enough.
  const RealScalar maxNormForPade = RealScalar(
                                    digits <=  24? 4.3386528e-1L                             // single precision
                                  : digits <=  53? 2.789358995219730e-1L                     // double precision
                                  : digits <=  64? 2.4471944416607995472e-1L                 // extended precision
                                  : digits <= 106? 1.1016843812851143391275867258512e-1L     // double-double
                                  :                9.134603732914548552537150753385375e-2L); // quadruple precision
  MatrixType IminusT, sqrtT, T = m_A.template triangularView<Upper>();
  RealScalar normIminusT;
  int degree, degree2, numberOfSquareRoots = 0;
  bool hasExtraSquareRoot = false;

  // Fractional powers require a nonzero triangular diagonal.
  for (Index i=0; i < m_A.cols(); ++i)
    eigen_assert(m_A(i,i) != RealScalar(0));

  // Take square roots until the Padé approximant applies; allow at most one
  // "extra" square root once the degree gain per root has dropped to <= 1.
  while (true) {
    IminusT = MatrixType::Identity(m_A.rows(), m_A.cols()) - T;
    normIminusT = IminusT.cwiseAbs().colwise().sum().maxCoeff();
    if (normIminusT < maxNormForPade) {
      degree = getPadeDegree(normIminusT);
      degree2 = getPadeDegree(normIminusT/2);
      if (degree - degree2 <= 1 || hasExtraSquareRoot)
        break;
      hasExtraSquareRoot = true;
    }
    matrix_sqrt_triangular(T, sqrtT);
    T = sqrtT.template triangularView<Upper>();
    ++numberOfSquareRoots;
  }

  computePade(degree, IminusT, res);

  // Undo the square roots: square repeatedly, refreshing diagonal and
  // superdiagonal entries with the direct formula at each halved exponent.
  for (; numberOfSquareRoots; --numberOfSquareRoots) {
    compute2x2(res, ldexp(m_p, -numberOfSquareRoots));
    res = res.template triangularView<Upper>() * res;
  }
  compute2x2(res, m_p);
}
// Smallest Padé degree whose single-precision error bound covers
// normIminusT. Only degrees 3 and 4 are tabulated; 5 is the fallback.
template<typename MatrixType>
inline int MatrixPowerAtomic<MatrixType>::getPadeDegree(float normIminusT)
{
  const float maxNormForPade[] = { 2.8064004e-1f /* degree = 3 */ , 4.3386528e-1f };
  int degree = 3;
  while (degree <= 4 && normIminusT > maxNormForPade[degree - 3])
    ++degree;
  return degree;
}
// Smallest Padé degree whose double-precision error bound covers
// normIminusT (degrees 3..7 tabulated).
template<typename MatrixType>
inline int MatrixPowerAtomic<MatrixType>::getPadeDegree(double normIminusT)
{
  const double maxNormForPade[] = { 1.884160592658218e-2 /* degree = 3 */ , 6.038881904059573e-2, 1.239917516308172e-1,
                                    1.999045567181744e-1, 2.789358995219730e-1 };
  int degree = 3;
  while (degree <= 7 && normIminusT > maxNormForPade[degree - 3])
    ++degree;
  return degree;
}
// Smallest Padé degree whose long-double error bound covers normIminusT;
// the threshold table is selected by the platform's long double width.
// Fix: the tables are now declared `long double` (they were `double` in
// some branches), so the L-suffixed literals keep their full precision on
// platforms where long double is wider than double — matching the
// corresponding long double tables in matrix_log_get_pade_degree.
template<typename MatrixType>
inline int MatrixPowerAtomic<MatrixType>::getPadeDegree(long double normIminusT)
{
#if   LDBL_MANT_DIG == 53
  const int maxPadeDegree = 7;
  const long double maxNormForPade[] = { 1.884160592658218e-2L /* degree = 3 */ , 6.038881904059573e-2L, 1.239917516308172e-1L,
                                         1.999045567181744e-1L, 2.789358995219730e-1L };
#elif LDBL_MANT_DIG <= 64
  const int maxPadeDegree = 8;
  const long double maxNormForPade[] = { 6.3854693117491799460e-3L /* degree = 3 */ , 2.6394893435456973676e-2L,
                                         6.4216043030404063729e-2L, 1.1701165502926694307e-1L, 1.7904284231268670284e-1L, 2.4471944416607995472e-1L };
#elif LDBL_MANT_DIG <= 106
  const int maxPadeDegree = 10;
  const long double maxNormForPade[] = { 1.0007161601787493236741409687186e-4L /* degree = 3 */ ,
                                         1.0007161601787493236741409687186e-3L, 4.7069769360887572939882574746264e-3L, 1.3220386624169159689406653101695e-2L,
                                         2.8063482381631737920612944054906e-2L, 4.9625993951953473052385361085058e-2L, 7.7367040706027886224557538328171e-2L,
                                         1.1016843812851143391275867258512e-1L };
#else
  const int maxPadeDegree = 10;
  const long double maxNormForPade[] = { 5.524506147036624377378713555116378e-5L /* degree = 3 */ ,
                                         6.640600568157479679823602193345995e-4L, 3.227716520106894279249709728084626e-3L,
                                         9.619593944683432960546978734646284e-3L, 2.134595382433742403911124458161147e-2L,
                                         3.908166513900489428442993794761185e-2L, 6.266780814639442865832535460550138e-2L,
                                         9.134603732914548552537150753385375e-2L };
#endif
  int degree = 3;
  for (; degree <= maxPadeDegree; ++degree)
    if (normIminusT <= maxNormForPade[degree - 3])
      break;
  return degree;
}
// Divided difference of x^p between two close complex eigenvalues, written
// via sinh to avoid the cancellation in curr^p - prev^p.
template<typename MatrixType>
inline typename MatrixPowerAtomic<MatrixType>::ComplexScalar
MatrixPowerAtomic<MatrixType>::computeSuperDiag(const ComplexScalar& curr, const ComplexScalar& prev, RealScalar p)
{
  using std::ceil;
  using std::exp;
  using std::log;
  using std::sinh;

  ComplexScalar logCurr = log(curr);
  ComplexScalar logPrev = log(prev);
  // The unwinding number keeps the complex logarithm on the correct branch.
  RealScalar unwindingNumber = ceil((numext::imag(logCurr - logPrev) - RealScalar(EIGEN_PI)) / RealScalar(2*EIGEN_PI));
  ComplexScalar w = numext::log1p((curr-prev)/prev)/RealScalar(2) + ComplexScalar(0, RealScalar(EIGEN_PI)*unwindingNumber);
  return RealScalar(2) * exp(RealScalar(0.5) * p * (logCurr + logPrev)) * sinh(p * w) / (curr - prev);
}
template<typename MatrixType>
inline typename MatrixPowerAtomic<MatrixType>::RealScalar
MatrixPowerAtomic<MatrixType>::computeSuperDiag(RealScalar curr, RealScalar prev, RealScalar p)
{
  using std::exp;
  using std::log;
  using std::sinh;
  // Real-arithmetic variant of the complex overload above; no unwinding
  // (branch) correction is needed for real logarithms.
  const RealScalar halfLogRatio = numext::log1p((curr-prev)/prev)/RealScalar(2);
  const RealScalar midpointPower = exp(p * (log(curr) + log(prev)) / 2);
  return 2 * midpointPower * sinh(p * halfLogRatio) / (curr - prev);
}
/**
* \ingroup MatrixFunctions_Module
*
* \brief Class for computing matrix powers.
*
* \tparam MatrixType type of the base, expected to be an instantiation
* of the Matrix class template.
*
* This class is capable of computing real/complex matrices raised to
* an arbitrary real power. Meanwhile, it saves the result of Schur
* decomposition if an non-integral power has even been calculated.
* Therefore, if you want to compute multiple (>= 2) matrix powers
* for the same matrix, using the class directly is more efficient than
* calling MatrixBase::pow().
*
* Example:
* \include MatrixPower_optimal.cpp
* Output: \verbinclude MatrixPower_optimal.out
*/
template<typename MatrixType>
class MatrixPower : internal::noncopyable
{
  private:
    typedef typename MatrixType::Scalar Scalar;
    typedef typename MatrixType::RealScalar RealScalar;
  public:
    /**
     * \brief Constructor.
     *
     * \param[in] A  the base of the matrix power.
     *
     * The class stores a reference to A, so it should not be changed
     * (or destroyed) before evaluation.
     */
    explicit MatrixPower(const MatrixType& A) :
      m_A(A),
      m_conditionNumber(0),
      m_rank(A.cols()),
      m_nulls(0)
    { eigen_assert(A.rows() == A.cols()); }
    /**
     * \brief Returns the matrix power.
     *
     * \param[in] p  exponent, a real scalar.
     * \return The expression \f$ A^p \f$, where A is specified in the
     * constructor.
     */
    // Lazy: only builds a proxy; the actual computation happens on
    // assignment/evaluation of the returned expression.
    const MatrixPowerParenthesesReturnValue<MatrixType> operator()(RealScalar p)
    { return MatrixPowerParenthesesReturnValue<MatrixType>(*this, p); }
    /**
     * \brief Compute the matrix power.
     *
     * \param[in]  p    exponent, a real scalar.
     * \param[out] res  \f$ A^p \f$ where A is specified in the
     * constructor.
     */
    template<typename ResultType>
    void compute(ResultType& res, RealScalar p);
    Index rows() const { return m_A.rows(); }
    Index cols() const { return m_A.cols(); }
  private:
    typedef std::complex<RealScalar> ComplexScalar;
    typedef Matrix<ComplexScalar, Dynamic, Dynamic, 0,
              MatrixType::RowsAtCompileTime, MatrixType::ColsAtCompileTime> ComplexMatrix;
    /** \brief Reference to the base of matrix power. */
    typename MatrixType::Nested m_A;
    /** \brief Temporary storage. */
    MatrixType m_tmp;
    /** \brief Store the result of Schur decomposition. */
    ComplexMatrix m_T, m_U;
    /** \brief Store fractional power of m_T. */
    ComplexMatrix m_fT;
    /**
     * \brief Condition number of m_A.
     *
     * It is initialized as 0 to avoid performing unnecessary Schur
     * decomposition, which is the bottleneck.
     */
    RealScalar m_conditionNumber;
    /** \brief Rank of m_A. */
    Index m_rank;
    /** \brief Rank deficiency of m_A. */
    Index m_nulls;
    /**
     * \brief Split p into integral part and fractional part.
     *
     * \param[in]  p        The exponent.
     * \param[out] p        The fractional part ranging in \f$ (-1, 1) \f$.
     * \param[out] intpart  The integral part.
     *
     * Only if the fractional part is nonzero, it calls initialize().
     */
    void split(RealScalar& p, RealScalar& intpart);
    /** \brief Perform Schur decomposition for fractional power. */
    void initialize();
    /** \brief Multiply res by the integral power of the base (repeated squaring). */
    template<typename ResultType>
    void computeIntPower(ResultType& res, RealScalar p);
    /** \brief Multiply res by the fractional power of the base, via the Schur form. */
    template<typename ResultType>
    void computeFracPower(ResultType& res, RealScalar p);
    /** \brief Undo the Schur decomposition: res = U * T * U^*. */
    template<int Rows, int Cols, int Options, int MaxRows, int MaxCols>
    static void revertSchur(
        Matrix<ComplexScalar, Rows, Cols, Options, MaxRows, MaxCols>& res,
        const ComplexMatrix& T,
        const ComplexMatrix& U);
    /** \brief Undo the Schur decomposition, keeping only the real part of the result. */
    template<int Rows, int Cols, int Options, int MaxRows, int MaxCols>
    static void revertSchur(
        Matrix<RealScalar, Rows, Cols, Options, MaxRows, MaxCols>& res,
        const ComplexMatrix& T,
        const ComplexMatrix& U);
};
template<typename MatrixType>
template<typename ResultType>
void MatrixPower<MatrixType>::compute(ResultType& res, RealScalar p)
{
  using std::pow;
  // Trivial sizes are handled directly; larger matrices are decomposed into
  // an integral power times an optional fractional power.
  const Index size = cols();
  if (size == 0)
    return;                                // empty matrix: nothing to do
  if (size == 1) {
    res(0,0) = pow(m_A.coeff(0,0), p);     // 1x1: scalar power
    return;
  }
  RealScalar intpart;
  split(p, intpart);                       // p now holds the fractional remainder
  res = MatrixType::Identity(rows(), cols());
  computeIntPower(res, intpart);
  if (p) computeFracPower(res, p);         // skipped when the exponent is integral
}
// On entry p is the full exponent; on exit p holds only its fractional part
// and intpart the integral part.
template<typename MatrixType>
void MatrixPower<MatrixType>::split(RealScalar& p, RealScalar& intpart)
{
  using std::floor;
  using std::pow;
  intpart = floor(p);
  p -= intpart;
  // Perform Schur decomposition if it is not yet performed and the power is
  // not an integer.
  if (!m_conditionNumber && p)
    initialize();
  // Choose the more stable of intpart = floor(p) and intpart = ceil(p).
  // The test weighs the fractional remainder against the condition number
  // raised to that remainder.
  if (p > RealScalar(0.5) && p > (1-p) * pow(m_conditionNumber, p)) {
    --p;
    ++intpart;
  }
}
// One-time setup for fractional powers: Schur-decompose m_A, estimate its
// condition number from the eigenvalue magnitudes, and reorder zero
// eigenvalues to the bottom-right corner of T.
template<typename MatrixType>
void MatrixPower<MatrixType>::initialize()
{
  const ComplexSchur<MatrixType> schurOfA(m_A);
  JacobiRotation<ComplexScalar> rot;
  ComplexScalar eigenvalue;
  m_fT.resizeLike(m_A);
  m_T = schurOfA.matrixT();
  m_U = schurOfA.matrixU();
  // Ratio of the largest to the smallest eigenvalue magnitude on T's
  // diagonal. Also serves as the "initialize() has run" flag (nonzero).
  m_conditionNumber = m_T.diagonal().array().abs().maxCoeff() / m_T.diagonal().array().abs().minCoeff();
  // Move zero eigenvalues to the bottom right corner.
  for (Index i = cols()-1; i>=0; --i) {
    // NOTE(review): returns before m_nulls is updated when the rank drops to
    // 2 or below — presumably intentional; confirm against upstream Eigen.
    if (m_rank <= 2)
      return;
    if (m_T.coeff(i,i) == RealScalar(0)) {
      // Chase the zero diagonal entry downwards with Givens rotations,
      // applied to T on both sides and accumulated into U so that
      // A = U T U^* remains valid.
      for (Index j=i+1; j < m_rank; ++j) {
        eigenvalue = m_T.coeff(j,j);
        rot.makeGivens(m_T.coeff(j-1,j), eigenvalue);
        m_T.applyOnTheRight(j-1, j, rot);
        m_T.applyOnTheLeft(j-1, j, rot.adjoint());
        m_T.coeffRef(j-1,j-1) = eigenvalue;
        m_T.coeffRef(j,j) = RealScalar(0);
        m_U.applyOnTheRight(j-1, j, rot);
      }
      --m_rank;
    }
  }
  m_nulls = rows() - m_rank;
  if (m_nulls) {
    eigen_assert(m_T.bottomRightCorner(m_nulls, m_nulls).isZero()
        && "Base of matrix power should be invertible or with a semisimple zero eigenvalue.");
    // The rows of m_fT corresponding to the null space are fixed to zero.
    m_fT.bottomRows(m_nulls).fill(RealScalar(0));
  }
}
template<typename MatrixType>
template<typename ResultType>
void MatrixPower<MatrixType>::computeIntPower(ResultType& res, RealScalar p)
{
  using std::abs;
  using std::fmod;
  // Binary (repeated-squaring) exponentiation on the magnitude of p; a
  // negative exponent is applied to the inverse of the base.
  RealScalar remaining = abs(p);
  if (p<0)
    m_tmp = m_A.inverse();
  else
    m_tmp = m_A;
  for (;;) {
    // Current lowest binary "digit" of the remaining exponent is odd:
    // multiply the accumulated power into res.
    if (fmod(remaining, 2) >= 1)
      res = m_tmp * res;
    remaining /= 2;
    if (remaining < 1)
      break;
    m_tmp *= m_tmp;  // square the base for the next digit
  }
}
// Multiplies res by the fractional power: raises the invertible
// m_rank x m_rank leading block of T to the power p, fills the coupling
// block in the rank-deficient case, then transforms back with U.
template<typename MatrixType>
template<typename ResultType>
void MatrixPower<MatrixType>::computeFracPower(ResultType& res, RealScalar p)
{
  Block<ComplexMatrix,Dynamic,Dynamic> blockTp(m_fT, 0, 0, m_rank, m_rank);
  eigen_assert(m_conditionNumber);          // initialize() must have run
  eigen_assert(m_rank + m_nulls == rows());
  MatrixPowerAtomic<ComplexMatrix>(m_T.topLeftCorner(m_rank, m_rank), p).compute(blockTp);
  if (m_nulls) {
    // Top-right coupling block: T11^{-1} * (T11^p * T12), solved against the
    // upper-triangular T11.
    m_fT.topRightCorner(m_rank, m_nulls) = m_T.topLeftCorner(m_rank, m_rank).template triangularView<Upper>()
        .solve(blockTp * m_T.topRightCorner(m_rank, m_nulls));
  }
  revertSchur(m_tmp, m_fT, m_U);
  res = m_tmp * res;
}
// Complex-valued result: undo the Schur transform, res = U * T * U^*.
template<typename MatrixType>
template<int Rows, int Cols, int Options, int MaxRows, int MaxCols>
inline void MatrixPower<MatrixType>::revertSchur(
    Matrix<ComplexScalar, Rows, Cols, Options, MaxRows, MaxCols>& res,
    const ComplexMatrix& T,
    const ComplexMatrix& U)
{ res.noalias() = U * (T.template triangularView<Upper>() * U.adjoint()); }
// Real-valued result: same as above but keeps only the real part of
// U * T * U^*.
template<typename MatrixType>
template<int Rows, int Cols, int Options, int MaxRows, int MaxCols>
inline void MatrixPower<MatrixType>::revertSchur(
    Matrix<RealScalar, Rows, Cols, Options, MaxRows, MaxCols>& res,
    const ComplexMatrix& T,
    const ComplexMatrix& U)
{ res.noalias() = (U * (T.template triangularView<Upper>() * U.adjoint())).real(); }
/**
* \ingroup MatrixFunctions_Module
*
* \brief Proxy for the matrix power of some matrix (expression).
*
* \tparam Derived type of the base, a matrix (expression).
*
* This class holds the arguments to the matrix power until it is
* assigned or evaluated for some other reason (so the argument
* should not be changed in the meantime). It is the return type of
* MatrixBase::pow() and related functions and most of the
* time this is the only way it is used.
*/
template<typename Derived>
class MatrixPowerReturnValue : public ReturnByValue< MatrixPowerReturnValue<Derived> >
{
  public:
    typedef typename Derived::PlainObject PlainObject;
    typedef typename Derived::RealScalar RealScalar;
    /**
     * \brief Constructor.
     *
     * \param[in] A  %Matrix (expression), the base of the matrix power.
     * \param[in] p  real scalar, the exponent of the matrix power.
     */
    MatrixPowerReturnValue(const Derived& A, RealScalar p) : m_A(A), m_p(p)
    { }
    /**
     * \brief Compute the matrix power.
     *
     * \param[out] result  \f$ A^p \f$ where \p A and \p p are as in the
     * constructor.
     */
    // Evaluates the base expression once and delegates to MatrixPower.
    template<typename ResultType>
    inline void evalTo(ResultType& result) const
    { MatrixPower<PlainObject>(m_A.eval()).compute(result, m_p); }
    Index rows() const { return m_A.rows(); }
    Index cols() const { return m_A.cols(); }
  private:
    const Derived& m_A;      // reference to the base expression; must outlive this proxy
    const RealScalar m_p;    // exponent captured by value
};
/**
* \ingroup MatrixFunctions_Module
*
* \brief Proxy for the matrix power of some matrix (expression).
*
* \tparam Derived type of the base, a matrix (expression).
*
* This class holds the arguments to the matrix power until it is
* assigned or evaluated for some other reason (so the argument
* should not be changed in the meantime). It is the return type of
* MatrixBase::pow() and related functions and most of the
* time this is the only way it is used.
*/
template<typename Derived>
class MatrixComplexPowerReturnValue : public ReturnByValue< MatrixComplexPowerReturnValue<Derived> >
{
  public:
    typedef typename Derived::PlainObject PlainObject;
    typedef typename std::complex<typename Derived::RealScalar> ComplexScalar;
    /**
     * \brief Constructor.
     *
     * \param[in] A  %Matrix (expression), the base of the matrix power.
     * \param[in] p  complex scalar, the exponent of the matrix power.
     */
    MatrixComplexPowerReturnValue(const Derived& A, const ComplexScalar& p) : m_A(A), m_p(p)
    { }
    /**
     * \brief Compute the matrix power.
     *
     * Because \p p is complex, \f$ A^p \f$ is simply evaluated as \f$
     * \exp(p \log(A)) \f$.
     *
     * \param[out] result  \f$ A^p \f$ where \p A and \p p are as in the
     * constructor.
     */
    template<typename ResultType>
    inline void evalTo(ResultType& result) const
    { result = (m_p * m_A.log()).exp(); }
    Index rows() const { return m_A.rows(); }
    Index cols() const { return m_A.cols(); }
  private:
    const Derived& m_A;        // reference to the base expression; must outlive this proxy
    const ComplexScalar m_p;   // exponent captured by value
};
namespace internal {
// Tell the ReturnByValue machinery which plain matrix type each of the
// proxies above evaluates into.
template<typename MatrixPowerType>
struct traits< MatrixPowerParenthesesReturnValue<MatrixPowerType> >
{ typedef typename MatrixPowerType::PlainObject ReturnType; };
template<typename Derived>
struct traits< MatrixPowerReturnValue<Derived> >
{ typedef typename Derived::PlainObject ReturnType; };
template<typename Derived>
struct traits< MatrixComplexPowerReturnValue<Derived> >
{ typedef typename Derived::PlainObject ReturnType; };
}
// Real exponent: returns a lazy proxy for \f$ A^p \f$ (see
// MatrixPowerReturnValue for evaluation semantics).
template<typename Derived>
const MatrixPowerReturnValue<Derived> MatrixBase<Derived>::pow(const RealScalar& p) const
{ return MatrixPowerReturnValue<Derived>(derived(), p); }
// Complex exponent: returns a lazy proxy evaluated as exp(p * log(A)) (see
// MatrixComplexPowerReturnValue).
template<typename Derived>
const MatrixComplexPowerReturnValue<Derived> MatrixBase<Derived>::pow(const std::complex<RealScalar>& p) const
{ return MatrixComplexPowerReturnValue<Derived>(derived(), p); }
} // namespace Eigen
#endif // EIGEN_MATRIX_POWER
| 23,422
| 32.177054
| 129
|
h
|
null |
LRMI-main/unsupported/Eigen/src/MoreVectorization/MathFunctions.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009 Rohit Garg <rpg.314@gmail.com>
// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_MOREVECTORIZATION_MATHFUNCTIONS_H
#define EIGEN_MOREVECTORIZATION_MATHFUNCTIONS_H
namespace Eigen {
namespace internal {
/** \internal \returns the arcsin of \a a (coeff-wise) */
// Generic fallback: applies std::asin directly, so it is only meaningful for
// scalar "packets"; vectorized packet types get dedicated overloads.
template<typename Packet> inline static Packet pasin(Packet a) { return std::asin(a); }
#ifdef EIGEN_VECTORIZE_SSE
// Vectorized single-precision arcsine for SSE. Evaluates a degree-4
// polynomial in two branches — |x| <= 0.5 directly, and |x| > 0.5 via the
// identity asin(x) = pi/2 - 2*asin(sqrt((1-x)/2)) — then merges the two
// results and restores the sign of the input.
template<> EIGEN_DONT_INLINE Packet4f pasin(Packet4f x)
{
  _EIGEN_DECLARE_CONST_Packet4f(half, 0.5);
  _EIGEN_DECLARE_CONST_Packet4f(minus_half, -0.5);
  // NOTE(review): p4f_3half and p4f_pi appear unused below.
  _EIGEN_DECLARE_CONST_Packet4f(3half, 1.5);
  _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(sign_mask, 0x80000000);
  _EIGEN_DECLARE_CONST_Packet4f(pi, 3.141592654);
  _EIGEN_DECLARE_CONST_Packet4f(pi_over_2, 3.141592654*0.5);
  // Polynomial coefficients for the asin series (highest degree first).
  _EIGEN_DECLARE_CONST_Packet4f(asin1, 4.2163199048E-2);
  _EIGEN_DECLARE_CONST_Packet4f(asin2, 2.4181311049E-2);
  _EIGEN_DECLARE_CONST_Packet4f(asin3, 4.5470025998E-2);
  _EIGEN_DECLARE_CONST_Packet4f(asin4, 7.4953002686E-2);
  _EIGEN_DECLARE_CONST_Packet4f(asin5, 1.6666752422E-1);
  Packet4f a = pabs(x);//got the absolute value
  Packet4f sign_bit= _mm_and_ps(x, p4f_sign_mask);//extracted the sign bit
  Packet4f z1,z2;//will need them during computation
  //will compute the two branches for asin
  //so first compare with half
  Packet4f branch_mask= _mm_cmpgt_ps(a, p4f_half);//this is to select which branch to take
  //both will be taken, and finally results will be merged
  //the branch for values >0.5
  {
    //the core series expansion
    // z1 = (1 - a)/2, x1 = sqrt(z1); the polynomial is evaluated in z1 and
    // scaled by x1, then doubled and subtracted from pi/2.
    z1=pmadd(p4f_minus_half,a,p4f_half);
    Packet4f x1=psqrt(z1);
    Packet4f s1=pmadd(p4f_asin1, z1, p4f_asin2);
    Packet4f s2=pmadd(s1, z1, p4f_asin3);
    Packet4f s3=pmadd(s2,z1, p4f_asin4);
    Packet4f s4=pmadd(s3,z1, p4f_asin5);
    Packet4f temp=pmul(s4,z1);//not really a madd but a mul by z so that the next term can be a madd
    z1=pmadd(temp,x1,x1);
    z1=padd(z1,z1);
    z1=psub(p4f_pi_over_2,z1);
  }
  {
    //the core series expansion
    // Direct branch for |x| <= 0.5: polynomial in z2 = a*a, scaled by a.
    Packet4f x2=a;
    z2=pmul(x2,x2);
    Packet4f s1=pmadd(p4f_asin1, z2, p4f_asin2);
    Packet4f s2=pmadd(s1, z2, p4f_asin3);
    Packet4f s3=pmadd(s2,z2, p4f_asin4);
    Packet4f s4=pmadd(s3,z2, p4f_asin5);
    Packet4f temp=pmul(s4,z2);//not really a madd but a mul by z so that the next term can be a madd
    z2=pmadd(temp,x2,x2);
  }
  /* select the correct result from the two branch evaluations */
  z1  = _mm_and_ps(branch_mask, z1);
  z2  = _mm_andnot_ps(branch_mask, z2);
  Packet4f z  = _mm_or_ps(z1,z2);
  /* update the sign */
  return _mm_xor_ps(z, sign_bit);
}
#endif // EIGEN_VECTORIZE_SSE
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_MOREVECTORIZATION_MATHFUNCTIONS_H
| 3,035
| 30.625
| 100
|
h
|
null |
LRMI-main/unsupported/Eigen/src/NonLinearOptimization/chkder.h
|
#define chkder_log10e 0.43429448190325182765
#define chkder_factor 100.
namespace Eigen {
namespace internal {
// Port of MINPACK's chkder: sanity-checks a user-supplied Jacobian fjac
// against finite differences of the function values.
//   mode 1: only fills xp with a perturbed point x + h (h ~ eps*|x|); the
//           caller must then evaluate the function at xp to obtain fvecp.
//   mode 2: given fvec = f(x), fvecp = f(xp) and fjac = f'(x), grades each
//           component into err: by MINPACK convention a value near 1 means
//           the derivative looks correct, near 0 means it looks wrong.
template<typename Scalar>
void chkder(
        const Matrix< Scalar, Dynamic, 1 >  &x,
        const Matrix< Scalar, Dynamic, 1 >  &fvec,
        const Matrix< Scalar, Dynamic, Dynamic > &fjac,
        Matrix< Scalar, Dynamic, 1 >  &xp,
        const Matrix< Scalar, Dynamic, 1 >  &fvecp,
        int mode,
        Matrix< Scalar, Dynamic, 1 >  &err
        )
{
    using std::sqrt;
    using std::abs;
    using std::log;
    typedef DenseIndex Index;

    const Scalar eps = sqrt(NumTraits<Scalar>::epsilon());
    const Scalar epsf = chkder_factor * NumTraits<Scalar>::epsilon();
    const Scalar epslog = chkder_log10e * log(eps);
    const Index m = fvec.size(), n = x.size();

    if (mode != 2) {
        /* mode = 1: build the perturbed evaluation point. */
        xp.resize(n);
        for (Index j = 0; j < n; ++j) {
            Scalar step = eps * abs(x[j]);
            if (step == 0.)
                step = eps;          // zero coordinate: use an absolute step
            xp[j] = x[j] + step;
        }
    }
    else {
        /* mode = 2: grade the Jacobian. First accumulate |x_j|-scaled
           Jacobian columns into err (the predicted change of f). */
        err.setZero(m);
        for (Index j = 0; j < n; ++j) {
            Scalar scale = abs(x[j]);
            if (scale == 0.)
                scale = 1.;
            err += scale * fjac.col(j);
        }
        /* Compare the predicted change against the observed difference
           fvecp - fvec, component by component. */
        for (Index i = 0; i < m; ++i) {
            Scalar temp = 1.;
            if (fvec[i] != 0. && fvecp[i] != 0. && abs(fvecp[i] - fvec[i]) >= epsf * abs(fvec[i]))
                temp = eps * abs((fvecp[i] - fvec[i]) / eps - err[i]) / (abs(fvec[i]) + abs(fvecp[i]));
            err[i] = 1.;
            if (temp > NumTraits<Scalar>::epsilon() && temp < eps)
                err[i] = (chkder_log10e * log(temp) - epslog) / epslog;
            if (temp >= eps)
                err[i] = 0.;
        }
    }
}
} // end namespace internal
} // end namespace Eigen
| 1,864
| 26.835821
| 103
|
h
|
null |
LRMI-main/unsupported/Eigen/src/NonLinearOptimization/covar.h
|
namespace Eigen {
namespace internal {
// Port of MINPACK's covar: given the upper-triangular factor r of a QR
// factorization and the column permutation ipvt, overwrites r in place with
// the covariance matrix (inverse of r^T * r, mapped back through the
// permutation). Columns whose diagonal entry falls below tol*|r(0,0)| are
// treated as singular and zeroed.
template <typename Scalar>
void covar(
        Matrix< Scalar, Dynamic, Dynamic > &r,
        const VectorXi &ipvt,
        Scalar tol = std::sqrt(NumTraits<Scalar>::epsilon()) )
{
    using std::abs;
    typedef DenseIndex Index;
    /* Local variables */
    Index i, j, k, l, ii, jj;
    bool sing;
    Scalar temp;
    /* Function Body */
    const Index n = r.cols();
    const Scalar tolr = tol * abs(r(0,0));
    Matrix< Scalar, Dynamic, 1 > wa(n);
    eigen_assert(ipvt.size()==n);
    /* form the inverse of r in the full upper triangle of r. */
    // l tracks the last non-singular column; -1 means all columns singular.
    l = -1;
    for (k = 0; k < n; ++k)
        if (abs(r(k,k)) > tolr) {
            r(k,k) = 1. / r(k,k);
            for (j = 0; j <= k-1; ++j) {
                temp = r(k,k) * r(j,k);
                r(j,k) = 0.;
                r.col(k).head(j+1) -= r.col(j).head(j+1) * temp;
            }
            l = k;
        }
    /* form the full upper triangle of the inverse of (r transpose)*r */
    /* in the full upper triangle of r. */
    for (k = 0; k <= l; ++k) {
        for (j = 0; j <= k-1; ++j)
            r.col(j).head(j+1) += r.col(k).head(j+1) * r(j,k);
        r.col(k).head(k+1) *= r(k,k);
    }
    /* form the full lower triangle of the covariance matrix */
    /* in the strict lower triangle of r and in wa. */
    // Entries are scattered to their permuted positions (ii, jj) = (ipvt[i],
    // ipvt[j]); singular columns (j > l) are zeroed.
    for (j = 0; j < n; ++j) {
        jj = ipvt[j];
        sing = j > l;
        for (i = 0; i <= j; ++i) {
            if (sing)
                r(i,j) = 0.;
            ii = ipvt[i];
            if (ii > jj)
                r(ii,jj) = r(i,j);
            if (ii < jj)
                r(jj,ii) = r(i,j);
        }
        wa[jj] = r(j,j);     // diagonal saved separately, restored below
    }
    /* symmetrize the covariance matrix in r. */
    r.topLeftCorner(n,n).template triangularView<StrictlyUpper>() = r.topLeftCorner(n,n).transpose();
    r.diagonal() = wa;
}
} // end namespace internal
} // end namespace Eigen
| 1,915
| 25.985915
| 101
|
h
|
null |
LRMI-main/unsupported/Eigen/src/NonLinearOptimization/fdjac1.h
|
namespace Eigen {
namespace internal {
// Port of MINPACK's fdjac1: forward-difference approximation of the Jacobian
// of Functor at x, stored column-wise in fjac. ml/mu are the lower/upper
// band widths; when ml + mu + 1 >= n a dense approximation is computed,
// otherwise a banded one using only ml+mu+1 function evaluations.
// Returns 0 on success, or the (negative) flag returned by the user functor
// to request early termination.
template<typename FunctorType, typename Scalar>
DenseIndex fdjac1(
        const FunctorType &Functor,
        Matrix< Scalar, Dynamic, 1 >  &x,
        Matrix< Scalar, Dynamic, 1 >  &fvec,
        Matrix< Scalar, Dynamic, Dynamic > &fjac,
        DenseIndex ml, DenseIndex mu,
        Scalar epsfcn)
{
    using std::sqrt;
    using std::abs;
    typedef DenseIndex Index;
    /* Local variables */
    Scalar h;
    Index j, k;
    Scalar eps, temp;
    Index msum;
    int iflag;
    Index start, length;
    /* Function Body */
    const Scalar epsmch = NumTraits<Scalar>::epsilon();
    const Index n = x.size();
    eigen_assert(fvec.size()==n);
    Matrix< Scalar, Dynamic, 1 >  wa1(n);
    Matrix< Scalar, Dynamic, 1 >  wa2(n);
    // Step length ~ sqrt(max(epsfcn, machine epsilon)), scaled per coordinate.
    eps = sqrt((std::max)(epsfcn,epsmch));
    msum = ml + mu + 1;
    if (msum >= n) {
        /* computation of dense approximate jacobian. */
        for (j = 0; j < n; ++j) {
            temp = x[j];
            h = eps * abs(temp);
            if (h == 0.)
                h = eps;          // zero coordinate: fall back to absolute step
            x[j] = temp + h;
            iflag = Functor(x, wa1);
            if (iflag < 0)
                return iflag;     // user functor requested abort
            x[j] = temp;          // restore x before the next column
            fjac.col(j) = (wa1-fvec)/h;
        }
    }else {
        /* computation of banded approximate jacobian. */
        // Perturb every msum-th coordinate simultaneously; their difference
        // quotients do not overlap within the band.
        // NOTE(review): msum = ml + mu + 1, so the (msum<0) arm of the loop
        // conditions below looks dead — presumably an f2c translation
        // artifact; confirm ml/mu are always non-negative.
        for (k = 0; k < msum; ++k) {
            for (j = k; (msum<0) ? (j>n): (j<n); j += msum) {
                wa2[j] = x[j];
                h = eps * abs(wa2[j]);
                if (h == 0.) h = eps;
                x[j] = wa2[j] + h;
            }
            iflag = Functor(x, wa1);
            if (iflag < 0)
                return iflag;
            for (j = k; (msum<0) ? (j>n): (j<n); j += msum) {
                x[j] = wa2[j];
                h = eps * abs(wa2[j]);
                if (h == 0.) h = eps;
                fjac.col(j).setZero();
                // Only the rows within the band [j-mu, j+ml] are affected.
                start = std::max<Index>(0,j-mu);
                length = (std::min)(n-1, j+ml) - start + 1;
                fjac.col(j).segment(start, length) =  ( wa1.segment(start, length)-fvec.segment(start, length))/h;
            }
        }
    }
    return 0;
}
} // end namespace internal
} // end namespace Eigen
| 2,225
| 26.825
| 113
|
h
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.