AlloyDenseSolve.h
/* * Copyright(C) 2015, Blake C. Lucas, Ph.D. (img.science@gmail.com) * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ #ifndef ALLOYDENSESOLVER_H_ #define ALLOYDENSESOLVER_H_ #include "AlloyMath.h" #include "AlloyImage.h" #include "AlloyDenseMatrix.h" namespace aly { bool SANITY_CHECK_DENSE_SOLVE(); bool SANITY_CHECK_ROBUST_SOLVE(); void PoissonBlend(const Image4f& in, Image4f& out, int iterations, int levels,float lambda = 0.99f, const std::function<bool(int,int)>& iterationMonitor = nullptr); void PoissonBlend(const Image4f& in, Image4f& out, int iterations,float lambda = 0.99f, const std::function<bool(int)>& iterationMonitor = nullptr); void PoissonBlend(const Image2f& in, Image2f& out, int iterations, int levels,float lambda = 0.99f, const std::function<bool(int,int)>& iterationMonitor = nullptr); void PoissonBlend(const Image2f& in, Image2f& out, int iterations,float lambda = 0.99f, const std::function<bool(int)>& iterationMonitor = nullptr); void PoissonInpaint(const Image4f& source, const Image4f& target, Image4f& out,int iterations, int levels, float lambda = 0.99f , const std::function<bool(int, int)>& iterationMonitor = nullptr); void PoissonInpaint(const Image4f& source, const Image4f& target, Image4f& out,int iterations, float lambda = 0.99f, const std::function<bool(int)>& iterationMonitor = nullptr); void PoissonInpaint(const Image2f& source, const Image2f& target, Image2f& out,int iterations, int levels, float lambda = 0.99f, const std::function<bool(int, int)>& iterationMonitor = nullptr); void PoissonInpaint(const Image2f& source, const Image2f& target, Image2f& out,int iterations, float lambda = 0.99f, const std::function<bool(int)>& iterationMonitor = nullptr); void LaplaceFill(const Image4f& sourceImg, Image4f& targetImg, int iterations,int levels, float lambda = 0.99f, const std::function<bool(int, int)>& iterationMonitor=nullptr); void LaplaceFill(const Image4f& sourceImg, Image4f& targetImg, int iterations,float lambda = 0.99f , const std::function<bool(int)>& iterationMonitor=nullptr); void LaplaceFill(const Image2f& sourceImg, Image2f& targetImg, int iterations,float lambda = 0.99f, const std::function<bool(int)>& iterationMonitor = nullptr); void LaplaceFill(const Image2f& sourceImg, Image2f& targetImg, int iterations,int levels, float lambda = 0.99f, const std::function<bool(int, int)>& iterationMonitor = nullptr); /****************************************************************************** * XLISP-STAT 2.1 Copyright (c) 1990, by Luke Tierney * XLISP version 2.1, 
Copyright (c) 1989, by David Betz. * * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that * copyright notice and this permission notice appear in supporting * documentation, and that the name of Luke Tierney and David Betz not be * used in advertising or publicity pertaining to distribution of the software * without specific, written prior permission. Luke Tierney and David Betz * make no representations about the suitability of this software for any * purpose. It is provided "as is" without express or implied warranty. * * LUKE TIERNEY AND DAVID BETZ DISCLAIM ALL WARRANTIES WITH REGARD TO THIS * SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, * IN NO EVENT SHALL LUKE TIERNEY NOR DAVID BETZ BE LIABLE FOR ANY SPECIAL, * INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR * PERFORMANCE OF THIS SOFTWARE. * * XLISP-STAT AUTHOR: * Luke Tierney * School of Statistics * University of Minnesota * Minneapolis, MN 55455 * (612) 625-7843 * * Email Address: * internet: luke@umnstat.stat.umn.edu * * XLISP AUTHOR: * David Betz * P.O. Box 144 * Peterborough, NH 03458 * (603) 924-4145 ****************************************************************************** * XLISP-STAT 2.1 was ported to the Amiga by * J.K. Lindsey * Faculty of Economic, Business and Social Sciences, * University of Liege, * Sart Tilman B31, * 4000 Liege, * Belgium * 32-41-56.29.64 * * The above permission and disclaimer also applies to all of the specifically * Amiga portions of this software, with the restriction that the Amiga * version not be used for any military-related applications. 
****************************************************************************** */ template<class T, int C> void SVD(const DenseMatrix<T, C>& M, DenseMatrix<T, C>& U, DenseMatrix<T, C>& D, DenseMatrix<T, C>& Vt, double zeroTolerance = 0) { const int m = M.rows; const int n = M.cols; std::vector<std::vector<double>> v(n, std::vector<double>(n)); std::vector<std::vector<double>> u(m, std::vector<double>(m)); std::vector<double> w(n); std::vector<double> rv1(n); int flag, i, its, j, jj, k, l, nm; double c, f, h, s, x, y, z; double anorm, g, scale; U.resize(m, m); Vt.resize(n, n); D.resize(m, n); for (int cc = 0; cc < C; cc++) { anorm = 0.0, g = 0.0, scale = 0.0; if (m < n) { throw std::runtime_error( "SVD error, rows must be greater than or equal to cols."); } for (int i = 0; i < m; i++) { for (int j = 0; j < n; j++) { u[i][j] = (double)M[i][j][cc]; } } for (i = 0; i < n; i++) { l = i + 1; rv1[i] = scale * g; g = s = scale = 0.0; if (i < m) { for (k = i; k < m; k++) scale += std::abs((double)u[k][i]); if (scale > zeroTolerance) { for (k = i; k < m; k++) { u[k][i] = ((double)u[k][i] / scale); s += ((double)u[k][i] * (double)u[k][i]); } f = (double)u[i][i]; g = -sign(std::sqrt(s), f); h = f * g - s; u[i][i] = (f - g); if (i != n - 1) { for (j = l; j < n; j++) { for (s = 0.0, k = i; k < m; k++) s += ((double)u[k][i] * (double)u[k][j]); f = s / h; for (k = i; k < m; k++) u[k][j] += (f * (double)u[k][i]); } } for (k = i; k < m; k++) u[k][i] = ((double)u[k][i] * scale); } } w[i] = (scale * g); g = s = scale = 0.0; if (i < m && i != n - 1) { for (k = l; k < n; k++) scale += std::abs((double)u[i][k]); if (scale > zeroTolerance) { for (k = l; k < n; k++) { u[i][k] = ((double)u[i][k] / scale); s += ((double)u[i][k] * (double)u[i][k]); } f = (double)u[i][l]; g = -sign(std::sqrt(s), f); h = f * g - s; u[i][l] = (f - g); for (k = l; k < n; k++) rv1[k] = (double)u[i][k] / h; if (i != m - 1) { for (j = l; j < m; j++) { for (s = 0.0, k = l; k < n; k++) s += ((double)u[j][k] * (double)u[i][k]); for (k = l; k < n; k++) u[j][k] += (s * rv1[k]); } } for (k = l; k < n; k++) u[i][k] = ((double)u[i][k] * scale); } } anorm = aly::max(anorm, (std::abs((double)w[i]) + std::abs(rv1[i]))); } for (i = n - 1; i >= 0; i--) { if (i < n - 1) { if (std::abs(g) > zeroTolerance) { for (j = l; j < n; j++) v[j][i] = (((double)u[i][j] / (double)u[i][l]) / g); for (j = l; j < n; j++) { for (s = 0.0, k = l; k < n; k++) s += ((double)u[i][k] * (double)v[k][j]); for (k = l; k < n; k++) v[k][j] += (s * (double)v[k][i]); } } for (j = l; j < n; j++) v[i][j] = v[j][i] = 0.0; } v[i][i] = 1.0; g = rv1[i]; l = i; } for (i = n - 1; i >= 0; i--) { l = i + 1; g = (double)w[i]; if (i < n - 1) for (j = l; j < n; j++) u[i][j] = 0.0; if (std::abs(g) > zeroTolerance) { g = 1.0 / g; if (i != n - 1) { for (j = l; j < n; j++) { for (s = 0.0, k = l; k < m; k++) s += ((double)u[k][i] * (double)u[k][j]); f = (s / (double)u[i][i]) * g; for (k = i; k < m; k++) u[k][j] += (f * (double)u[k][i]); } } for (j = i; j < m; j++) u[j][i] = ((double)u[j][i] * g); } else { for (j = i; j < m; j++) u[j][i] = 0.0; } ++u[i][i]; } for (k = n - 1; k >= 0; k--) { for (its = 0; its < 30; its++) { flag = 1; for (l = k; l >= 0; l--) { nm = l - 1; if (std::abs(rv1[l]) + anorm == anorm) { flag = 0; break; } if (nm >= 0 && std::abs((double)w[nm]) + anorm == anorm) { break; } } if (flag) { c = 0.0; s = 1.0; for (i = l; i <= k; i++) { f = s * rv1[i]; if (std::abs(f) + anorm != anorm) { g = (double)w[i]; h = pythag(f, g); w[i] = h; h = 1.0 / h; c = g * h; s = (-f * h); for (j = 0; j < 
m; j++) { y = (double)u[j][nm]; z = (double)u[j][i]; u[j][nm] = (y * c + z * s); u[j][i] = (z * c - y * s); } } } } z = (double)w[k]; if (l == k) { if (z < 0.0) { w[k] = (-z); for (j = 0; j < n; j++) v[j][k] = (-v[j][k]); } int iii, jjj; for (iii = k; (iii < n - 1) && (w[iii] < w[iii + 1]); iii++) { std::swap(w[iii], w[iii + 1]); for (jjj = 0; jjj < m; jjj++) std::swap(u[jjj][iii], u[jjj][iii + 1]); for (jjj = 0; jjj < n; jjj++) std::swap(v[jjj][iii], v[jjj][iii + 1]); } break; } if (its >= 30) { throw std::runtime_error("SVD did not converge."); } x = (double)w[l]; nm = k - 1; y = (double)w[nm]; g = rv1[nm]; h = rv1[k]; f = ((y - z) * (y + z) + (g - h) * (g + h)) / (2.0 * h * y); g = pythag(f, 1.0); f = ((x - z) * (x + z) + h * ((y / (f + sign(g, f))) - h)) / x; c = s = 1.0; for (j = l; j <= nm; j++) { i = j + 1; g = rv1[i]; y = (double)w[i]; h = s * g; g = c * g; z = pythag(f, h); rv1[j] = z; c = f / z; s = h / z; f = x * c + g * s; g = g * c - x * s; h = y * s; y = y * c; for (jj = 0; jj < n; jj++) { x = (double)v[jj][j]; z = (double)v[jj][i]; v[jj][j] = (x * c + z * s); v[jj][i] = (z * c - x * s); } z = pythag(f, h); w[j] = z; if (z) { z = 1.0 / z; c = f * z; s = h * z; } f = (c * g) + (s * y); x = (c * y) - (s * g); for (jj = 0; jj < m; jj++) { y = (double)u[jj][j]; z = (double)u[jj][i]; u[jj][j] = (y * c + z * s); u[jj][i] = (z * c - y * s); } } rv1[l] = 0.0; rv1[k] = f; w[k] = x; } } for (int i = 0; i < m; i++) { for (int j = 0; j < n; j++) { if (i == j) { D[i][j][cc] = (T)w[j]; } else { D[i][j][cc] = T(0); } } } for (int i = 0; i < m; i++) { for (int j = 0; j < m; j++) { U[i][j][cc] = (T)u[i][j]; } } for (int i = 0; i < n; i++) { for (int j = 0; j < n; j++) { Vt[i][j][cc] = (T)v[j][i]; } } } } template<class T, int C> DenseMatrix<T, C> inverse(const DenseMatrix<T, C>& M, double zeroTolerance = 0.0) { if (M.rows != M.cols) { throw std::runtime_error(MakeString()<<"Could not invert matrix. Rows and columns must agree: [" << M.rows << ", " << M.cols<<"]"); } DenseMatrix<T, C> U, D, Vt; SVD(M, U, D, Vt); int K = aly::min(D.rows, D.cols); for (int k = 0; k < K; k++) { vec<double, C> d = vec<double, C>(D[k][k]); for (int c = 0; c < C; c++) { if (std::abs(d[c]) > zeroTolerance) { d[c] = 1.0 / d[c]; } } D[k][k] = vec<T, C>(d); } return (U * D * Vt).transpose(); } template<class T, int C> Vector<T, C> SolveSVD(const DenseMatrix<T, C>& A, const Vector<T, C>& b) { if (A.rows != (int)b.size()) { throw std::runtime_error( MakeString() << "Matrix row dimensions and vector length must agree. A=[" << A.rows << "," << A.cols << "] b=[" << b.size() << "]"); } if (A.rows != A.cols) { DenseMatrix<T, C> At = A.transpose(); DenseMatrix<T, C> AtA = At * A; Vector<T, C> Atb = At * b; return inverse(AtA) * Atb; } else { return inverse(A) * b; } } //Back port of NIST's Java Implementation of LINPACK called JAMA. Code is licensed for free use in the public domain. http://math.nist.gov/javanumerics/jama/ /** LU Decomposition. <P> For an m-by-n matrix A with m >= n, the LU decomposition is an m-by-n unit lower triangular matrix L, an n-by-n upper triangular matrix U, and a permutation vector piv of length m so that A(piv,:) = L*U. If m < n, then L is m-by-m and U is m-by-n. <P> The LU decompostion with pivoting always exists, even if the matrix is singular, so the constructor will never fail. The primary use of the LU decomposition is in the solution of square systems of simultaneous linear equations. This will fail if isNonsingular() returns false. 
*/ template<class T, int C> bool LU(const DenseMatrix<T, C>& A, DenseMatrix<T, 1>& L, DenseMatrix<T, 1>& U, std::vector<int>& piv, int cc = 0, const double zeroTolerance = 0.0) { const int m = A.rows; const int n = A.cols; std::vector<std::vector<double>> LU(m, std::vector<double>(n, 0.0)); piv.resize(m); std::vector<double> LUcolj(m, 0.0); int pivsign; double* LUrowi; L.resize(m, n); U.resize(n, n); bool nonSingular = true; for (int i = 0; i < m; i++) { for (int j = 0; j < n; j++) { LU[i][j] = (double)A[i][j][cc]; } } for (int i = 0; i < m; i++) { piv[i] = i; } pivsign = 1; for (int j = 0; j < n; j++) { for (int i = 0; i < m; i++) { LUcolj[i] = LU[i][j]; } for (int i = 0; i < m; i++) { LUrowi = &LU[i][0]; int kmax = aly::min(i, j); double s = 0.0; for (int k = 0; k < kmax; k++) { s += LUrowi[k] * LUcolj[k]; } LUrowi[j] = LUcolj[i] -= s; } int p = j; for (int i = j + 1; i < m; i++) { if (std::abs(LUcolj[i]) > std::abs(LUcolj[p])) { p = i; } } if (p != j) { for (int k = 0; k < n; k++) { std::swap(LU[p][k], LU[j][k]); } std::swap(piv[p], piv[j]); pivsign = -pivsign; } if (j < m && std::abs(LU[j][j]) > zeroTolerance) { for (int i = j + 1; i < m; i++) { LU[i][j] /= LU[j][j]; } } } for (int j = 0; j < n; j++) { if (std::abs(LU[j][j]) <= zeroTolerance) { nonSingular = false; break; } } for (int i = 0; i < m; i++) { for (int j = 0; j < n; j++) { if (i > j) { L[i][j].x = (T)LU[i][j]; } else if (i == j) { L[i][j].x = T(1.0); } else { L[i][j].x = T(0.0); } } } for (int i = 0; i < n; i++) { for (int j = 0; j < n; j++) { if (i <= j) { U[i][j].x = T(LU[i][j]); } else { U[i][j].x = T(0.0); } } } return nonSingular; } template<class T, int C> Vector<T, C> SolveLU(const DenseMatrix<T, C>& A, const Vector<T, C>& b) { if (A.rows != (int)b.size()) { throw std::runtime_error( MakeString() << "Matrix row dimensions and vector length must agree. A=[" << A.rows << "," << A.cols << "] b=[" << b.size() << "]"); } if (A.rows != A.cols) { DenseMatrix<T, C> At = A.transpose(); DenseMatrix<T, C> AtA = At * A; Vector<T, C> Atb = At * b; int n = AtA.cols; Vector<T, C> x(A.cols); Vector<T, C> y(A.cols); DenseMatrix<T, 1> L, U; std::vector<int> piv; for (int cc = 0; cc < C; cc++) { bool nonSingular = LU(AtA, L, U, piv, cc); if (!nonSingular) { throw std::runtime_error("Matrix is singular."); } // Forward solve Ly = b for (int i = 0; i < n; i++) { y[i][cc] = Atb[piv[i]][cc]; for (int j = 0; j < i; j++) { y[i][cc] -= L[i][j].x * y[j][cc]; } y[i][cc] /= L[i][i].x; } // Backward solve Ux = y for (int i = n - 1; i >= 0; i--) { x[i][cc] = y[i][cc]; for (int j = i + 1; j < n; j++) { x[i][cc] -= U[i][j].x * x[j][cc]; } x[i][cc] /= U[i][i].x; } } return x; } else { int n = A.cols; Vector<T, C> x(A.cols); Vector<T, C> y(A.cols); DenseMatrix<T, 1> L, U; std::vector<int> piv; for (int cc = 0; cc < C; cc++) { bool nonSingular = LU(A, L, U, piv, cc); if (!nonSingular) { throw std::runtime_error("Matrix is singular."); } // Forward solve Ly = b for (int i = 0; i < n; i++) { y[i][cc] = b[piv[i]][cc]; for (int j = 0; j < i; j++) { y[i][cc] -= L[i][j].x * y[j][cc]; } y[i][cc] /= L[i][i].x; } // Backward solve Ux = y for (int i = n - 1; i >= 0; i--) { x[i][cc] = y[i][cc]; for (int j = i + 1; j < n; j++) { x[i][cc] -= U[i][j].x * x[j][cc]; } x[i][cc] /= U[i][i].x; } } return x; } } /** QR Decomposition. <P> For an m-by-n matrix A with m >= n, the QR decomposition is an m-by-n orthogonal matrix Q and an n-by-n upper triangular matrix R so that A = Q*R. 
<P> The QR decompostion always exists, even if the matrix does not have full rank, so the constructor will never fail. The primary use of the QR decomposition is in the least squares solution of nonsquare systems of simultaneous linear equations. This will fail if isFullRank() returns false. */ template<class T, int C> bool QR(const DenseMatrix<T, C>& A, DenseMatrix<T, C>& Q, DenseMatrix<T, C>& R) { const int m = A.rows; const int n = A.cols; std::vector<std::vector<double>> QR(m, std::vector<double>(n)); std::vector<double> Rdiag(m); R.resize(n, n); Q.resize(m, n); bool nonSingular = true; for (int cc = 0; cc < C; cc++) { for (int i = 0; i < m; i++) { for (int j = 0; j < n; j++) { QR[i][j] = (double)A[i][j][cc]; } } for (int k = 0; k < n; k++) { double nrm = 0; for (int i = k; i < m; i++) { nrm = pythag(nrm, QR[i][k]); } if (nrm != 0.0) { if (QR[k][k] < 0) { nrm = -nrm; } for (int i = k; i < m; i++) { QR[i][k] /= nrm; } QR[k][k] += 1.0; for (int j = k + 1; j < n; j++) { double s = 0.0; for (int i = k; i < m; i++) { s += QR[i][k] * QR[i][j]; } s = -s / QR[k][k]; for (int i = k; i < m; i++) { QR[i][j] += s * QR[i][k]; } } } Rdiag[k] = -nrm; } for (int i = 0; i < n; i++) { for (int j = 0; j < n; j++) { if (i < j) { R[i][j][cc] = T(QR[i][j]); } else if (i == j) { R[i][j][cc] = T(Rdiag[i]); } else { R[i][j][cc] = T(0.0); } } } for (int j = 0; j < n; j++) { if (Rdiag[j] == 0) { nonSingular = false; break; } } for (int k = n - 1; k >= 0; k--) { for (int i = 0; i < m; i++) { Q[i][k][cc] = T(0.0); } Q[k][k][cc] = T(1.0); for (int j = k; j < n; j++) { if (QR[k][k] != 0) { double s = 0.0; for (int i = k; i < m; i++) { s += QR[i][k] * Q[i][j][cc]; } s = -s / QR[k][k]; for (int i = k; i < m; i++) { Q[i][j][cc] += T(s * QR[i][k]); } } } } } return nonSingular; } template<class T, int C> Vector<T, C> SolveQR(const DenseMatrix<T, C>& A, const Vector<T, C>& b) { if (A.rows != (int)b.size()) { throw std::runtime_error( MakeString() << "Matrix row dimensions and vector length must agree. 
A=[" << A.rows << "," << A.cols << "] b=[" << b.size() << "]"); } if (A.rows != A.cols) { DenseMatrix<T, C> At = A.transpose(); DenseMatrix<T, C> AtA = At * A; Vector<T, C> Atb = At * b; int n = AtA.cols; Vector<T, C> x(A.cols); DenseMatrix<T, C> Q, R; bool nonSingular = QR(AtA, Q, R); if (!nonSingular) { throw std::runtime_error("Matrix is singular."); } // Compute Y = transpose(Q)*B x = Q.transpose() * Atb; // Solve R*X = Y; for (int k = n - 1; k >= 0; k--) { x[k] /= R[k][k]; for (int i = 0; i < k; i++) { x[i] -= x[k] * R[i][k]; } } return x; } else { int n = A.cols; Vector<T, C> x(A.cols); DenseMatrix<T, C> Q, R; bool nonSingular = QR(A, Q, R); if (!nonSingular) { throw std::runtime_error("Matrix is singular."); } // Compute Y = transpose(Q)*B x = Q.transpose() * b; // Solve R*X = Y; for (int k = n - 1; k >= 0; k--) { x[k] /= R[k][k]; for (int i = 0; i < k; i++) { x[i] -= x[k] * R[i][k]; } } return x; } } enum class MatrixFactorization { SVD, QR, LU }; template<class C, class R> std::basic_ostream<C, R> & operator <<( std::basic_ostream<C, R> & ss, const MatrixFactorization& type) { switch (type) { case MatrixFactorization::SVD: return ss << "SVD"; case MatrixFactorization::QR: return ss << "QR"; case MatrixFactorization::LU: return ss << "LU"; } return ss; } template<class T, int C> Vector<T, C> Solve(const DenseMatrix<T, C>& A, const Vector<T, C>& b, MatrixFactorization factor = MatrixFactorization::SVD) { switch (factor) { case MatrixFactorization::SVD: return SolveSVD(A, b); case MatrixFactorization::QR: return SolveQR(A, b); case MatrixFactorization::LU: return SolveLU(A, b); } return Vector<T, C>(); } template<class T, int C> Vector<T, C> SolveRobust(const DenseMatrix<T, C>& A, const Vector<T, C>& b, int p = 1, int iterations = 100, double errorTolerance = 1E-6f, double zeroTolerance = 1E-16, MatrixFactorization factor = MatrixFactorization::SVD) { int N = (int)b.size(); Vector<T, C> W(N); W.set(vec<T, C>(T(1))); Vector<T, C> X; double lastError = std::numeric_limits<double>::max(); for (int iter = 0;iter < iterations;iter++) { X = SolveQR(W*A, W*b); Vector<T, C> R = b - A*X; vec<double, C> err = lengthVecSqr(R); double e = lengthL1(err) / N; if (std::abs(e - lastError) < errorTolerance) { break; } lastError = e; #pragma omp parallel for for (int n = 0;n < N;n++) { vec<T, C> w; vec<T, C> r = R[n]; for (int c = 0;c < C;c++) { w[c] = std::pow(std::max(std::abs(r[c]), T(zeroTolerance)), -p); } W[n] = w; } } return X; } template<class T, int C> Vector<T, C> SolveRansac(const DenseMatrix<T, C>& A, const Vector<T, C>& b, int sampleSize, double inlierTolerance = 0.01f, int iterations = 100) { int N = (int)b.size(); DenseMatrix<T, C> As; Vector<T, C> bs; std::vector<int> order(N); std::random_device rd; std::mt19937 g(rd()); for (int n = 0;n < N;n++) { order[n] = n; } Vector<T, C> X; if (N <= sampleSize) { return SolveQR(A, b); } std::shuffle(order.begin(), order.end(), g); Vector<T, C> W(N); int bestInliner = 0; Vector<T, C> BestX; int offset = 0; for (int iter = 0;iter < iterations;iter++) { W.set(vec<T, C>(T(0.0))); As.resize(sampleSize, A.cols); bs.resize(sampleSize); for (int i = 0;i < sampleSize;i++) { int idx = order[(i + offset)%N]; As[i] = A[idx]; bs[i] = b[idx]; } X = SolveQR(As, bs); Vector<T, C> R = b - A*X; int count = 0; for (int n = 0;n < N;n++) { if (lengthL1(R[n]) < inlierTolerance) { count++; } } if (count > bestInliner) { BestX = X; bestInliner = count; } if (count > std::max(N / 2, sampleSize))break;//Good enough set of inliers, break offset += sampleSize; if (offset 
>= N) { std::shuffle(order.begin(), order.end(), g); offset = 0; } } if (bestInliner<A.cols) { return SolveQR(A, b); } order.clear(); Vector<T, C> R = b - A*BestX; for (int n = 0;n < N;n++) { if (lengthL1(R[n]) < inlierTolerance) { order.push_back(n); } } if ((int)order.size() < sampleSize) { return BestX; } As.resize((int)order.size(), A.cols); bs.resize((int)order.size()); for (int i = 0;i < (int)order.size();i++) { int idx = order[i]; As[i] = A[idx]; bs[i] = b[idx]; } X = SolveQR(As, bs); return X; } } #endif
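The header above only declares and defines the dense solvers; it does not show how they are called. The following is a hypothetical usage sketch (not part of AlloyDenseSolve.h): the DenseMatrix/Vector construction mirrors the resize()/operator[] access pattern used inside the header for the C=1 case, and may need adjusting to the real AlloyDenseMatrix API.

// Hypothetical usage sketch, assuming the element-access pattern used in the header.
#include "AlloyDenseSolve.h"
using namespace aly;

void ExampleSolve() {
    DenseMatrix<float, 1> A;
    Vector<float, 1> b;
    A.resize(4, 2);                      // over-determined 4x2 system
    b.resize(4);
    for (int i = 0; i < 4; i++) {
        A[i][0].x = 1.0f;                // constant column
        A[i][1].x = (float) i;           // linear column
        b[i].x    = 2.0f + 3.0f * i;     // observations, exact fit y = 2 + 3x
    }
    // SVD is the default factorization; QR and LU are also available.
    Vector<float, 1> x  = Solve(A, b, MatrixFactorization::QR);
    // IRLS re-weighting on residuals, useful when b contains outliers.
    Vector<float, 1> xr = SolveRobust(A, b);
}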
mm.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <sys/time.h>
#include <gsl/gsl_cblas.h>

#define N 5000

void mm_bruteforce_ijk(double *a, double *b, double *c, int I, int K, int J) {
    for (int i = 0; i < I; i++) {
        for (int j = 0; j < J; j++) {
            for (int k = 0; k < K; k++) {
                c[i * J + j] += a[i * K + k] * b[k * J + j];
            }
        }
    }
}

void mm_bruteforce_ikj(double *a, double *b, double *c, int I, int K, int J) {
    for (int i = 0; i < I; i++) {
        for (int k = 0; k < K; k++) {
            double dv = a[i * K + k];
            for (int j = 0; j < J; j++) {
                c[i * J + j] += dv * b[k * J + j];
            }
        }
    }
}

void mm_omp(double *a, double *b, double *c, int I, int K, int J) {
    #pragma omp parallel for
    for (int i = 0; i < I; i++) {
        for (int k = 0; k < K; k++) {
            register double dv = a[i * K + k];
            for (int j = 0; j < J; j++) {
                c[i * J + j] += dv * b[k * J + j];
            }
        }
    }
}

void mm_cblas_dgemm(double *a, double *b, double *c, int p, int q, int r) {
    int l = p;
    int m = q;
    int n = r;
    int lda = m;
    int ldb = n;
    int ldc = n;
    double alpha = 1.0;
    double beta = 0.0;
    cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, l, n, m,
                alpha, a, lda, b, ldb, beta, c, ldc);
}

void init_arange(double *mat, int a, int b) {
    for (int i = 0; i < (a * b); i++) {
        mat[i] = i + 1;
    }
}

void init_value(double *mat, int a, int b, double value) {
    for (int i = 0; i < (a * b); i++) {
        mat[i] = value;
    }
}

void init_zero(double *mat, int a, int b) { init_value(mat, a, b, 0); }

void init_one(double *mat, int a, int b) { init_value(mat, a, b, 1); }

void timer_start(struct timeval *pstv) { gettimeofday(pstv, NULL); }

void timer_end(struct timeval *petv) { gettimeofday(petv, NULL); }

void timer_print(struct timeval *pstv, struct timeval *petv) {
    time_t sec;
    suseconds_t usec;
    sec = petv->tv_sec - pstv->tv_sec;
    usec = petv->tv_usec - pstv->tv_usec;
    if (usec < 0) {
        sec--;
        usec += 1000000;
    }
    /* zero-pad the microsecond field so 1 s 5000 us prints as 1.005000, not 1.5000 */
    printf("elapsed time : %ld.%06ld\n", (long) sec, (long) usec);
}

typedef void (*fptr_mm)(double *a, double *b, double *c, int I, int K, int J);

void check_etime_mm(double *a, double *b, double *c, int I, int K, int J, fptr_mm mm) {
    struct timeval stv;
    struct timeval etv;
    init_zero(c, I, J);
    timer_start(&stv);
    mm(a, b, c, I, K, J);
    timer_end(&etv);
    timer_print(&stv, &etv);
}

int main(int argc, char *argv[]) {
    int I, K, J;
    I = K = J = N;
    double *a = (double *) malloc(sizeof(double) * I * K);
    double *b = (double *) malloc(sizeof(double) * K * J);
    double *c = (double *) malloc(sizeof(double) * I * J);
    init_arange(a, I, K);
    init_arange(b, K, J);
    check_etime_mm(a, b, c, I, K, J, mm_bruteforce_ijk);
    check_etime_mm(a, b, c, I, K, J, mm_bruteforce_ikj);
    check_etime_mm(a, b, c, I, K, J, mm_omp);
    check_etime_mm(a, b, c, I, K, J, mm_cblas_dgemm);
    free(a);
    free(b);
    free(c);
    return 0;
}
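mm.c compares loop orderings (ijk vs. ikj), an OpenMP parallel loop, and CBLAS. A common further step is cache blocking; the sketch below (not part of mm.c, tile size BS = 64 is an assumption) matches the fptr_mm signature, so it could be timed with check_etime_mm like the other kernels.

/* Sketch of a cache-blocked (tiled) ikj multiply; BS is an assumed tile size. */
#define BS 64

void mm_blocked_ikj(double *a, double *b, double *c, int I, int K, int J) {
    for (int ii = 0; ii < I; ii += BS)
        for (int kk = 0; kk < K; kk += BS)
            for (int jj = 0; jj < J; jj += BS)
                /* work on one BSxBSxBS tile at a time to keep operands in cache */
                for (int i = ii; i < I && i < ii + BS; i++)
                    for (int k = kk; k < K && k < kk + BS; k++) {
                        double dv = a[i * K + k];
                        for (int j = jj; j < J && j < jj + BS; j++)
                            c[i * J + j] += dv * b[k * J + j];
                    }
}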
Kernel_3d_DGZ.h
#ifndef KRIPKE_KERNEL_3D_DGZ_H__ #define KRIPKE_KERNEL_3D_DGZ_H__ #include<Kripke/Kernel.h> #include<Grid.h> class Kernel_3d_DGZ : public Kernel { public: typedef std::vector<std::vector<double>> result_type; // Grid is needed to access metadata (e.g. gd_sets) stored on it. Grid_Data* grid_data; int group_set; int direction_set; Kernel_3d_DGZ(Grid_Data*); virtual ~Kernel_3d_DGZ(); virtual Nesting_Order nestingPsi(void) const; virtual Nesting_Order nestingPhi(void) const; virtual void LTimes(Grid_Data *grid_data); virtual void LPlusTimes(Grid_Data *grid_data); template<typename GridView, typename IPlane, typename JPlane, typename KPlane> result_type operator()(GridView& grid_view, IPlane const& i_plane, JPlane const& j_plane, KPlane const& k_plane); void define_type(stapl::typer& t) { t.member(grid_data); t.member(group_set); t.member(direction_set); } }; /* Sweep routine for Diamond-Difference */ /* Macros for offsets with fluxes on cell faces */ #define I_PLANE_INDEX(j, k) (k)*(local_jmax) + (j) #define J_PLANE_INDEX(i, k) (k)*(local_imax) + (i) #define K_PLANE_INDEX(i, j) (j)*(local_imax) + (i) #define Zonal_INDEX(i, j, k) (i) + (local_imax)*(j) \ + (local_imax)*(local_jmax)*(k) template<typename GridView, typename IPlane, typename JPlane, typename KPlane> std::vector<std::vector<double>> Kernel_3d_DGZ::operator()(GridView& grid_view, IPlane const& i_plane_in, JPlane const& j_plane_in, KPlane const& k_plane_in) { typedef std::array<typename GridView::value_type::property_type:: storage_type::index, 2> index_type; result_type result(3); std::vector<double> i_plane = i_plane_in[0]; std::vector<double> j_plane = j_plane_in[0]; std::vector<double> k_plane = k_plane_in[0]; // grid_data, group_set, and direction_set are data members of the Kernel Group_Dir_Set& gd_set = grid_data->gd_sets()[group_set][direction_set]; int num_directions = gd_set.num_directions; int num_groups = gd_set.num_groups; Directions *direction = gd_set.directions; //int num_zones = grid_data->num_zones(); int local_imax = grid_data->nzones()[0]; int local_jmax = grid_data->nzones()[1]; int local_kmax = grid_data->nzones()[2]; // Comment copied blindly // TGS : compiler detects unused variable. Are the macros correct? 
// int local_kmax = grid_data->nzones()[2]; auto dx = grid_data->deltas(0); auto dy = grid_data->deltas(1); auto dz = grid_data->deltas(2); // All directions have same id,jd,kd, since these are all one Direction Set // So pull that information out now int octant = direction[0].octant; Grid_Sweep_Block const &extent = grid_data->octant_extent()[octant]; std::vector<double> xcos_dxi_all(local_imax); std::vector<double> ycos_dyj_all(local_jmax); std::vector<double> zcos_dzk_all(local_kmax); for (int d = 0; d < num_directions; ++d) { double xcos = direction[d].xcos; double ycos = direction[d].ycos; double zcos = direction[d].zcos; index_type psi_z_idx{{0, d}}; index_type rhs_z_idx{{0, d}}; for (int i = 0; i < local_imax; ++i) xcos_dxi_all[i] = 2.0 * xcos / dx[i + 1]; for (int j = 0; j < local_jmax; ++j) ycos_dyj_all[j] = 2.0 * ycos / dy[j + 1]; for (int k = 0; k < local_kmax; ++k) zcos_dzk_all[k] = 2.0 * zcos / dz[k + 1]; #ifdef KRIPKE_USE_OPENMP #pragma omp parallel for #endif for (int group = 0; group < num_groups; ++group) { index_type sigt_idx{{gd_set.group0 + group, 0}}; psi_z_idx[0] = group; rhs_z_idx[0] = group; int plane_idx = num_directions * num_groups + d * num_groups + group; for (int i = extent.start_i; i != extent.end_i; i += extent.inc_i) { double xcos_dxi = 2.0 * xcos / xcos_dxi_all[i]; for (int j = extent.start_j; j != extent.end_j; j += extent.inc_j) { double ycos_dyj = 2.0 * ycos / ycos_dyj_all[j]; double & psi_bo_d_g_z = k_plane[K_PLANE_INDEX(i, j) * plane_idx]; for (int k = extent.start_k; k != extent.end_k; k += extent.inc_k) { double zcos_dzk = 2.0 * zcos / zcos_dzk_all[k]; // get a reference to the vertex being processed int z = Zonal_INDEX(i, j, k); auto v = (*grid_view.find_vertex(z)).property(); double & psi_lf_d_g_z = i_plane[I_PLANE_INDEX(j, k) * plane_idx]; double & psi_fr_d_g_z = j_plane[J_PLANE_INDEX(i, k) * plane_idx]; /* Calculate new zonal flux */ double psi_d_g_z = (v.rhs()[group_set][direction_set](rhs_z_idx) + psi_lf_d_g_z * xcos_dxi + psi_fr_d_g_z * ycos_dyj + psi_bo_d_g_z * zcos_dzk) / (xcos_dxi + ycos_dyj + zcos_dzk + v.sigt()(sigt_idx)); v.psi()[group_set][direction_set](psi_z_idx) = psi_d_g_z; /* Apply diamond-difference relationships */ psi_d_g_z *= 2.0; psi_lf_d_g_z = psi_d_g_z - psi_lf_d_g_z; psi_fr_d_g_z = psi_d_g_z - psi_fr_d_g_z; psi_bo_d_g_z = psi_d_g_z - psi_bo_d_g_z; } } } } // Group } // Direction result[0] = std::move(i_plane); result[1] = std::move(j_plane); result[2] = std::move(k_plane); return result; } #endif
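The core of the sweep above is the per-zone diamond-difference update ("Calculate new zonal flux" / "Apply diamond-difference relationships"). The following is a minimal standalone sketch of that arithmetic with all Kripke/STAPL containers stripped away; the numbers are illustrative only and are not taken from the kernel.

// Standalone sketch of the diamond-difference zonal update, illustrative values only.
#include <cstdio>

int main() {
    double rhs = 1.0, sigt = 0.5;                     // zonal source and total cross section
    double psi_lf = 0.2, psi_fr = 0.3, psi_bo = 0.1;  // incoming face fluxes
    double xcos_dxi = 2.0, ycos_dyj = 1.5, zcos_dzk = 1.0;

    // New zonal flux (same expression as psi_d_g_z in the kernel above)
    double psi = (rhs + psi_lf * xcos_dxi + psi_fr * ycos_dyj + psi_bo * zcos_dzk)
                 / (xcos_dxi + ycos_dyj + zcos_dzk + sigt);

    // Diamond-difference relationships give the outgoing face fluxes
    psi_lf = 2.0 * psi - psi_lf;
    psi_fr = 2.0 * psi - psi_fr;
    psi_bo = 2.0 * psi - psi_bo;

    std::printf("psi = %g, outgoing faces = %g %g %g\n", psi, psi_lf, psi_fr, psi_bo);
    return 0;
}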
DRB018-plusplus-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* Data race on outLen due to ++ operation. Adding private (outLen) can avoid race condition. But it is wrong semantically. Data races on outLen also cause output[outLen++] to have data races. Data race pairs (we allow two pairs to preserve the original code pattern): 1. outLen@72 vs. outLen@72 2. output[]@72 vs. output[]@72 */ #include <stdlib.h> #include <stdio.h> int input[1000]; int output[1000]; int main() { int i ; int inLen=1000 ; int outLen = 0; for (i=0; i<inLen; ++i) input[i]= i; #pragma omp parallel for for (i=0; i<inLen; ++i) { output[outLen++] = input[i] ; } printf("output[500]=%d\n",output[500]); return 0; }
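The header comment in DRB018 explains that the race is on outLen++ and that private(outLen) would be semantically wrong. One standard repair is an atomic capture of the counter, sketched below; this is not part of DataRaceBench (the benchmark deliberately keeps the racy version), and the mapping from i to output slot becomes nondeterministic.

/* Sketch of a race-free variant using OpenMP atomic capture; not part of DataRaceBench. */
#include <stdio.h>

int input[1000];
int output[1000];

int main(void) {
    int inLen = 1000;
    int outLen = 0;
    for (int i = 0; i < inLen; ++i) input[i] = i;

    #pragma omp parallel for
    for (int i = 0; i < inLen; ++i) {
        int idx;
        #pragma omp atomic capture
        idx = outLen++;           /* atomic read-modify-write of the shared counter */
        output[idx] = input[i];   /* each iteration writes a distinct slot */
    }
    printf("output[500]=%d\n", output[500]);  /* value depends on scheduling */
    return 0;
}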
GB_AxB_rowscale_meta.c
//------------------------------------------------------------------------------
// GB_AxB_rowscale_meta: C=D*B where D is a square diagonal matrix
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// All entries in C=D*B are computed fully in parallel.

{

    // Bx is unused if the operator is FIRST
    #include "GB_unused.h"

    //--------------------------------------------------------------------------
    // get C, D, and B
    //--------------------------------------------------------------------------

    const GB_ATYPE *restrict Dx = D_is_pattern ? NULL : D->x ;
    const GB_BTYPE *restrict Bx = B_is_pattern ? NULL : B->x ;
    const int64_t *restrict Bi = B->i ;
    int64_t bnz = GB_NNZ (B) ;

    //--------------------------------------------------------------------------
    // C=D*B
    //--------------------------------------------------------------------------

    int ntasks = (nthreads == 1) ? 1 : (32 * nthreads) ;
    ntasks = GB_IMIN (bnz, ntasks) ;

    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
    for (int tid = 0 ; tid < ntasks ; tid++)
    {
        int64_t pstart, pend ;
        GB_PARTITION (pstart, pend, bnz, tid, ntasks) ;
        GB_PRAGMA_VECTORIZE
        for (int64_t p = pstart ; p < pend ; p++)
        {
            int64_t i = Bi [p] ;              // get row index of B(i,j)
            GB_GETA (dii, Dx, i) ;            // dii = D(i,i)
            GB_GETB (bij, Bx, p) ;            // bij = B(i,j)
            GB_BINOP (GB_CX (p), dii, bij) ;  // C(i,j) = dii*bij
        }
    }
}
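The GB_* macros hide the type and operator plumbing. For readers unfamiliar with them, here is a macro-free sketch in plain C of the same row-scale idea; the names and the assumption that C shares B's sparsity pattern are illustrative, not the GraphBLAS API.

/* Macro-free sketch of C = D*B row scaling; names are assumptions. */
#include <stdint.h>

void rowscale_diag_sketch(const double *Dx,   /* Dx[i] = D(i,i) */
                          const double *Bx,   /* numerical values of B's entries */
                          const int64_t *Bi,  /* row index of each entry of B */
                          double *Cx,         /* output values, same pattern as B */
                          int64_t bnz)        /* number of entries in B */
{
    #pragma omp parallel for
    for (int64_t p = 0; p < bnz; p++) {
        int64_t i = Bi[p];          /* row index of B(i,j) */
        Cx[p] = Dx[i] * Bx[p];      /* C(i,j) = D(i,i) * B(i,j) */
    }
}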
threshold.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % TTTTT H H RRRR EEEEE SSSSS H H OOO L DDDD % % T H H R R E SS H H O O L D D % % T HHHHH RRRR EEE SSS HHHHH O O L D D % % T H H R R E SS H H O O L D D % % T H H R R EEEEE SSSSS H H OOO LLLLL DDDD % % % % % % MagickCore Image Threshold Methods % % % % Software Design % % Cristy % % October 1996 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/property.h" #include "magick/blob.h" #include "magick/cache-view.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colormap.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/configure.h" #include "magick/constitute.h" #include "magick/decorate.h" #include "magick/draw.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/effect.h" #include "magick/fx.h" #include "magick/gem.h" #include "magick/geometry.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/log.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/montage.h" #include "magick/option.h" #include "magick/pixel-private.h" #include "magick/quantize.h" #include "magick/quantum.h" #include "magick/random_.h" #include "magick/random-private.h" #include "magick/resize.h" #include "magick/resource_.h" #include "magick/segment.h" #include "magick/shear.h" #include "magick/signature-private.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #include "magick/threshold.h" #include "magick/transform.h" #include "magick/xml-tree.h" /* Define declarations. */ #define ThresholdsFilename "thresholds.xml" /* Typedef declarations. */ struct _ThresholdMap { char *map_id, *description; size_t width, height; ssize_t divisor, *levels; }; /* Static declarations. 
*/ static const char *MinimalThresholdMap = "<?xml version=\"1.0\"?>" "<thresholds>" " <threshold map=\"threshold\" alias=\"1x1\">" " <description>Threshold 1x1 (non-dither)</description>" " <levels width=\"1\" height=\"1\" divisor=\"2\">" " 1" " </levels>" " </threshold>" " <threshold map=\"checks\" alias=\"2x1\">" " <description>Checkerboard 2x1 (dither)</description>" " <levels width=\"2\" height=\"2\" divisor=\"3\">" " 1 2" " 2 1" " </levels>" " </threshold>" "</thresholds>"; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A d a p t i v e T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AdaptiveThresholdImage() selects an individual threshold for each pixel % based on the range of intensity values in its local neighborhood. This % allows for thresholding of an image whose global intensity histogram % doesn't contain distinctive peaks. % % The format of the AdaptiveThresholdImage method is: % % Image *AdaptiveThresholdImage(const Image *image, % const size_t width,const size_t height, % const ssize_t offset,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o width: the width of the local neighborhood. % % o height: the height of the local neighborhood. % % o offset: the mean offset. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *AdaptiveThresholdImage(const Image *image, const size_t width,const size_t height,const ssize_t offset, ExceptionInfo *exception) { #define ThresholdImageTag "Threshold/Image" CacheView *image_view, *threshold_view; Image *threshold_image; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket zero; MagickRealType number_pixels; ssize_t y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); threshold_image=CloneImage(image,0,0,MagickTrue,exception); if (threshold_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(threshold_image,DirectClass) == MagickFalse) { InheritException(exception,&threshold_image->exception); threshold_image=DestroyImage(threshold_image); return((Image *) NULL); } /* Local adaptive threshold. 
*/ status=MagickTrue; progress=0; GetMagickPixelPacket(image,&zero); number_pixels=(MagickRealType) (width*height); image_view=AcquireVirtualCacheView(image,exception); threshold_view=AcquireAuthenticCacheView(threshold_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,threshold_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; MagickPixelPacket channel_bias, channel_sum; register const IndexPacket *magick_restrict indexes; register const PixelPacket *magick_restrict p, *magick_restrict r; register IndexPacket *magick_restrict threshold_indexes; register PixelPacket *magick_restrict q; register ssize_t x; ssize_t u, v; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t) height/2L,image->columns+width,height,exception); q=GetCacheViewAuthenticPixels(threshold_view,0,y,threshold_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); threshold_indexes=GetCacheViewAuthenticIndexQueue(threshold_view); channel_bias=zero; channel_sum=zero; r=p; for (v=0; v < (ssize_t) height; v++) { for (u=0; u < (ssize_t) width; u++) { if (u == (ssize_t) (width-1)) { channel_bias.red+=r[u].red; channel_bias.green+=r[u].green; channel_bias.blue+=r[u].blue; channel_bias.opacity+=r[u].opacity; if (image->colorspace == CMYKColorspace) channel_bias.index=(MagickRealType) GetPixelIndex(indexes+(r-p)+u); } channel_sum.red+=r[u].red; channel_sum.green+=r[u].green; channel_sum.blue+=r[u].blue; channel_sum.opacity+=r[u].opacity; if (image->colorspace == CMYKColorspace) channel_sum.index=(MagickRealType) GetPixelIndex(indexes+(r-p)+u); } r+=image->columns+width; } for (x=0; x < (ssize_t) image->columns; x++) { MagickPixelPacket mean; mean=zero; r=p; channel_sum.red-=channel_bias.red; channel_sum.green-=channel_bias.green; channel_sum.blue-=channel_bias.blue; channel_sum.opacity-=channel_bias.opacity; channel_sum.index-=channel_bias.index; channel_bias=zero; for (v=0; v < (ssize_t) height; v++) { channel_bias.red+=r[0].red; channel_bias.green+=r[0].green; channel_bias.blue+=r[0].blue; channel_bias.opacity+=r[0].opacity; if (image->colorspace == CMYKColorspace) channel_bias.index=(MagickRealType) GetPixelIndex(indexes+x+(r-p)+0); channel_sum.red+=r[width-1].red; channel_sum.green+=r[width-1].green; channel_sum.blue+=r[width-1].blue; channel_sum.opacity+=r[width-1].opacity; if (image->colorspace == CMYKColorspace) channel_sum.index=(MagickRealType) GetPixelIndex(indexes+x+(r-p)+ width-1); r+=image->columns+width; } mean.red=(MagickRealType) (channel_sum.red/number_pixels+offset); mean.green=(MagickRealType) (channel_sum.green/number_pixels+offset); mean.blue=(MagickRealType) (channel_sum.blue/number_pixels+offset); mean.opacity=(MagickRealType) (channel_sum.opacity/number_pixels+offset); if (image->colorspace == CMYKColorspace) mean.index=(MagickRealType) (channel_sum.index/number_pixels+offset); SetPixelRed(q,((MagickRealType) GetPixelRed(q) <= mean.red) ? 0 : QuantumRange); SetPixelGreen(q,((MagickRealType) GetPixelGreen(q) <= mean.green) ? 0 : QuantumRange); SetPixelBlue(q,((MagickRealType) GetPixelBlue(q) <= mean.blue) ? 0 : QuantumRange); SetPixelOpacity(q,((MagickRealType) GetPixelOpacity(q) <= mean.opacity) ? 
0 : QuantumRange); if (image->colorspace == CMYKColorspace) SetPixelIndex(threshold_indexes+x,(((MagickRealType) GetPixelIndex( threshold_indexes+x) <= mean.index) ? 0 : QuantumRange)); p++; q++; } sync=SyncCacheViewAuthenticPixels(threshold_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_AdaptiveThresholdImage) #endif proceed=SetImageProgress(image,ThresholdImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } threshold_view=DestroyCacheView(threshold_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) threshold_image=DestroyImage(threshold_image); return(threshold_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A u t o T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AutoThresholdImage() automatically selects a threshold and replaces each % pixel in the image with a black pixel if the image intentsity is less than % the selected threshold otherwise white. % % The format of the AutoThresholdImage method is: % % MagickBooleanType AutoThresholdImage(Image *image, % const AutoThresholdMethod method,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: The image to auto-threshold. % % o method: choose from Kapur, OTSU, or Triangle. % % o exception: return any errors or warnings in this structure. % */ static double KapurThreshold(const Image *image,const double *histogram, ExceptionInfo *exception) { #define MaxIntensity 255 double *black_entropy, *cumulative_histogram, entropy, epsilon, maximum_entropy, *white_entropy; register ssize_t i, j; size_t threshold; /* Compute optimal threshold from the entopy of the histogram. */ cumulative_histogram=(double *) AcquireQuantumMemory(MaxIntensity+1UL, sizeof(*cumulative_histogram)); black_entropy=(double *) AcquireQuantumMemory(MaxIntensity+1UL, sizeof(*black_entropy)); white_entropy=(double *) AcquireQuantumMemory(MaxIntensity+1UL, sizeof(*white_entropy)); if ((cumulative_histogram == (double *) NULL) || (black_entropy == (double *) NULL) || (white_entropy == (double *) NULL)) { if (white_entropy != (double *) NULL) white_entropy=(double *) RelinquishMagickMemory(white_entropy); if (black_entropy != (double *) NULL) black_entropy=(double *) RelinquishMagickMemory(black_entropy); if (cumulative_histogram != (double *) NULL) cumulative_histogram=(double *) RelinquishMagickMemory(cumulative_histogram); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(-1.0); } /* Entropy for black and white parts of the histogram. */ cumulative_histogram[0]=histogram[0]; for (i=1; i <= MaxIntensity; i++) cumulative_histogram[i]=cumulative_histogram[i-1]+histogram[i]; epsilon=MagickMinimumValue; for (j=0; j <= MaxIntensity; j++) { /* Black entropy. */ black_entropy[j]=0.0; if (cumulative_histogram[j] > epsilon) { entropy=0.0; for (i=0; i <= j; i++) if (histogram[i] > epsilon) entropy-=histogram[i]/cumulative_histogram[j]* log(histogram[i]/cumulative_histogram[j]); black_entropy[j]=entropy; } /* White entropy. 
*/ white_entropy[j]=0.0; if ((1.0-cumulative_histogram[j]) > epsilon) { entropy=0.0; for (i=j+1; i <= MaxIntensity; i++) if (histogram[i] > epsilon) entropy-=histogram[i]/(1.0-cumulative_histogram[j])* log(histogram[i]/(1.0-cumulative_histogram[j])); white_entropy[j]=entropy; } } /* Find histogram bin with maximum entropy. */ maximum_entropy=black_entropy[0]+white_entropy[0]; threshold=0; for (j=1; j <= MaxIntensity; j++) if ((black_entropy[j]+white_entropy[j]) > maximum_entropy) { maximum_entropy=black_entropy[j]+white_entropy[j]; threshold=(size_t) j; } /* Free resources. */ white_entropy=(double *) RelinquishMagickMemory(white_entropy); black_entropy=(double *) RelinquishMagickMemory(black_entropy); cumulative_histogram=(double *) RelinquishMagickMemory(cumulative_histogram); return(100.0*threshold/MaxIntensity); } static double OTSUThreshold(const Image *image,const double *histogram, ExceptionInfo *exception) { double max_sigma, *myu, *omega, *probability, *sigma, threshold; register ssize_t i; /* Compute optimal threshold from maximization of inter-class variance. */ myu=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*myu)); omega=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*omega)); probability=(double *) AcquireQuantumMemory(MaxIntensity+1UL, sizeof(*probability)); sigma=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*sigma)); if ((myu == (double *) NULL) || (omega == (double *) NULL) || (probability == (double *) NULL) || (sigma == (double *) NULL)) { if (sigma != (double *) NULL) sigma=(double *) RelinquishMagickMemory(sigma); if (probability != (double *) NULL) probability=(double *) RelinquishMagickMemory(probability); if (omega != (double *) NULL) omega=(double *) RelinquishMagickMemory(omega); if (myu != (double *) NULL) myu=(double *) RelinquishMagickMemory(myu); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(-1.0); } /* Calculate probability density. */ for (i=0; i <= (ssize_t) MaxIntensity; i++) probability[i]=histogram[i]; /* Generate probability of graylevels and mean value for separation. */ omega[0]=probability[0]; myu[0]=0.0; for (i=1; i <= (ssize_t) MaxIntensity; i++) { omega[i]=omega[i-1]+probability[i]; myu[i]=myu[i-1]+i*probability[i]; } /* Sigma maximization: inter-class variance and compute optimal threshold. */ threshold=0; max_sigma=0.0; for (i=0; i < (ssize_t) MaxIntensity; i++) { sigma[i]=0.0; if ((omega[i] != 0.0) && (omega[i] != 1.0)) sigma[i]=pow(myu[MaxIntensity]*omega[i]-myu[i],2.0)/(omega[i]*(1.0- omega[i])); if (sigma[i] > max_sigma) { max_sigma=sigma[i]; threshold=(double) i; } } /* Free resources. */ myu=(double *) RelinquishMagickMemory(myu); omega=(double *) RelinquishMagickMemory(omega); probability=(double *) RelinquishMagickMemory(probability); sigma=(double *) RelinquishMagickMemory(sigma); return(100.0*threshold/MaxIntensity); } static double TriangleThreshold(const Image *image,const double *histogram, ExceptionInfo *exception) { double a, b, c, count, distance, inverse_ratio, max_distance, segment, x1, x2, y1, y2; register ssize_t i; ssize_t end, max, start, threshold; /* Compute optimal threshold with triangle algorithm. 
*/ (void) exception; start=0; /* find start bin, first bin not zero count */ for (i=0; i <= (ssize_t) MaxIntensity; i++) if (histogram[i] > 0.0) { start=i; break; } end=0; /* find end bin, last bin not zero count */ for (i=(ssize_t) MaxIntensity; i >= 0; i--) if (histogram[i] > 0.0) { end=i; break; } max=0; /* find max bin, bin with largest count */ count=0.0; for (i=0; i <= (ssize_t) MaxIntensity; i++) if (histogram[i] > count) { max=i; count=histogram[i]; } /* Compute threshold at split point. */ x1=(double) max; y1=histogram[max]; x2=(double) end; if ((max-start) >= (end-max)) x2=(double) start; y2=0.0; a=y1-y2; b=x2-x1; c=(-1.0)*(a*x1+b*y1); inverse_ratio=1.0/sqrt(a*a+b*b+c*c); threshold=0; max_distance=0.0; if (x2 == (double) start) for (i=start; i < max; i++) { segment=inverse_ratio*(a*i+b*histogram[i]+c); distance=sqrt(segment*segment); if ((distance > max_distance) && (segment > 0.0)) { threshold=i; max_distance=distance; } } else for (i=end; i > max; i--) { segment=inverse_ratio*(a*i+b*histogram[i]+c); distance=sqrt(segment*segment); if ((distance > max_distance) && (segment < 0.0)) { threshold=i; max_distance=distance; } } return(100.0*threshold/MaxIntensity); } MagickExport MagickBooleanType AutoThresholdImage(Image *image, const AutoThresholdMethod method,ExceptionInfo *exception) { CacheView *image_view; char property[MagickPathExtent]; double gamma, *histogram, sum, threshold; MagickBooleanType status; register ssize_t i; ssize_t y; /* Form histogram. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); histogram=(double *) AcquireQuantumMemory(MaxIntensity+1UL, sizeof(*histogram)); if (histogram == (double *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=MagickTrue; (void) memset(histogram,0,(MaxIntensity+1UL)*sizeof(*histogram)); image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *magick_restrict p; register ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { double intensity = GetPixelIntensity(image,p); histogram[ScaleQuantumToChar(ClampToQuantum(intensity))]++; p++; } } image_view=DestroyCacheView(image_view); /* Normalize histogram. */ sum=0.0; for (i=0; i <= (ssize_t) MaxIntensity; i++) sum+=histogram[i]; gamma=PerceptibleReciprocal(sum); for (i=0; i <= (ssize_t) MaxIntensity; i++) histogram[i]=gamma*histogram[i]; /* Discover threshold from histogram. */ switch (method) { case KapurThresholdMethod: { threshold=KapurThreshold(image,histogram,exception); break; } case OTSUThresholdMethod: default: { threshold=OTSUThreshold(image,histogram,exception); break; } case TriangleThresholdMethod: { threshold=TriangleThreshold(image,histogram,exception); break; } } histogram=(double *) RelinquishMagickMemory(histogram); if (threshold < 0.0) status=MagickFalse; if (status == MagickFalse) return(MagickFalse); /* Threshold image. 
*/ (void) FormatLocaleString(property,MagickPathExtent,"%g%%",threshold); (void) SetImageProperty(image,"auto-threshold:threshold",property); return(BilevelImage(image,QuantumRange*threshold/100.0)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % B i l e v e l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % BilevelImage() changes the value of individual pixels based on the % intensity of each pixel channel. The result is a high-contrast image. % % More precisely each channel value of the image is 'thresholded' so that if % it is equal to or less than the given value it is set to zero, while any % value greater than that give is set to it maximum or QuantumRange. % % This function is what is used to implement the "-threshold" operator for % the command line API. % % If the default channel setting is given the image is thresholded using just % the gray 'intensity' of the image, rather than the individual channels. % % The format of the BilevelImageChannel method is: % % MagickBooleanType BilevelImage(Image *image,const double threshold) % MagickBooleanType BilevelImageChannel(Image *image, % const ChannelType channel,const double threshold) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel type. % % o threshold: define the threshold values. % % Aside: You can get the same results as operator using LevelImageChannels() % with the 'threshold' value for both the black_point and the white_point. % */ MagickExport MagickBooleanType BilevelImage(Image *image,const double threshold) { MagickBooleanType status; status=BilevelImageChannel(image,DefaultChannels,threshold); return(status); } MagickExport MagickBooleanType BilevelImageChannel(Image *image, const ChannelType channel,const double threshold) { #define ThresholdImageTag "Threshold/Image" CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); if (IsGrayColorspace(image->colorspace) != MagickFalse) (void) SetImageColorspace(image,sRGBColorspace); /* Bilevel threshold image. */ status=MagickTrue; progress=0; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *magick_restrict indexes; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); if ((channel & SyncChannels) != 0) { for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,GetPixelIntensity(image,q) <= threshold ? 0 : QuantumRange); SetPixelGreen(q,GetPixelRed(q)); SetPixelBlue(q,GetPixelRed(q)); q++; } } else for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) SetPixelRed(q,(MagickRealType) GetPixelRed(q) <= threshold ? 
0 : QuantumRange); if ((channel & GreenChannel) != 0) SetPixelGreen(q,(MagickRealType) GetPixelGreen(q) <= threshold ? 0 : QuantumRange); if ((channel & BlueChannel) != 0) SetPixelBlue(q,(MagickRealType) GetPixelBlue(q) <= threshold ? 0 : QuantumRange); if ((channel & OpacityChannel) != 0) { if (image->matte == MagickFalse) SetPixelOpacity(q,(MagickRealType) GetPixelOpacity(q) <= threshold ? 0 : QuantumRange); else SetPixelAlpha(q,(MagickRealType) GetPixelAlpha(q) <= threshold ? OpaqueOpacity : TransparentOpacity); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(indexes+x,(MagickRealType) GetPixelIndex(indexes+x) <= threshold ? 0 : QuantumRange); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_BilevelImageChannel) #endif proceed=SetImageProgress(image,ThresholdImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % B l a c k T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % BlackThresholdImage() is like ThresholdImage() but forces all pixels below % the threshold into black while leaving all pixels at or above the threshold % unchanged. % % The format of the BlackThresholdImage method is: % % MagickBooleanType BlackThresholdImage(Image *image,const char *threshold) % MagickBooleanType BlackThresholdImageChannel(Image *image, % const ChannelType channel,const char *threshold, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel or channels to be thresholded. % % o threshold: Define the threshold value. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType BlackThresholdImage(Image *image, const char *threshold) { MagickBooleanType status; status=BlackThresholdImageChannel(image,DefaultChannels,threshold, &image->exception); return(status); } MagickExport MagickBooleanType BlackThresholdImageChannel(Image *image, const ChannelType channel,const char *thresholds,ExceptionInfo *exception) { #define ThresholdImageTag "Threshold/Image" CacheView *image_view; GeometryInfo geometry_info; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket threshold; MagickStatusType flags; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (thresholds == (const char *) NULL) return(MagickTrue); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); GetMagickPixelPacket(image,&threshold); flags=ParseGeometry(thresholds,&geometry_info); threshold.red=geometry_info.rho; threshold.green=geometry_info.sigma; if ((flags & SigmaValue) == 0) threshold.green=threshold.red; threshold.blue=geometry_info.xi; if ((flags & XiValue) == 0) threshold.blue=threshold.red; threshold.opacity=geometry_info.psi; if ((flags & PsiValue) == 0) threshold.opacity=threshold.red; threshold.index=geometry_info.chi; if ((flags & ChiValue) == 0) threshold.index=threshold.red; if ((flags & PercentValue) != 0) { threshold.red*=(MagickRealType) (QuantumRange/100.0); threshold.green*=(MagickRealType) (QuantumRange/100.0); threshold.blue*=(MagickRealType) (QuantumRange/100.0); threshold.opacity*=(MagickRealType) (QuantumRange/100.0); threshold.index*=(MagickRealType) (QuantumRange/100.0); } if ((IsMagickGray(&threshold) == MagickFalse) && (IsGrayColorspace(image->colorspace) != MagickFalse)) (void) SetImageColorspace(image,sRGBColorspace); /* Black threshold image. 
*/ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *magick_restrict indexes; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if (((channel & RedChannel) != 0) && ((MagickRealType) GetPixelRed(q) < threshold.red)) SetPixelRed(q,0); if (((channel & GreenChannel) != 0) && ((MagickRealType) GetPixelGreen(q) < threshold.green)) SetPixelGreen(q,0); if (((channel & BlueChannel) != 0) && ((MagickRealType) GetPixelBlue(q) < threshold.blue)) SetPixelBlue(q,0); if (((channel & OpacityChannel) != 0) && ((MagickRealType) GetPixelOpacity(q) < threshold.opacity)) SetPixelOpacity(q,0); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace) && ((MagickRealType) GetPixelIndex(indexes+x) < threshold.index)) SetPixelIndex(indexes+x,0); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_BlackThresholdImageChannel) #endif proceed=SetImageProgress(image,ThresholdImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l a m p I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClampImage() set each pixel whose value is below zero to zero and any the % pixel whose value is above the quantum range to the quantum range (e.g. % 65535) otherwise the pixel value remains unchanged. % % The format of the ClampImageChannel method is: % % MagickBooleanType ClampImage(Image *image) % MagickBooleanType ClampImageChannel(Image *image, % const ChannelType channel) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel type. % */ MagickExport MagickBooleanType ClampImage(Image *image) { MagickBooleanType status; status=ClampImageChannel(image,DefaultChannels); return(status); } MagickExport MagickBooleanType ClampImageChannel(Image *image, const ChannelType channel) { #define ClampImageTag "Clamp/Image" CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->storage_class == PseudoClass) { register ssize_t i; register PixelPacket *magick_restrict q; q=image->colormap; for (i=0; i < (ssize_t) image->colors; i++) { SetPixelRed(q,ClampPixel((MagickRealType) GetPixelRed(q))); SetPixelGreen(q,ClampPixel((MagickRealType) GetPixelGreen(q))); SetPixelBlue(q,ClampPixel((MagickRealType) GetPixelBlue(q))); SetPixelOpacity(q,ClampPixel((MagickRealType) GetPixelOpacity(q))); q++; } return(SyncImage(image)); } /* Clamp image. 
*/ status=MagickTrue; progress=0; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *magick_restrict indexes; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) SetPixelRed(q,ClampPixel((MagickRealType) GetPixelRed(q))); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampPixel((MagickRealType) GetPixelGreen(q))); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampPixel((MagickRealType) GetPixelBlue(q))); if ((channel & OpacityChannel) != 0) SetPixelOpacity(q,ClampPixel((MagickRealType) GetPixelOpacity(q))); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(indexes+x,ClampPixel((MagickRealType) GetPixelIndex( indexes+x))); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ClampImageChannel) #endif proceed=SetImageProgress(image,ClampImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y T h r e s h o l d M a p % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyThresholdMap() de-allocate the given ThresholdMap % % The format of the ListThresholdMaps method is: % % ThresholdMap *DestroyThresholdMap(Threshold *map) % % A description of each parameter follows. % % o map: Pointer to the Threshold map to destroy % */ MagickExport ThresholdMap *DestroyThresholdMap(ThresholdMap *map) { assert(map != (ThresholdMap *) NULL); if (map->map_id != (char *) NULL) map->map_id=DestroyString(map->map_id); if (map->description != (char *) NULL) map->description=DestroyString(map->description); if (map->levels != (ssize_t *) NULL) map->levels=(ssize_t *) RelinquishMagickMemory(map->levels); map=(ThresholdMap *) RelinquishMagickMemory(map); return(map); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t T h r e s h o l d M a p F i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetThresholdMapFile() look for a given threshold map name or alias in the % given XML file data, and return the allocated the map when found. % % The format of the ListThresholdMaps method is: % % ThresholdMap *GetThresholdMap(const char *xml,const char *filename, % const char *map_id,ExceptionInfo *exception) % % A description of each parameter follows. % % o xml: The threshold map list in XML format. % % o filename: The threshold map XML filename. % % o map_id: ID of the map to look for in XML list. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport ThresholdMap *GetThresholdMapFile(const char *xml, const char *filename,const char *map_id,ExceptionInfo *exception) { const char *attribute, *content; double value; ThresholdMap *map; XMLTreeInfo *description, *levels, *threshold, *thresholds; map = (ThresholdMap *) NULL; (void) LogMagickEvent(ConfigureEvent,GetMagickModule(), "Loading threshold map file \"%s\" ...",filename); thresholds=NewXMLTree(xml,exception); if ( thresholds == (XMLTreeInfo *) NULL ) return(map); for (threshold = GetXMLTreeChild(thresholds,"threshold"); threshold != (XMLTreeInfo *) NULL; threshold = GetNextXMLTreeTag(threshold) ) { attribute=GetXMLTreeAttribute(threshold, "map"); if ((attribute != (char *) NULL) && (LocaleCompare(map_id,attribute) == 0)) break; attribute=GetXMLTreeAttribute(threshold, "alias"); if ((attribute != (char *) NULL) && (LocaleCompare(map_id,attribute) == 0)) break; } if (threshold == (XMLTreeInfo *) NULL) { thresholds=DestroyXMLTree(thresholds); return(map); } description=GetXMLTreeChild(threshold,"description"); if (description == (XMLTreeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingElement", "<description>, map \"%s\"", map_id); thresholds=DestroyXMLTree(thresholds); return(map); } levels=GetXMLTreeChild(threshold,"levels"); if (levels == (XMLTreeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingElement", "<levels>, map \"%s\"", map_id); thresholds=DestroyXMLTree(thresholds); return(map); } /* The map has been found -- allocate a Threshold Map to return */ map=(ThresholdMap *) AcquireMagickMemory(sizeof(ThresholdMap)); if (map == (ThresholdMap *) NULL) ThrowFatalException(ResourceLimitFatalError,"UnableToAcquireThresholdMap"); map->map_id=(char *) NULL; map->description=(char *) NULL; map->levels=(ssize_t *) NULL; /* Assign basic attributeibutes. 
*/ attribute=GetXMLTreeAttribute(threshold,"map"); if (attribute != (char *) NULL) map->map_id=ConstantString(attribute); content=GetXMLTreeContent(description); if (content != (char *) NULL) map->description=ConstantString(content); attribute=GetXMLTreeAttribute(levels,"width"); if (attribute == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingAttribute", "<levels width>, map \"%s\"",map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } map->width=StringToUnsignedLong(attribute); if (map->width == 0) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidAttribute", "<levels width>, map \"%s\"", map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } attribute=GetXMLTreeAttribute(levels,"height"); if (attribute == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingAttribute", "<levels height>, map \"%s\"", map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } map->height=StringToUnsignedLong(attribute); if (map->height == 0) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidAttribute", "<levels height>, map \"%s\"", map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } attribute=GetXMLTreeAttribute(levels, "divisor"); if (attribute == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingAttribute", "<levels divisor>, map \"%s\"", map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } map->divisor=(ssize_t) StringToLong(attribute); if (map->divisor < 2) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidAttribute", "<levels divisor>, map \"%s\"", map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } /* Allocate theshold levels array. */ content=GetXMLTreeContent(levels); if (content == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingContent", "<levels>, map \"%s\"", map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } map->levels=(ssize_t *) AcquireQuantumMemory((size_t) map->width,map->height* sizeof(*map->levels)); if (map->levels == (ssize_t *) NULL) ThrowFatalException(ResourceLimitFatalError,"UnableToAcquireThresholdMap"); { char *p; register ssize_t i; /* Parse levels into integer array. 
*/ for (i=0; i< (ssize_t) (map->width*map->height); i++) { map->levels[i]=(ssize_t) strtol(content,&p,10); if (p == content) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidContent", "<level> too few values, map \"%s\"", map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } if ((map->levels[i] < 0) || (map->levels[i] > map->divisor)) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidContent", "<level> %.20g out of range, map \"%s\"", (double) map->levels[i],map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } content=p; } value=(double) strtol(content,&p,10); (void) value; if (p != content) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidContent", "<level> too many values, map \"%s\"", map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } } thresholds=DestroyXMLTree(thresholds); return(map); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t T h r e s h o l d M a p % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetThresholdMap() load and search one or more threshold map files for the % a map matching the given name or aliase. % % The format of the GetThresholdMap method is: % % ThresholdMap *GetThresholdMap(const char *map_id, % ExceptionInfo *exception) % % A description of each parameter follows. % % o map_id: ID of the map to look for. % % o exception: return any errors or warnings in this structure. % */ MagickExport ThresholdMap *GetThresholdMap(const char *map_id, ExceptionInfo *exception) { const StringInfo *option; LinkedListInfo *options; ThresholdMap *map; map=GetThresholdMapFile(MinimalThresholdMap,"built-in",map_id,exception); if (map != (ThresholdMap *) NULL) return(map); options=GetConfigureOptions(ThresholdsFilename,exception); option=(const StringInfo *) GetNextValueInLinkedList(options); while (option != (const StringInfo *) NULL) { map=GetThresholdMapFile((const char *) GetStringInfoDatum(option), GetStringInfoPath(option),map_id,exception); if (map != (ThresholdMap *) NULL) break; option=(const StringInfo *) GetNextValueInLinkedList(options); } options=DestroyConfigureOptions(options); return(map); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + L i s t T h r e s h o l d M a p F i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ListThresholdMapFile() lists the threshold maps and their descriptions % in the given XML file data. % % The format of the ListThresholdMaps method is: % % MagickBooleanType ListThresholdMaps(FILE *file,const char*xml, % const char *filename,ExceptionInfo *exception) % % A description of each parameter follows. % % o file: An pointer to the output FILE. % % o xml: The threshold map list in XML format. % % o filename: The threshold map XML filename. % % o exception: return any errors or warnings in this structure. 
% */ MagickBooleanType ListThresholdMapFile(FILE *file,const char *xml, const char *filename,ExceptionInfo *exception) { XMLTreeInfo *thresholds,*threshold,*description; const char *map,*alias,*content; assert( xml != (char *) NULL ); assert( file != (FILE *) NULL ); (void) LogMagickEvent(ConfigureEvent,GetMagickModule(), "Loading threshold map file \"%s\" ...",filename); thresholds=NewXMLTree(xml,exception); if ( thresholds == (XMLTreeInfo *) NULL ) return(MagickFalse); (void) FormatLocaleFile(file,"%-16s %-12s %s\n","Map","Alias","Description"); (void) FormatLocaleFile(file, "----------------------------------------------------\n"); for( threshold = GetXMLTreeChild(thresholds,"threshold"); threshold != (XMLTreeInfo *) NULL; threshold = GetNextXMLTreeTag(threshold) ) { map = GetXMLTreeAttribute(threshold, "map"); if (map == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingAttribute", "<map>"); thresholds=DestroyXMLTree(thresholds); return(MagickFalse); } alias = GetXMLTreeAttribute(threshold, "alias"); /* alias is optional, no if test needed */ description=GetXMLTreeChild(threshold,"description"); if ( description == (XMLTreeInfo *) NULL ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingElement", "<description>, map \"%s\"", map); thresholds=DestroyXMLTree(thresholds); return(MagickFalse); } content=GetXMLTreeContent(description); if ( content == (char *) NULL ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingContent", "<description>, map \"%s\"", map); thresholds=DestroyXMLTree(thresholds); return(MagickFalse); } (void) FormatLocaleFile(file,"%-16s %-12s %s\n",map,alias ? alias : "", content); } thresholds=DestroyXMLTree(thresholds); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L i s t T h r e s h o l d M a p s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ListThresholdMaps() lists the threshold maps and their descriptions % as defined by "threshold.xml" to a file. % % The format of the ListThresholdMaps method is: % % MagickBooleanType ListThresholdMaps(FILE *file,ExceptionInfo *exception) % % A description of each parameter follows. % % o file: An pointer to the output FILE. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType ListThresholdMaps(FILE *file, ExceptionInfo *exception) { const StringInfo *option; LinkedListInfo *options; MagickStatusType status; status=MagickTrue; if (file == (FILE *) NULL) file=stdout; options=GetConfigureOptions(ThresholdsFilename,exception); (void) FormatLocaleFile(file, "\n Threshold Maps for Ordered Dither Operations\n"); option=(const StringInfo *) GetNextValueInLinkedList(options); while (option != (const StringInfo *) NULL) { (void) FormatLocaleFile(file,"\nPath: %s\n\n",GetStringInfoPath(option)); status&=ListThresholdMapFile(file,(const char *) GetStringInfoDatum(option), GetStringInfoPath(option),exception); option=(const StringInfo *) GetNextValueInLinkedList(options); } options=DestroyConfigureOptions(options); return(status != 0 ? 
MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % O r d e r e d D i t h e r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % OrderedDitherImage() uses the ordered dithering technique of reducing color % images to monochrome using positional information to retain as much % information as possible. % % WARNING: This function is deprecated, and is now just a call to % the more more powerful OrderedPosterizeImage(); function. % % The format of the OrderedDitherImage method is: % % MagickBooleanType OrderedDitherImage(Image *image) % MagickBooleanType OrderedDitherImageChannel(Image *image, % const ChannelType channel,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel or channels to be thresholded. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType OrderedDitherImage(Image *image) { MagickBooleanType status; status=OrderedDitherImageChannel(image,DefaultChannels,&image->exception); return(status); } MagickExport MagickBooleanType OrderedDitherImageChannel(Image *image, const ChannelType channel,ExceptionInfo *exception) { MagickBooleanType status; /* Call the augumented function OrderedPosterizeImage() */ status=OrderedPosterizeImageChannel(image,channel,"o8x8",exception); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % O r d e r e d P o s t e r i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % OrderedPosterizeImage() will perform a ordered dither based on a number % of pre-defined dithering threshold maps, but over multiple intensity % levels, which can be different for different channels, according to the % input argument. % % The format of the OrderedPosterizeImage method is: % % MagickBooleanType OrderedPosterizeImage(Image *image, % const char *threshold_map,ExceptionInfo *exception) % MagickBooleanType OrderedPosterizeImageChannel(Image *image, % const ChannelType channel,const char *threshold_map, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel or channels to be thresholded. % % o threshold_map: A string containing the name of the threshold dither % map to use, followed by zero or more numbers representing the number % of color levels tho dither between. % % Any level number less than 2 will be equivalent to 2, and means only % binary dithering will be applied to each color channel. % % No numbers also means a 2 level (bitmap) dither will be applied to all % channels, while a single number is the number of levels applied to each % channel in sequence. More numbers will be applied in turn to each of % the color channels. % % For example: "o3x3,6" will generate a 6 level posterization of the % image with a ordered 3x3 diffused pixel dither being applied between % each level. While checker,8,8,4 will produce a 332 colormaped image % with only a single checkerboard hash pattern (50% grey) between each % color level, to basically double the number of color levels with % a bare minimim of dithering. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType OrderedPosterizeImage(Image *image, const char *threshold_map,ExceptionInfo *exception) { MagickBooleanType status; status=OrderedPosterizeImageChannel(image,DefaultChannels,threshold_map, exception); return(status); } MagickExport MagickBooleanType OrderedPosterizeImageChannel(Image *image, const ChannelType channel,const char *threshold_map,ExceptionInfo *exception) { #define DitherImageTag "Dither/Image" CacheView *image_view; LongPixelPacket levels; MagickBooleanType status; MagickOffsetType progress; ssize_t y; ThresholdMap *map; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (threshold_map == (const char *) NULL) return(MagickTrue); { char token[MaxTextExtent]; register const char *p; p=(char *)threshold_map; while (((isspace((int) ((unsigned char) *p)) != 0) || (*p == ',')) && (*p != '\0')) p++; threshold_map=p; while (((isspace((int) ((unsigned char) *p)) == 0) && (*p != ',')) && (*p != '\0')) { if ((p-threshold_map) >= (MaxTextExtent-1)) break; token[p-threshold_map] = *p; p++; } token[p-threshold_map] = '\0'; map = GetThresholdMap(token, exception); if ( map == (ThresholdMap *) NULL ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : '%s'","ordered-dither",threshold_map); return(MagickFalse); } } /* Set channel levels from extra comma separated arguments Default to 2, the single value given, or individual channel values */ #if 1 { /* parse directly as a comma separated list of integers */ char *p; p = strchr((char *) threshold_map,','); if ( p != (char *) NULL && isdigit((int) ((unsigned char) *(++p))) ) levels.index = (unsigned int) strtoul(p, &p, 10); else levels.index = 2; levels.red = ((channel & RedChannel ) != 0) ? levels.index : 0; levels.green = ((channel & GreenChannel) != 0) ? levels.index : 0; levels.blue = ((channel & BlueChannel) != 0) ? levels.index : 0; levels.opacity = ((channel & OpacityChannel) != 0) ? levels.index : 0; levels.index = ((channel & IndexChannel) != 0 && (image->colorspace == CMYKColorspace)) ? levels.index : 0; /* if more than a single number, each channel has a separate value */ if ( p != (char *) NULL && *p == ',' ) { p=strchr((char *) threshold_map,','); p++; if ((channel & RedChannel) != 0) levels.red = (unsigned int) strtoul(p, &p, 10), (void)(*p == ',' && p++); if ((channel & GreenChannel) != 0) levels.green = (unsigned int) strtoul(p, &p, 10), (void)(*p == ',' && p++); if ((channel & BlueChannel) != 0) levels.blue = (unsigned int) strtoul(p, &p, 10), (void)(*p == ',' && p++); if ((channel & IndexChannel) != 0 && image->colorspace == CMYKColorspace) levels.index=(unsigned int) strtoul(p, &p, 10), (void)(*p == ',' && p++); if ((channel & OpacityChannel) != 0) levels.opacity = (unsigned int) strtoul(p, &p, 10), (void)(*p == ',' && p++); } } #else /* Parse level values as a geometry */ /* This difficult! * How to map GeometryInfo structure elements into * LongPixelPacket structure elements, but according to channel? * Note the channels list may skip elements!!!! * EG -channel BA -ordered-dither map,2,3 * will need to map g.rho -> l.blue, and g.sigma -> l.opacity * A simpler way is needed, probably converting geometry to a temporary * array, then using channel to advance the index into ssize_t pixel packet. 
*/ #endif #if 0 printf("DEBUG levels r=%u g=%u b=%u a=%u i=%u\n", levels.red, levels.green, levels.blue, levels.opacity, levels.index); #endif { /* Do the posterized ordered dithering of the image */ ssize_t d; /* d = number of psuedo-level divisions added between color levels */ d = map->divisor-1; /* reduce levels to levels - 1 */ levels.red = levels.red ? levels.red-1 : 0; levels.green = levels.green ? levels.green-1 : 0; levels.blue = levels.blue ? levels.blue-1 : 0; levels.opacity = levels.opacity ? levels.opacity-1 : 0; levels.index = levels.index ? levels.index-1 : 0; if (SetImageStorageClass(image,DirectClass) == MagickFalse) { InheritException(exception,&image->exception); return(MagickFalse); } status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *magick_restrict indexes; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t threshold, t, l; /* Figure out the dither threshold for this pixel This must be a integer from 1 to map->divisor-1 */ threshold = map->levels[(x%map->width) +map->width*(y%map->height)]; /* Dither each channel in the image as appropriate Notes on the integer Math... total number of divisions = (levels-1)*(divisor-1)+1) t1 = this colors psuedo_level = q->red * total_divisions / (QuantumRange+1) l = posterization level 0..levels t = dither threshold level 0..divisor-1 NB: 0 only on last Each color_level is of size QuantumRange / (levels-1) NB: All input levels and divisor are already had 1 subtracted Opacity is inverted so 'off' represents transparent. 
*/ if (levels.red) { t = (ssize_t) (QuantumScale*GetPixelRed(q)*(levels.red*d+1)); l = t/d; t = t-l*d; SetPixelRed(q,ClampToQuantum((MagickRealType) ((l+(t >= threshold))*(MagickRealType) QuantumRange/levels.red))); } if (levels.green) { t = (ssize_t) (QuantumScale*GetPixelGreen(q)* (levels.green*d+1)); l = t/d; t = t-l*d; SetPixelGreen(q,ClampToQuantum((MagickRealType) ((l+(t >= threshold))*(MagickRealType) QuantumRange/levels.green))); } if (levels.blue) { t = (ssize_t) (QuantumScale*GetPixelBlue(q)* (levels.blue*d+1)); l = t/d; t = t-l*d; SetPixelBlue(q,ClampToQuantum((MagickRealType) ((l+(t >= threshold))*(MagickRealType) QuantumRange/levels.blue))); } if (levels.opacity) { t = (ssize_t) ((1.0-QuantumScale*GetPixelOpacity(q))* (levels.opacity*d+1)); l = t/d; t = t-l*d; SetPixelOpacity(q,ClampToQuantum((MagickRealType) ((1.0-l-(t >= threshold))*(MagickRealType) QuantumRange/ levels.opacity))); } if (levels.index) { t = (ssize_t) (QuantumScale*GetPixelIndex(indexes+x)* (levels.index*d+1)); l = t/d; t = t-l*d; SetPixelIndex(indexes+x,ClampToQuantum((MagickRealType) ((l+ (t>=threshold))*(MagickRealType) QuantumRange/levels.index))); } q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_OrderedPosterizeImageChannel) #endif proceed=SetImageProgress(image,DitherImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); } map=DestroyThresholdMap(map); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P e r c e p t i b l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PerceptibleImage() set each pixel whose value is less than |epsilon| to % epsilon or -epsilon (whichever is closer) otherwise the pixel value remains % unchanged. % % The format of the PerceptibleImageChannel method is: % % MagickBooleanType PerceptibleImage(Image *image,const double epsilon) % MagickBooleanType PerceptibleImageChannel(Image *image, % const ChannelType channel,const double epsilon) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel type. % % o epsilon: the epsilon threshold (e.g. 1.0e-9). % */ static inline Quantum PerceptibleThreshold(const Quantum quantum, const double epsilon) { double sign; sign=(double) quantum < 0.0 ? 
-1.0 : 1.0; if ((sign*quantum) >= epsilon) return(quantum); return((Quantum) (sign*epsilon)); } MagickExport MagickBooleanType PerceptibleImage(Image *image, const double epsilon) { MagickBooleanType status; status=PerceptibleImageChannel(image,DefaultChannels,epsilon); return(status); } MagickExport MagickBooleanType PerceptibleImageChannel(Image *image, const ChannelType channel,const double epsilon) { #define PerceptibleImageTag "Perceptible/Image" CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->storage_class == PseudoClass) { register ssize_t i; register PixelPacket *magick_restrict q; q=image->colormap; for (i=0; i < (ssize_t) image->colors; i++) { SetPixelRed(q,PerceptibleThreshold(GetPixelRed(q),epsilon)); SetPixelGreen(q,PerceptibleThreshold(GetPixelGreen(q),epsilon)); SetPixelBlue(q,PerceptibleThreshold(GetPixelBlue(q),epsilon)); SetPixelOpacity(q,PerceptibleThreshold(GetPixelOpacity(q),epsilon)); q++; } return(SyncImage(image)); } /* Perceptible image. */ status=MagickTrue; progress=0; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *magick_restrict indexes; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) SetPixelRed(q,PerceptibleThreshold(GetPixelRed(q),epsilon)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,PerceptibleThreshold(GetPixelGreen(q),epsilon)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,PerceptibleThreshold(GetPixelBlue(q),epsilon)); if ((channel & OpacityChannel) != 0) SetPixelOpacity(q,PerceptibleThreshold(GetPixelOpacity(q),epsilon)); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(indexes+x,PerceptibleThreshold(GetPixelIndex(indexes+x), epsilon)); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_PerceptibleImageChannel) #endif proceed=SetImageProgress(image,PerceptibleImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R a n d o m T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RandomThresholdImage() changes the value of individual pixels based on the % intensity of each pixel compared to a random threshold. The result is a % low-contrast, two color image. 
% % The format of the RandomThresholdImage method is: % % MagickBooleanType RandomThresholdImageChannel(Image *image, % const char *thresholds,ExceptionInfo *exception) % MagickBooleanType RandomThresholdImageChannel(Image *image, % const ChannelType channel,const char *thresholds, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel or channels to be thresholded. % % o thresholds: a geometry string containing low,high thresholds. If the % string contains 2x2, 3x3, or 4x4, an ordered dither of order 2, 3, or 4 % is performed instead. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType RandomThresholdImage(Image *image, const char *thresholds,ExceptionInfo *exception) { MagickBooleanType status; status=RandomThresholdImageChannel(image,DefaultChannels,thresholds, exception); return(status); } MagickExport MagickBooleanType RandomThresholdImageChannel(Image *image, const ChannelType channel,const char *thresholds,ExceptionInfo *exception) { #define ThresholdImageTag "Threshold/Image" CacheView *image_view; GeometryInfo geometry_info; MagickStatusType flags; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket threshold; MagickRealType min_threshold, max_threshold; RandomInfo **magick_restrict random_info; ssize_t y; #if defined(MAGICKCORE_OPENMP_SUPPORT) unsigned long key; #endif assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (thresholds == (const char *) NULL) return(MagickTrue); GetMagickPixelPacket(image,&threshold); min_threshold=0.0; max_threshold=(MagickRealType) QuantumRange; flags=ParseGeometry(thresholds,&geometry_info); min_threshold=geometry_info.rho; max_threshold=geometry_info.sigma; if ((flags & SigmaValue) == 0) max_threshold=min_threshold; if (strchr(thresholds,'%') != (char *) NULL) { max_threshold*=(MagickRealType) (0.01*QuantumRange); min_threshold*=(MagickRealType) (0.01*QuantumRange); } else if (((max_threshold == min_threshold) || (max_threshold == 1)) && (min_threshold <= 8)) { /* Backward Compatibility -- ordered-dither -- IM v 6.2.9-6. */ status=OrderedPosterizeImageChannel(image,channel,thresholds,exception); return(status); } /* Random threshold image. 
*/ status=MagickTrue; progress=0; if (channel == CompositeChannels) { if (AcquireImageColormap(image,2) == MagickFalse) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); random_info=AcquireRandomInfoThreadSet(); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) key=GetRandomSecretKey(random_info[0]); #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,key == ~0UL) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); MagickBooleanType sync; register IndexPacket *magick_restrict indexes; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { IndexPacket index; MagickRealType intensity; intensity=GetPixelIntensity(image,q); if (intensity < min_threshold) threshold.index=min_threshold; else if (intensity > max_threshold) threshold.index=max_threshold; else threshold.index=(MagickRealType)(QuantumRange* GetPseudoRandomValue(random_info[id])); index=(IndexPacket) (intensity <= threshold.index ? 0 : 1); SetPixelIndex(indexes+x,index); SetPixelRGBO(q,image->colormap+(ssize_t) index); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_RandomThresholdImageChannel) #endif proceed=SetImageProgress(image,ThresholdImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); random_info=DestroyRandomInfoThreadSet(random_info); return(status); } if (SetImageStorageClass(image,DirectClass) == MagickFalse) { InheritException(exception,&image->exception); return(MagickFalse); } random_info=AcquireRandomInfoThreadSet(); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) key=GetRandomSecretKey(random_info[0]); #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,key == ~0UL) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); register IndexPacket *magick_restrict indexes; register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) { if ((MagickRealType) GetPixelRed(q) < min_threshold) threshold.red=min_threshold; else if ((MagickRealType) GetPixelRed(q) > max_threshold) threshold.red=max_threshold; else threshold.red=(MagickRealType) (QuantumRange* GetPseudoRandomValue(random_info[id])); } if ((channel & GreenChannel) != 0) { if ((MagickRealType) GetPixelGreen(q) < min_threshold) threshold.green=min_threshold; else if ((MagickRealType) GetPixelGreen(q) > max_threshold) threshold.green=max_threshold; else threshold.green=(MagickRealType) (QuantumRange* GetPseudoRandomValue(random_info[id])); } if ((channel & BlueChannel) != 0) { if 
((MagickRealType) GetPixelBlue(q) < min_threshold) threshold.blue=min_threshold; else if ((MagickRealType) GetPixelBlue(q) > max_threshold) threshold.blue=max_threshold; else threshold.blue=(MagickRealType) (QuantumRange* GetPseudoRandomValue(random_info[id])); } if ((channel & OpacityChannel) != 0) { if ((MagickRealType) GetPixelOpacity(q) < min_threshold) threshold.opacity=min_threshold; else if ((MagickRealType) GetPixelOpacity(q) > max_threshold) threshold.opacity=max_threshold; else threshold.opacity=(MagickRealType) (QuantumRange* GetPseudoRandomValue(random_info[id])); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { if ((MagickRealType) GetPixelIndex(indexes+x) < min_threshold) threshold.index=min_threshold; else if ((MagickRealType) GetPixelIndex(indexes+x) > max_threshold) threshold.index=max_threshold; else threshold.index=(MagickRealType) (QuantumRange* GetPseudoRandomValue(random_info[id])); } if ((channel & RedChannel) != 0) SetPixelRed(q,(MagickRealType) GetPixelRed(q) <= threshold.red ? 0 : QuantumRange); if ((channel & GreenChannel) != 0) SetPixelGreen(q,(MagickRealType) GetPixelGreen(q) <= threshold.green ? 0 : QuantumRange); if ((channel & BlueChannel) != 0) SetPixelBlue(q,(MagickRealType) GetPixelBlue(q) <= threshold.blue ? 0 : QuantumRange); if ((channel & OpacityChannel) != 0) SetPixelOpacity(q,(MagickRealType) GetPixelOpacity(q) <= threshold.opacity ? 0 : QuantumRange); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(indexes+x,(MagickRealType) GetPixelIndex(indexes+x) <= threshold.index ? 0 : QuantumRange); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_RandomThresholdImageChannel) #endif proceed=SetImageProgress(image,ThresholdImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); random_info=DestroyRandomInfoThreadSet(random_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W h i t e T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WhiteThresholdImage() is like ThresholdImage() but forces all pixels above % the threshold into white while leaving all pixels at or below the threshold % unchanged. % % The format of the WhiteThresholdImage method is: % % MagickBooleanType WhiteThresholdImage(Image *image,const char *threshold) % MagickBooleanType WhiteThresholdImageChannel(Image *image, % const ChannelType channel,const char *threshold, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel or channels to be thresholded. % % o threshold: Define the threshold value. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType WhiteThresholdImage(Image *image, const char *threshold) { MagickBooleanType status; status=WhiteThresholdImageChannel(image,DefaultChannels,threshold, &image->exception); return(status); } MagickExport MagickBooleanType WhiteThresholdImageChannel(Image *image, const ChannelType channel,const char *thresholds,ExceptionInfo *exception) { #define ThresholdImageTag "Threshold/Image" CacheView *image_view; GeometryInfo geometry_info; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket threshold; MagickStatusType flags; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (thresholds == (const char *) NULL) return(MagickTrue); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); flags=ParseGeometry(thresholds,&geometry_info); GetMagickPixelPacket(image,&threshold); threshold.red=geometry_info.rho; threshold.green=geometry_info.sigma; if ((flags & SigmaValue) == 0) threshold.green=threshold.red; threshold.blue=geometry_info.xi; if ((flags & XiValue) == 0) threshold.blue=threshold.red; threshold.opacity=geometry_info.psi; if ((flags & PsiValue) == 0) threshold.opacity=threshold.red; threshold.index=geometry_info.chi; if ((flags & ChiValue) == 0) threshold.index=threshold.red; if ((flags & PercentValue) != 0) { threshold.red*=(MagickRealType) (QuantumRange/100.0); threshold.green*=(MagickRealType) (QuantumRange/100.0); threshold.blue*=(MagickRealType) (QuantumRange/100.0); threshold.opacity*=(MagickRealType) (QuantumRange/100.0); threshold.index*=(MagickRealType) (QuantumRange/100.0); } if ((IsMagickGray(&threshold) == MagickFalse) && (IsGrayColorspace(image->colorspace) != MagickFalse)) (void) SetImageColorspace(image,sRGBColorspace); /* White threshold image. 
*/ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *magick_restrict indexes; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if (((channel & RedChannel) != 0) && ((MagickRealType) GetPixelRed(q) > threshold.red)) SetPixelRed(q,QuantumRange); if (((channel & GreenChannel) != 0) && ((MagickRealType) GetPixelGreen(q) > threshold.green)) SetPixelGreen(q,QuantumRange); if (((channel & BlueChannel) != 0) && ((MagickRealType) GetPixelBlue(q) > threshold.blue)) SetPixelBlue(q,QuantumRange); if (((channel & OpacityChannel) != 0) && ((MagickRealType) GetPixelOpacity(q) > threshold.opacity)) SetPixelOpacity(q,QuantumRange); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace) && ((MagickRealType) GetPixelIndex(indexes+x)) > threshold.index) SetPixelIndex(indexes+x,QuantumRange); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_WhiteThresholdImageChannel) #endif proceed=SetImageProgress(image,ThresholdImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); }
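/*
  Illustrative sketch only -- not part of MagickCore.  The per-channel
  arithmetic used by OrderedPosterizeImageChannel() above is easier to see in
  isolation.  For a channel value v in [0,QuantumRange], an output level count
  "levels" (>= 2), a dither-map divisor, and a map entry "threshold" in
  [1,divisor-1], the value is projected onto (levels-1)*(divisor-1)+1
  pseudo-levels, split into a posterization level l and a dither level t, and
  promoted to the next level when t reaches the map threshold.  The names
  ExampleQuantum and EXAMPLE_QUANTUM_RANGE are stand-ins, not MagickCore
  symbols, and the 2x2 map in main() is only a representative dispersed map.
*/
#include <stdio.h>

typedef unsigned short ExampleQuantum;
#define EXAMPLE_QUANTUM_RANGE  65535.0

static ExampleQuantum ExampleOrderedPosterize(const double v,const int levels,
  const int divisor,const int threshold)
{
  int d = divisor-1;                          /* dither sub-divisions */
  int t = (int) ((v/EXAMPLE_QUANTUM_RANGE)*((levels-1)*d+1));
  int l = t/d;                                /* posterization level */
  double q;

  t-=l*d;                                     /* dither level, 0..d-1 */
  q=(l+(t >= threshold))*EXAMPLE_QUANTUM_RANGE/(levels-1);
  if (q < 0.0)
    q=0.0;
  if (q > EXAMPLE_QUANTUM_RANGE)
    q=EXAMPLE_QUANTUM_RANGE;
  return((ExampleQuantum) q);
}

int main(void)
{
  /*
    Two-level dither of a mid-grey row with a 2x2 dispersed map (divisor 5):
    about half the samples land on 0 and half on EXAMPLE_QUANTUM_RANGE.
  */
  int map[4] = { 1, 3, 4, 2 };
  int x;

  for (x=0; x < 8; x++)
    (void) printf("%u ",(unsigned) ExampleOrderedPosterize(32768.0,2,5,
      map[x % 4]));
  (void) printf("\n");
  return(0);
}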
sample_task_single_producer.c
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
/*
 * See LICENSE.txt in top-level directory.
 */

#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>

int main(int argc, char *argv[])
{
    int i, num = (argc > 1) ? atoi(argv[1]) : 100;
    int nthreads;
    struct timeval t_start, t_end;
    double time;
    double *a = (double *)malloc(sizeof(double) * num);

    /* Every thread stores the same value, so this unsynchronized write is
     * harmless. */
    #pragma omp parallel
    {
        nthreads = omp_get_num_threads();
    }

    for (i = 0; i < num; i++) {
        a[i] = i;
    }

    gettimeofday(&t_start, NULL);
    #pragma omp parallel
    {
        /* Single producer: one thread creates one task per iteration while
         * the rest of the team executes them.  The loop index is shared in
         * the enclosing parallel region, so it must be made firstprivate in
         * each task; otherwise the tasks race with the producer loop. */
        #pragma omp single
        {
            for (i = 0; i < num; i++) {
                #pragma omp task firstprivate(i)
                {
                    a[i] *= 0.9;
                }
            }
        }
    }
    gettimeofday(&t_end, NULL);

    time = (t_end.tv_sec * 1000000 + t_end.tv_usec) -
           (t_start.tv_sec * 1000000 + t_start.tv_usec);
    printf("%d %f\n", nthreads, time / 1000000.0);

    /* Verify that every element was scaled exactly once. */
    for (i = 0; i < num; i++) {
        if (a[i] != i * 0.9) {
            printf("a[%d]=%f != %f\n", i, a[i], i * 0.9);
            return 1;
        }
    }
    free(a);
    return 0;
}
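/*
 * For reference only (not part of the benchmark above): since OpenMP 4.5 the
 * same work can be expressed with "taskloop", which chunks the iterations
 * into tasks and makes the loop index private by construction, so no
 * firstprivate clause is needed.  A minimal sketch, assuming a compiler with
 * OpenMP 4.5 support; grainsize(1) mimics the one-task-per-iteration
 * behaviour of the hand-written producer loop.
 */
static void scale_with_taskloop(double *a, int num)
{
    #pragma omp parallel
    #pragma omp single
    #pragma omp taskloop grainsize(1)
    for (int i = 0; i < num; i++) {
        a[i] *= 0.9;
    }
}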
real_self_energy.c
/* Copyright (C) 2015 Atsushi Togo */ /* All rights reserved. */ /* This file is part of phonopy. */ /* Redistribution and use in source and binary forms, with or without */ /* modification, are permitted provided that the following conditions */ /* are met: */ /* * Redistributions of source code must retain the above copyright */ /* notice, this list of conditions and the following disclaimer. */ /* * Redistributions in binary form must reproduce the above copyright */ /* notice, this list of conditions and the following disclaimer in */ /* the documentation and/or other materials provided with the */ /* distribution. */ /* * Neither the name of the phonopy project nor the names of its */ /* contributors may be used to endorse or promote products derived */ /* from this software without specific prior written permission. */ /* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */ /* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */ /* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */ /* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */ /* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ /* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */ /* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */ /* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */ /* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */ /* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */ /* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ /* POSSIBILITY OF SUCH DAMAGE. */ #include "real_self_energy.h" #include <math.h> #include <stdlib.h> #include "phonoc_array.h" #include "phonoc_utils.h" #include "real_to_reciprocal.h" static double get_real_self_energy_at_band( const long band_index, const Darray *fc3_normal_squared, const double fpoint, const double *frequencies, const long (*triplets)[3], const long *triplet_weights, const double epsilon, const double temperature, const double unit_conversion_factor, const double cutoff_frequency); static double sum_real_self_energy_at_band( const long num_band, const double *fc3_normal_squared, const double fpoint, const double *freqs1, const double *freqs2, const double epsilon, const double temperature, const double cutoff_frequency); static double sum_real_self_energy_at_band_0K( const long num_band, const double *fc3_normal_squared, const double fpoint, const double *freqs1, const double *freqs2, const double epsilon, const double cutoff_frequency); void rse_get_real_self_energy_at_bands( double *real_self_energy, const Darray *fc3_normal_squared, const long *band_indices, const double *frequencies, const long (*triplets)[3], const long *triplet_weights, const double epsilon, const double temperature, const double unit_conversion_factor, const double cutoff_frequency) { long i, num_band0, num_band, gp0; double fpoint; num_band0 = fc3_normal_squared->dims[1]; num_band = fc3_normal_squared->dims[2]; gp0 = triplets[0][0]; /* num_band0 and num_band_indices have to be same. 
*/ for (i = 0; i < num_band0; i++) { fpoint = frequencies[gp0 * num_band + band_indices[i]]; if (fpoint < cutoff_frequency) { real_self_energy[i] = 0; } else { real_self_energy[i] = get_real_self_energy_at_band( i, fc3_normal_squared, fpoint, frequencies, triplets, triplet_weights, epsilon, temperature, unit_conversion_factor, cutoff_frequency); } } } void rse_get_real_self_energy_at_frequency_point( double *real_self_energy, const double frequency_point, const Darray *fc3_normal_squared, const long *band_indices, const double *frequencies, const long (*triplets)[3], const long *triplet_weights, const double epsilon, const double temperature, const double unit_conversion_factor, const double cutoff_frequency) { long i, num_band0; num_band0 = fc3_normal_squared->dims[1]; /* num_band0 and num_band_indices have to be same. */ for (i = 0; i < num_band0; i++) { if (frequency_point < cutoff_frequency) { real_self_energy[i] = 0; } else { real_self_energy[i] = get_real_self_energy_at_band( i, fc3_normal_squared, frequency_point, frequencies, triplets, triplet_weights, epsilon, temperature, unit_conversion_factor, cutoff_frequency); } } } static double get_real_self_energy_at_band( const long band_index, const Darray *fc3_normal_squared, const double fpoint, const double *frequencies, const long (*triplets)[3], const long *triplet_weights, const double epsilon, const double temperature, const double unit_conversion_factor, const double cutoff_frequency) { long i, num_triplets, num_band0, num_band, gp1, gp2; double shift; num_triplets = fc3_normal_squared->dims[0]; num_band0 = fc3_normal_squared->dims[1]; num_band = fc3_normal_squared->dims[2]; shift = 0; #ifdef _OPENMP #pragma omp parallel for private(gp1, gp2) reduction(+ : shift) #endif for (i = 0; i < num_triplets; i++) { gp1 = triplets[i][1]; gp2 = triplets[i][2]; if (temperature > 0) { shift += sum_real_self_energy_at_band( num_band, fc3_normal_squared->data + i * num_band0 * num_band * num_band + band_index * num_band * num_band, fpoint, frequencies + gp1 * num_band, frequencies + gp2 * num_band, epsilon, temperature, cutoff_frequency) * triplet_weights[i] * unit_conversion_factor; } else { shift += sum_real_self_energy_at_band_0K( num_band, fc3_normal_squared->data + i * num_band0 * num_band * num_band + band_index * num_band * num_band, fpoint, frequencies + gp1 * num_band, frequencies + gp2 * num_band, epsilon, cutoff_frequency) * triplet_weights[i] * unit_conversion_factor; } } return shift; } static double sum_real_self_energy_at_band( const long num_band, const double *fc3_normal_squared, const double fpoint, const double *freqs1, const double *freqs2, const double epsilon, const double temperature, const double cutoff_frequency) { long i, j; double n1, n2, f1, f2, f3, f4, shift; /* double sum; */ shift = 0; for (i = 0; i < num_band; i++) { if (freqs1[i] > cutoff_frequency) { n1 = phonoc_bose_einstein(freqs1[i], temperature); for (j = 0; j < num_band; j++) { if (freqs2[j] > cutoff_frequency) { n2 = phonoc_bose_einstein(freqs2[j], temperature); f1 = fpoint + freqs1[i] + freqs2[j]; f2 = fpoint - freqs1[i] - freqs2[j]; f3 = fpoint - freqs1[i] + freqs2[j]; f4 = fpoint + freqs1[i] - freqs2[j]; /* sum = 0; * if (fabs(f1) > epsilon) { * sum -= (n1 + n2 + 1) / f1; * } * if (fabs(f2) > epsilon) { * sum += (n1 + n2 + 1) / f2; * } * if (fabs(f3) > epsilon) { * sum -= (n1 - n2) / f3; * } * if (fabs(f4) > epsilon) { * sum += (n1 - n2) / f4; * } * shift += sum * fc3_normal_squared[i * num_band + j]; */ shift += (-(n1 + n2 + 1) * f1 / (f1 * f1 + 
epsilon * epsilon) + (n1 + n2 + 1) * f2 / (f2 * f2 + epsilon * epsilon) - (n1 - n2) * f3 / (f3 * f3 + epsilon * epsilon) + (n1 - n2) * f4 / (f4 * f4 + epsilon * epsilon)) * fc3_normal_squared[i * num_band + j]; } } } } return shift; } static double sum_real_self_energy_at_band_0K( const long num_band, const double *fc3_normal_squared, const double fpoint, const double *freqs1, const double *freqs2, const double epsilon, const double cutoff_frequency) { long i, j; double f1, f2, shift; shift = 0; for (i = 0; i < num_band; i++) { if (freqs1[i] > cutoff_frequency) { for (j = 0; j < num_band; j++) { if (freqs2[j] > cutoff_frequency) { f1 = fpoint + freqs1[i] + freqs2[j]; f2 = fpoint - freqs1[i] - freqs2[j]; shift += (-1 * f1 / (f1 * f1 + epsilon * epsilon) + 1 * f2 / (f2 * f2 + epsilon * epsilon)) * fc3_normal_squared[i * num_band + j]; } } } } return shift; }
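/* Illustrative sketch only -- not part of phono3py.  Each term accumulated in
 * sum_real_self_energy_at_band() uses f / (f*f + epsilon*epsilon), i.e. the
 * real part of 1/(f + i*epsilon): it agrees with 1/f when |f| >> epsilon and
 * stays finite at the resonance f = 0, which is what the commented-out
 * variant with an explicit |f| > epsilon guard approximates.  The helper name
 * below is hypothetical; e.g. the first term above corresponds to
 * -(n1 + n2 + 1) * smeared_reciprocal(f1, epsilon). */
static double smeared_reciprocal(const double f, const double epsilon)
{
    return f / (f * f + epsilon * epsilon);
}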
coin_flip_omp.c
/*
 * This program is the first of three exercises in the OnRamp to
 * Parallel Computing - Monte Carlo Module. We will flip a coin,
 * simulated using rand_r(), many times and evaluate the randomness of
 * the results using a chi-squared test. This exercise is derived from
 * Libby Shoop's CS in Parallel Monte Carlo Module.
 *
 * History:
 *  Dave Valentine (Slippery Rock University): Original C++ program
 *  Libby Shoop    (Macalester University)   : Adapted for CS in
 *                                             Parallel Module
 *  Justin Ragatz  (UW-La Crosse)            : Adapted for OnRamp Module,
 *                                             rewritten in C.
 */
#include "coin_flip_omp.h"

int main(int argc, char *argv[])
{
    unsigned long long num_flips = 0;
    unsigned long long num_heads = 0;
    unsigned long long num_tails = 0;
    int n_threads = 1;
    int tid;
    unsigned long long trial_flips = FLIPS_PER_TRIAL;
    unsigned long long max_flips = FLIPS_PER_TRIAL * (1LLU << TRIALS);
    double start_time = -1;
    double end_time = -1;

    // Get number of threads
    if (argc > 1) {
        n_threads = atoi(argv[1]);
        if (n_threads > 32) {
            n_threads = 32;
        }
    }

    create_strings(); /* Malloc and initialize strings. */

    /* Print introduction. */
    printf("\n Settings: \n");
    printf(" Trials : %llu\n", TRIALS);
    printf(" Flips per trial: %llu\n", FLIPS_PER_TRIAL);
    printf(" Threads : %d\n", n_threads);
    printf("\n Begin Simulation... \n");

    /* Print table heading. */
    printf("\n ----------------------------------------"
           "----------------------------------------\n");
    printf(" | %15s | %15s | %15s | %11s | %8s |\n",
           "Trials", "Heads", "Tails", "Chi Squared", "Time");
    printf(" ----------------------------------------"
           "----------------------------------------\n");

    /* Run the simulation. */
    while (trial_flips <= max_flips) {
        num_heads = 0;
        num_tails = 0;
        start_time = omp_get_wtime();

#pragma omp parallel num_threads(n_threads) default(none) \
    private(num_flips, tid) shared(trial_flips, seeds)    \
    reduction(+:num_heads, num_tails)
        {
            tid = omp_get_thread_num();
            seeds[tid] = abs(((time(NULL) * 181) * ((tid - 83) * 359)) % 104729);

#pragma omp for
            for (num_flips = 0; num_flips < trial_flips; num_flips++) {
                if (rand_r(&seeds[tid]) % 2 == 0) {
                    num_heads++;
                } else {
                    num_tails++;
                }
            }
        }
        end_time = omp_get_wtime();

        pretty_int(trial_flips, trial_string);
        pretty_int(num_heads, heads_string);
        pretty_int(num_tails, tails_string);

        printf(" | %15s | %15s | %15s | %11.2f | %8.2f |\n",
               trial_string, heads_string, tails_string,
               chi_squared(num_heads, num_tails),
               (double)(end_time - start_time));

        trial_flips *= 2;
    }

    printf(" ----------------------------------------"
           "----------------------------------------\n");

    clean_exit(0);

    return 0;
}

double chi_squared(unsigned long long heads, unsigned long long tails)
{
    double sum = 0;              // chi square sum
    double tot = heads + tails;  // total flips
    double expected = 0.5 * tot; // expected heads (or tails)

    sum = ((heads - expected) * (heads - expected) / expected) +
          ((tails - expected) * (tails - expected) / expected);

    return sum;
}

int pretty_int(unsigned long long n, char* s)
{
    int extra = 0;
    int commas = 0;
    int count = 0;
    int len = 0;
    int i;

    if (NULL == s) return -1;

    len = sprintf(s, "%llu", n);

    if (len > STRING_LEN) {
        printf("Buffer overflow, cannot print string.\n");
        s = NULL;
        return -1;
    }

    extra = strlen(s) % 3;
    commas = (strlen(s) - extra) / 3;

    if (0 == extra) commas--;

    s[strlen(s) + commas] = '\0';

    for (i = strlen(s) - 1; i > 0; i--) {
        count++;
        count = count % 3;
        if (0 == count) {
            s[i + commas] = s[i];
            commas--;
            s[i + commas] = ',';
        } else {
            s[i + commas] = s[i];
        }
    }

    return 0;
}

int create_strings()
{
    int i;

    trial_string = (char*) malloc(sizeof(char) * STRING_LEN);
    if (NULL == trial_string) {
        fprintf(stderr, "Error: Malloc for trial_string failed.\n");
        clean_exit(-1);
    }

    heads_string = (char*) malloc(sizeof(char) * STRING_LEN);
    if (NULL == heads_string) {
        fprintf(stderr, "Error: Malloc for heads_string failed.\n");
        clean_exit(-1);
    }

    tails_string = (char*) malloc(sizeof(char) * STRING_LEN);
    if (NULL == tails_string) {
        fprintf(stderr, "Error: Malloc for tails_string failed.\n");
        clean_exit(-1);
    }

    for (i = 0; i < STRING_LEN - 1; i++) {
        trial_string[i] = ' ';
        heads_string[i] = ' ';
        tails_string[i] = ' ';
    }
    trial_string[STRING_LEN - 1] = '\0';
    heads_string[STRING_LEN - 1] = '\0';
    tails_string[STRING_LEN - 1] = '\0';

    return 0;
}

int clean_exit(int status)
{
    free(trial_string);
    free(heads_string);
    free(tails_string);
    trial_string = NULL;
    heads_string = NULL;
    tails_string = NULL;

    if (status == 0) {
        printf("\n Normal termination.\n\n");
    } else {
        fprintf(stderr, "\n Terminated by error.\n\n");
        exit(EXIT_FAILURE);
    }

    return 0;
}
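For reference, the chi-squared statistic computed by chi_squared() above has one degree of freedom (two outcomes), so for a fair coin it should fall below roughly 3.84 about 95% of the time. The snippet below is an illustrative standalone check only, not part of the OnRamp module (the file name chi_check.c and the example counts are made up); it reuses the same formula on a worked example:

/* chi_check.c - illustrative only; build with: cc chi_check.c -o chi_check */
#include <stdio.h>

/* Same statistic as chi_squared() in coin_flip_omp.c: the sum of
   (observed - expected)^2 / expected over the two outcomes. */
static double chi_squared(unsigned long long heads, unsigned long long tails)
{
    double expected = 0.5 * ((double) heads + (double) tails);

    return (heads - expected) * (heads - expected) / expected +
           (tails - expected) * (tails - expected) / expected;
}

int main(void)
{
    /* Example: 1,000,000 flips with 500,600 heads. expected = 500,000,
       so chi^2 = 2 * 600^2 / 500,000 = 1.44, well below the 3.84 cutoff
       for the 5% significance level with one degree of freedom. */
    unsigned long long heads = 500600ULL;
    unsigned long long tails = 499400ULL;
    double chi2 = chi_squared(heads, tails);

    printf("chi^2 = %.2f -> %s\n", chi2,
           chi2 < 3.84 ? "consistent with a fair coin" : "suspiciously biased");
    return 0;
}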
compare.c
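The file below is MagickCore's image-comparison module from ImageMagick. As a rough orientation before the implementation, here is a minimal caller sketch; it is not part of compare.c and rests on assumptions: an installed ImageMagick 7 MagickCore (umbrella header MagickCore/MagickCore.h, usually located via pkg-config), the CompareImages signature and metric names documented in the source that follows, and a made-up file name compare_demo.c.

/* compare_demo.c - illustrative caller, not part of compare.c.
   Typical build: cc compare_demo.c $(pkg-config --cflags --libs MagickCore) */
#include <stdio.h>
#include <stdlib.h>
#include <MagickCore/MagickCore.h>

int main(int argc, char **argv)
{
  ExceptionInfo *exception;
  ImageInfo *info;
  Image *image, *reconstruct, *difference;
  double distortion = 0.0;

  if (argc != 3)
    {
      fprintf(stderr, "usage: %s image reconstruct\n", argv[0]);
      return(EXIT_FAILURE);
    }
  MagickCoreGenesis(*argv, MagickTrue);
  exception = AcquireExceptionInfo();
  info = CloneImageInfo((ImageInfo *) NULL);
  (void) CopyMagickString(info->filename, argv[1], MagickPathExtent);
  image = ReadImage(info, exception);
  (void) CopyMagickString(info->filename, argv[2], MagickPathExtent);
  reconstruct = ReadImage(info, exception);
  if ((image != (Image *) NULL) && (reconstruct != (Image *) NULL))
    {
      /* CompareImages() returns a visual difference image and fills in the
         distortion for the requested metric (root-mean-squared error here). */
      difference = CompareImages(image, reconstruct,
        RootMeanSquaredErrorMetric, &distortion, exception);
      (void) printf("RMSE distortion: %g\n", distortion);
      if (difference != (Image *) NULL)
        difference = DestroyImage(difference);
    }
  if (reconstruct != (Image *) NULL)
    reconstruct = DestroyImage(reconstruct);
  if (image != (Image *) NULL)
    image = DestroyImage(image);
  info = DestroyImageInfo(info);
  exception = DestroyExceptionInfo(exception);
  MagickCoreTerminus();
  return(EXIT_SUCCESS);
}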
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC OOO M M PPPP AAA RRRR EEEEE % % C O O MM MM P P A A R R E % % C O O M M M PPPP AAAAA RRRR EEE % % C O O M M P A A R R E % % CCCC OOO M M P A A R R EEEEE % % % % % % MagickCore Image Comparison Methods % % % % Software Design % % Cristy % % December 2003 % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/client.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/compare.h" #include "MagickCore/composite-private.h" #include "MagickCore/constitute.h" #include "MagickCore/exception-private.h" #include "MagickCore/enhance.h" #include "MagickCore/fourier.h" #include "MagickCore/geometry.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/registry.h" #include "MagickCore/resource_.h" #include "MagickCore/string_.h" #include "MagickCore/statistic.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/transform.h" #include "MagickCore/utility.h" #include "MagickCore/version.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o m p a r e I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CompareImages() compares one or more pixel channels of an image to a % reconstructed image and returns the difference image. % % The format of the CompareImages method is: % % Image *CompareImages(const Image *image,const Image *reconstruct_image, % const MetricType metric,double *distortion,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o reconstruct_image: the reconstruct image. % % o metric: the metric. % % o distortion: the computed distortion between the images. % % o exception: return any errors or warnings in this structure. % */ static size_t GetImageChannels(const Image *image) { ssize_t i; size_t channels; channels=0; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) != 0) channels++; } return(channels == 0 ? 
(size_t) 1 : channels); } MagickExport Image *CompareImages(Image *image,const Image *reconstruct_image, const MetricType metric,double *distortion,ExceptionInfo *exception) { CacheView *highlight_view, *image_view, *reconstruct_view; const char *artifact; double fuzz; Image *clone_image, *difference_image, *highlight_image; MagickBooleanType status; PixelInfo highlight, lowlight, masklight; RectangleInfo geometry; size_t columns, rows; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(reconstruct_image != (const Image *) NULL); assert(reconstruct_image->signature == MagickCoreSignature); assert(distortion != (double *) NULL); *distortion=0.0; if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=GetImageDistortion(image,reconstruct_image,metric,distortion, exception); if (status == MagickFalse) return((Image *) NULL); columns=MagickMax(image->columns,reconstruct_image->columns); rows=MagickMax(image->rows,reconstruct_image->rows); SetGeometry(image,&geometry); geometry.width=columns; geometry.height=rows; clone_image=CloneImage(image,0,0,MagickTrue,exception); if (clone_image == (Image *) NULL) return((Image *) NULL); (void) SetImageMask(clone_image,ReadPixelMask,(Image *) NULL,exception); difference_image=ExtentImage(clone_image,&geometry,exception); clone_image=DestroyImage(clone_image); if (difference_image == (Image *) NULL) return((Image *) NULL); (void) SetImageAlphaChannel(difference_image,OpaqueAlphaChannel,exception); highlight_image=CloneImage(image,columns,rows,MagickTrue,exception); if (highlight_image == (Image *) NULL) { difference_image=DestroyImage(difference_image); return((Image *) NULL); } status=SetImageStorageClass(highlight_image,DirectClass,exception); if (status == MagickFalse) { difference_image=DestroyImage(difference_image); highlight_image=DestroyImage(highlight_image); return((Image *) NULL); } (void) SetImageMask(highlight_image,ReadPixelMask,(Image *) NULL,exception); (void) SetImageAlphaChannel(highlight_image,OpaqueAlphaChannel,exception); (void) QueryColorCompliance("#f1001ecc",AllCompliance,&highlight,exception); artifact=GetImageArtifact(image,"compare:highlight-color"); if (artifact != (const char *) NULL) (void) QueryColorCompliance(artifact,AllCompliance,&highlight,exception); (void) QueryColorCompliance("#ffffffcc",AllCompliance,&lowlight,exception); artifact=GetImageArtifact(image,"compare:lowlight-color"); if (artifact != (const char *) NULL) (void) QueryColorCompliance(artifact,AllCompliance,&lowlight,exception); (void) QueryColorCompliance("#888888cc",AllCompliance,&masklight,exception); artifact=GetImageArtifact(image,"compare:masklight-color"); if (artifact != (const char *) NULL) (void) QueryColorCompliance(artifact,AllCompliance,&masklight,exception); /* Generate difference image. 
*/ status=MagickTrue; fuzz=GetFuzzyColorDistance(image,reconstruct_image); image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); highlight_view=AcquireAuthenticCacheView(highlight_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,highlight_image,rows,1) #endif for (y=0; y < (ssize_t) rows; y++) { MagickBooleanType sync; const Quantum *magick_restrict p, *magick_restrict q; Quantum *magick_restrict r; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); r=QueueCacheViewAuthenticPixels(highlight_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL) || (r == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) columns; x++) { double Da, Sa; MagickStatusType difference; ssize_t i; if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) || (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2))) { SetPixelViaPixelInfo(highlight_image,&masklight,r); p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); r+=GetPixelChannels(highlight_image); continue; } difference=MagickFalse; Sa=QuantumScale*GetPixelAlpha(image,p); Da=QuantumScale*GetPixelAlpha(reconstruct_image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double distance, pixel; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; if (channel == AlphaPixelChannel) pixel=(double) p[i]-GetPixelChannel(reconstruct_image,channel,q); else pixel=Sa*p[i]-Da*GetPixelChannel(reconstruct_image,channel,q); distance=pixel*pixel; if (distance >= fuzz) { difference=MagickTrue; break; } } if (difference == MagickFalse) SetPixelViaPixelInfo(highlight_image,&lowlight,r); else SetPixelViaPixelInfo(highlight_image,&highlight,r); p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); r+=GetPixelChannels(highlight_image); } sync=SyncCacheViewAuthenticPixels(highlight_view,exception); if (sync == MagickFalse) status=MagickFalse; } highlight_view=DestroyCacheView(highlight_view); reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); (void) CompositeImage(difference_image,highlight_image,image->compose, MagickTrue,0,0,exception); highlight_image=DestroyImage(highlight_image); if (status == MagickFalse) difference_image=DestroyImage(difference_image); return(difference_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e D i s t o r t i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageDistortion() compares one or more pixel channels of an image to a % reconstructed image and returns the specified distortion metric. % % The format of the GetImageDistortion method is: % % MagickBooleanType GetImageDistortion(const Image *image, % const Image *reconstruct_image,const MetricType metric, % double *distortion,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. 
% % o reconstruct_image: the reconstruct image. % % o metric: the metric. % % o distortion: the computed distortion between the images. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType GetAbsoluteDistortion(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; double fuzz; MagickBooleanType status; size_t columns, rows; ssize_t y; /* Compute the absolute difference in pixels between two images. */ status=MagickTrue; fuzz=GetFuzzyColorDistance(image,reconstruct_image); rows=MagickMax(image->rows,reconstruct_image->rows); columns=MagickMax(image->columns,reconstruct_image->columns); image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,rows,1) #endif for (y=0; y < (ssize_t) rows; y++) { double channel_distortion[MaxPixelChannels+1]; const Quantum *magick_restrict p, *magick_restrict q; ssize_t j, x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL)) { status=MagickFalse; continue; } (void) memset(channel_distortion,0,sizeof(channel_distortion)); for (x=0; x < (ssize_t) columns; x++) { double Da, Sa; MagickBooleanType difference; ssize_t i; if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) || (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2))) { p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); continue; } difference=MagickFalse; Sa=QuantumScale*GetPixelAlpha(image,p); Da=QuantumScale*GetPixelAlpha(reconstruct_image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double distance, pixel; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; if (channel == AlphaPixelChannel) pixel=(double) p[i]-GetPixelChannel(reconstruct_image,channel,q); else pixel=Sa*p[i]-Da*GetPixelChannel(reconstruct_image,channel,q); distance=pixel*pixel; if (distance >= fuzz) { channel_distortion[i]++; difference=MagickTrue; } } if (difference != MagickFalse) channel_distortion[CompositePixelChannel]++; p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetAbsoluteDistortion) #endif for (j=0; j <= MaxPixelChannels; j++) distortion[j]+=channel_distortion[j]; } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); return(status); } static MagickBooleanType GetFuzzDistortion(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; double area; MagickBooleanType status; ssize_t j; size_t columns, rows; ssize_t y; status=MagickTrue; rows=MagickMax(image->rows,reconstruct_image->rows); columns=MagickMax(image->columns,reconstruct_image->columns); area=0.0; image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); 
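  /*
    Editorial note (added for clarity; not part of the original MagickCore
    source): the parallel loop below accumulates, for every unmasked pixel and
    every channel carrying the update trait, the squared, QuantumScale-
    normalized channel difference (alpha-weighted for color channels).  After
    the loop each channel total is divided by the pixel count (area); the
    composite channel is additionally averaged over the channel count and
    square-rooted, so the fuzz metric is reported as an RMS error in
    normalized [0,1] units.
  */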
#if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,rows,1) reduction(+:area) #endif for (y=0; y < (ssize_t) rows; y++) { double channel_distortion[MaxPixelChannels+1]; const Quantum *magick_restrict p, *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } (void) memset(channel_distortion,0,sizeof(channel_distortion)); for (x=0; x < (ssize_t) columns; x++) { double Da, Sa; ssize_t i; if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) || (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2))) { p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); continue; } Sa=QuantumScale*GetPixelAlpha(image,p); Da=QuantumScale*GetPixelAlpha(reconstruct_image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; if (channel == AlphaPixelChannel) distance=QuantumScale*(p[i]-GetPixelChannel(reconstruct_image, channel,q)); else distance=QuantumScale*(Sa*p[i]-Da*GetPixelChannel(reconstruct_image, channel,q)); channel_distortion[i]+=distance*distance; channel_distortion[CompositePixelChannel]+=distance*distance; } area++; p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetFuzzDistortion) #endif for (j=0; j <= MaxPixelChannels; j++) distortion[j]+=channel_distortion[j]; } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); area=PerceptibleReciprocal(area); for (j=0; j <= MaxPixelChannels; j++) distortion[j]*=area; distortion[CompositePixelChannel]/=(double) GetImageChannels(image); distortion[CompositePixelChannel]=sqrt(distortion[CompositePixelChannel]); return(status); } static MagickBooleanType GetMeanAbsoluteDistortion(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; double area; MagickBooleanType status; ssize_t j; size_t columns, rows; ssize_t y; status=MagickTrue; rows=MagickMax(image->rows,reconstruct_image->rows); columns=MagickMax(image->columns,reconstruct_image->columns); area=0.0; image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,rows,1) reduction(+:area) #endif for (y=0; y < (ssize_t) rows; y++) { double channel_distortion[MaxPixelChannels+1]; const Quantum *magick_restrict p, *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL)) { status=MagickFalse; continue; } (void) memset(channel_distortion,0,sizeof(channel_distortion)); for (x=0; x < (ssize_t) columns; 
x++) { double Da, Sa; ssize_t i; if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) || (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2))) { p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); continue; } Sa=QuantumScale*GetPixelAlpha(image,p); Da=QuantumScale*GetPixelAlpha(reconstruct_image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; if (channel == AlphaPixelChannel) distance=QuantumScale*fabs((double) (p[i]-(double) GetPixelChannel(reconstruct_image,channel,q))); else distance=QuantumScale*fabs((double) (Sa*p[i]-Da* GetPixelChannel(reconstruct_image,channel,q))); channel_distortion[i]+=distance; channel_distortion[CompositePixelChannel]+=distance; } area++; p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetMeanAbsoluteError) #endif for (j=0; j <= MaxPixelChannels; j++) distortion[j]+=channel_distortion[j]; } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); area=PerceptibleReciprocal(area); for (j=0; j <= MaxPixelChannels; j++) distortion[j]*=area; distortion[CompositePixelChannel]/=(double) GetImageChannels(image); return(status); } static MagickBooleanType GetMeanErrorPerPixel(Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; MagickBooleanType status; double area, maximum_error, mean_error; size_t columns, rows; ssize_t y; status=MagickTrue; area=0.0; maximum_error=0.0; mean_error=0.0; rows=MagickMax(image->rows,reconstruct_image->rows); columns=MagickMax(image->columns,reconstruct_image->columns); image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); for (y=0; y < (ssize_t) rows; y++) { const Quantum *magick_restrict p, *magick_restrict q; ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL)) { status=MagickFalse; break; } for (x=0; x < (ssize_t) columns; x++) { double Da, Sa; ssize_t i; if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) || (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2))) { p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); continue; } Sa=QuantumScale*GetPixelAlpha(image,p); Da=QuantumScale*GetPixelAlpha(reconstruct_image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; if (channel == AlphaPixelChannel) distance=fabs((double) (p[i]-(double) GetPixelChannel(reconstruct_image,channel,q))); else distance=fabs((double) (Sa*p[i]-Da* GetPixelChannel(reconstruct_image,channel,q))); distortion[i]+=distance; 
distortion[CompositePixelChannel]+=distance; mean_error+=distance*distance; if (distance > maximum_error) maximum_error=distance; area++; } p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); area=PerceptibleReciprocal(area); image->error.mean_error_per_pixel=area*distortion[CompositePixelChannel]; image->error.normalized_mean_error=area*QuantumScale*QuantumScale*mean_error; image->error.normalized_maximum_error=QuantumScale*maximum_error; return(status); } static MagickBooleanType GetMeanSquaredDistortion(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; double area; MagickBooleanType status; ssize_t j; size_t columns, rows; ssize_t y; status=MagickTrue; rows=MagickMax(image->rows,reconstruct_image->rows); columns=MagickMax(image->columns,reconstruct_image->columns); area=0.0; image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,rows,1) reduction(+:area) #endif for (y=0; y < (ssize_t) rows; y++) { double channel_distortion[MaxPixelChannels+1]; const Quantum *magick_restrict p, *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL)) { status=MagickFalse; continue; } (void) memset(channel_distortion,0,sizeof(channel_distortion)); for (x=0; x < (ssize_t) columns; x++) { double Da, Sa; ssize_t i; if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) || (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2))) { p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); continue; } Sa=QuantumScale*GetPixelAlpha(image,p); Da=QuantumScale*GetPixelAlpha(reconstruct_image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; if (channel == AlphaPixelChannel) distance=QuantumScale*(p[i]-GetPixelChannel(reconstruct_image, channel,q)); else distance=QuantumScale*(Sa*p[i]-Da*GetPixelChannel(reconstruct_image, channel,q)); channel_distortion[i]+=distance*distance; channel_distortion[CompositePixelChannel]+=distance*distance; } area++; p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetMeanSquaredError) #endif for (j=0; j <= MaxPixelChannels; j++) distortion[j]+=channel_distortion[j]; } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); area=PerceptibleReciprocal(area); for (j=0; j <= MaxPixelChannels; j++) distortion[j]*=area; distortion[CompositePixelChannel]/=GetImageChannels(image); return(status); } static MagickBooleanType GetNormalizedCrossCorrelationDistortion( const Image *image,const Image *reconstruct_image,double *distortion, ExceptionInfo *exception) { #define SimilarityImageTag 
"Similarity/Image" CacheView *image_view, *reconstruct_view; ChannelStatistics *image_statistics, *reconstruct_statistics; double area; MagickBooleanType status; MagickOffsetType progress; ssize_t channels, i; size_t columns, rows; ssize_t y; /* Normalize to account for variation due to lighting and exposure condition. */ image_statistics=GetImageStatistics(image,exception); reconstruct_statistics=GetImageStatistics(reconstruct_image,exception); if ((image_statistics == (ChannelStatistics *) NULL) || (reconstruct_statistics == (ChannelStatistics *) NULL)) { if (image_statistics != (ChannelStatistics *) NULL) image_statistics=(ChannelStatistics *) RelinquishMagickMemory( image_statistics); if (reconstruct_statistics != (ChannelStatistics *) NULL) reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory( reconstruct_statistics); return(MagickFalse); } status=MagickTrue; progress=0; for (i=0; i <= MaxPixelChannels; i++) distortion[i]=0.0; rows=MagickMax(image->rows,reconstruct_image->rows); columns=MagickMax(image->columns,reconstruct_image->columns); area=0.0; image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); for (y=0; y < (ssize_t) rows; y++) { const Quantum *magick_restrict p, *magick_restrict q; ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL)) { status=MagickFalse; break; } for (x=0; x < (ssize_t) columns; x++) { if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) || (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2))) { p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); continue; } area++; p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } } area=PerceptibleReciprocal(area); for (y=0; y < (ssize_t) rows; y++) { const Quantum *magick_restrict p, *magick_restrict q; ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL)) { status=MagickFalse; break; } for (x=0; x < (ssize_t) columns; x++) { double Da, Sa; if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) || (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2))) { p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); continue; } Sa=QuantumScale*GetPixelAlpha(image,p); Da=QuantumScale*GetPixelAlpha(reconstruct_image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; if (channel == AlphaPixelChannel) distortion[i]+=area*QuantumScale*((double) p[i]- image_statistics[channel].mean)*(GetPixelChannel(reconstruct_image, channel,q)-reconstruct_statistics[channel].mean); else distortion[i]+=area*QuantumScale*(Sa*p[i]- image_statistics[channel].mean)*(Da*GetPixelChannel( reconstruct_image,channel,q)-reconstruct_statistics[channel].mean); } p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) 
#pragma omp atomic #endif progress++; proceed=SetImageProgress(image,SimilarityImageTag,progress,rows); if (proceed == MagickFalse) { status=MagickFalse; break; } } } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); /* Divide by the standard deviation. */ channels=0; distortion[CompositePixelChannel]=0.0; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double gamma; PixelChannel channel = GetPixelChannelChannel(image,i); gamma=image_statistics[channel].standard_deviation* reconstruct_statistics[channel].standard_deviation; if (fabs(gamma) >= MagickEpsilon) { gamma=PerceptibleReciprocal(gamma); distortion[i]=QuantumRange*gamma*distortion[i]; distortion[CompositePixelChannel]+=distortion[i]*distortion[i]; channels++; } } distortion[CompositePixelChannel]=sqrt(distortion[CompositePixelChannel]/ channels); /* Free resources. */ reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory( reconstruct_statistics); image_statistics=(ChannelStatistics *) RelinquishMagickMemory( image_statistics); return(status); } static MagickBooleanType GetPeakAbsoluteDistortion(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; MagickBooleanType status; size_t columns, rows; ssize_t y; status=MagickTrue; rows=MagickMax(image->rows,reconstruct_image->rows); columns=MagickMax(image->columns,reconstruct_image->columns); image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,rows,1) #endif for (y=0; y < (ssize_t) rows; y++) { double channel_distortion[MaxPixelChannels+1]; const Quantum *magick_restrict p, *magick_restrict q; ssize_t j, x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL)) { status=MagickFalse; continue; } (void) memset(channel_distortion,0,sizeof(channel_distortion)); for (x=0; x < (ssize_t) columns; x++) { double Da, Sa; ssize_t i; if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) || (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2))) { p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); continue; } Sa=QuantumScale*GetPixelAlpha(image,p); Da=QuantumScale*GetPixelAlpha(reconstruct_image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; if (channel == AlphaPixelChannel) distance=QuantumScale*fabs((double) (p[i]-(double) GetPixelChannel(reconstruct_image,channel,q))); else distance=QuantumScale*fabs((double) (Sa*p[i]-Da* GetPixelChannel(reconstruct_image,channel,q))); if (distance > channel_distortion[i]) channel_distortion[i]=distance; if (distance > channel_distortion[CompositePixelChannel]) channel_distortion[CompositePixelChannel]=distance; } p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical 
(MagickCore_GetPeakAbsoluteError) #endif for (j=0; j <= MaxPixelChannels; j++) if (channel_distortion[j] > distortion[j]) distortion[j]=channel_distortion[j]; } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); return(status); } static inline double MagickLog10(const double x) { #define Log10Epsilon (1.0e-11) if (fabs(x) < Log10Epsilon) return(log10(Log10Epsilon)); return(log10(fabs(x))); } static MagickBooleanType GetPeakSignalToNoiseRatio(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { MagickBooleanType status; ssize_t i; status=GetMeanSquaredDistortion(image,reconstruct_image,distortion,exception); for (i=0; i <= MaxPixelChannels; i++) if (fabs(distortion[i]) < MagickEpsilon) distortion[i]=INFINITY; else distortion[i]=10.0*MagickLog10(1.0)-10.0*MagickLog10(distortion[i]); return(status); } static MagickBooleanType GetPerceptualHashDistortion(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { ChannelPerceptualHash *channel_phash, *reconstruct_phash; const char *artifact; MagickBooleanType normalize; ssize_t channel; /* Compute perceptual hash in the sRGB colorspace. */ channel_phash=GetImagePerceptualHash(image,exception); if (channel_phash == (ChannelPerceptualHash *) NULL) return(MagickFalse); reconstruct_phash=GetImagePerceptualHash(reconstruct_image,exception); if (reconstruct_phash == (ChannelPerceptualHash *) NULL) { channel_phash=(ChannelPerceptualHash *) RelinquishMagickMemory( channel_phash); return(MagickFalse); } artifact=GetImageArtifact(image,"phash:normalize"); normalize=(artifact == (const char *) NULL) || (IsStringTrue(artifact) == MagickFalse) ? MagickFalse : MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (channel=0; channel < MaxPixelChannels; channel++) { double difference; ssize_t i; difference=0.0; for (i=0; i < MaximumNumberOfImageMoments; i++) { double alpha, beta; ssize_t j; for (j=0; j < (ssize_t) channel_phash[0].number_colorspaces; j++) { alpha=channel_phash[channel].phash[j][i]; beta=reconstruct_phash[channel].phash[j][i]; if (normalize == MagickFalse) difference+=(beta-alpha)*(beta-alpha); else difference=sqrt((beta-alpha)*(beta-alpha)/ channel_phash[0].number_channels); } } distortion[channel]+=difference; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetPerceptualHashDistortion) #endif distortion[CompositePixelChannel]+=difference; } /* Free resources. 
*/ reconstruct_phash=(ChannelPerceptualHash *) RelinquishMagickMemory( reconstruct_phash); channel_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(channel_phash); return(MagickTrue); } static MagickBooleanType GetRootMeanSquaredDistortion(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { MagickBooleanType status; ssize_t i; status=GetMeanSquaredDistortion(image,reconstruct_image,distortion,exception); for (i=0; i <= MaxPixelChannels; i++) distortion[i]=sqrt(distortion[i]); return(status); } static MagickBooleanType GetStructuralSimilarityDistortion(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { #define SSIMRadius 5.0 #define SSIMSigma 1.5 #define SSIMBlocksize 8 #define SSIMK1 0.01 #define SSIMK2 0.03 #define SSIML 1.0 CacheView *image_view, *reconstruct_view; char geometry[MagickPathExtent]; const char *artifact; double area, c1, c2, radius, sigma; KernelInfo *kernel_info; MagickBooleanType status; ssize_t j; size_t columns, rows; ssize_t y; /* Compute structural similarity index @ https://en.wikipedia.org/wiki/Structural_similarity. */ radius=SSIMRadius; artifact=GetImageArtifact(image,"compare:ssim-radius"); if (artifact != (const char *) NULL) radius=StringToDouble(artifact,(char **) NULL); sigma=SSIMSigma; artifact=GetImageArtifact(image,"compare:ssim-sigma"); if (artifact != (const char *) NULL) sigma=StringToDouble(artifact,(char **) NULL); (void) FormatLocaleString(geometry,MagickPathExtent,"gaussian:%.20gx%.20g", radius,sigma); kernel_info=AcquireKernelInfo(geometry,exception); if (kernel_info == (KernelInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); c1=pow(SSIMK1*SSIML,2.0); artifact=GetImageArtifact(image,"compare:ssim-k1"); if (artifact != (const char *) NULL) c1=pow(StringToDouble(artifact,(char **) NULL)*SSIML,2.0); c2=pow(SSIMK2*SSIML,2.0); artifact=GetImageArtifact(image,"compare:ssim-k2"); if (artifact != (const char *) NULL) c2=pow(StringToDouble(artifact,(char **) NULL)*SSIML,2.0); status=MagickTrue; area=0.0; rows=MagickMax(image->rows,reconstruct_image->rows); columns=MagickMax(image->columns,reconstruct_image->columns); image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,reconstruct_image,rows,1) #endif for (y=0; y < (ssize_t) rows; y++) { double channel_distortion[MaxPixelChannels+1]; const Quantum *magick_restrict p, *magick_restrict q; ssize_t i, x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-((ssize_t) kernel_info->width/2L),y- ((ssize_t) kernel_info->height/2L),columns+kernel_info->width, kernel_info->height,exception); q=GetCacheViewVirtualPixels(reconstruct_view,-((ssize_t) kernel_info->width/ 2L),y-((ssize_t) kernel_info->height/2L),columns+kernel_info->width, kernel_info->height,exception); if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL)) { status=MagickFalse; continue; } (void) memset(channel_distortion,0,sizeof(channel_distortion)); for (x=0; x < (ssize_t) columns; x++) { double x_pixel_mu[MaxPixelChannels+1], x_pixel_sigma_squared[MaxPixelChannels+1], xy_sigma[MaxPixelChannels+1], y_pixel_mu[MaxPixelChannels+1], y_pixel_sigma_squared[MaxPixelChannels+1]; const Quantum *magick_restrict reference, *magick_restrict target; MagickRealType *k; ssize_t v; if 
((GetPixelReadMask(image,p) <= (QuantumRange/2)) || (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2))) { p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); continue; } (void) memset(x_pixel_mu,0,sizeof(x_pixel_mu)); (void) memset(x_pixel_sigma_squared,0,sizeof(x_pixel_sigma_squared)); (void) memset(xy_sigma,0,sizeof(xy_sigma)); (void) memset(x_pixel_sigma_squared,0,sizeof(y_pixel_sigma_squared)); (void) memset(y_pixel_mu,0,sizeof(y_pixel_mu)); (void) memset(y_pixel_sigma_squared,0,sizeof(y_pixel_sigma_squared)); k=kernel_info->values; reference=p; target=q; for (v=0; v < (ssize_t) kernel_info->height; v++) { ssize_t u; for (u=0; u < (ssize_t) kernel_info->width; u++) { for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double x_pixel, y_pixel; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits = GetPixelChannelTraits( reconstruct_image,channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; x_pixel=QuantumScale*reference[i]; x_pixel_mu[i]+=(*k)*x_pixel; x_pixel_sigma_squared[i]+=(*k)*x_pixel*x_pixel; y_pixel=QuantumScale* GetPixelChannel(reconstruct_image,channel,target); y_pixel_mu[i]+=(*k)*y_pixel; y_pixel_sigma_squared[i]+=(*k)*y_pixel*y_pixel; xy_sigma[i]+=(*k)*x_pixel*y_pixel; } k++; reference+=GetPixelChannels(image); target+=GetPixelChannels(reconstruct_image); } reference+=GetPixelChannels(image)*columns; target+=GetPixelChannels(reconstruct_image)*columns; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double ssim, x_pixel_mu_squared, x_pixel_sigmas_squared, xy_mu, xy_sigmas, y_pixel_mu_squared, y_pixel_sigmas_squared; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits = GetPixelChannelTraits( reconstruct_image,channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; x_pixel_mu_squared=x_pixel_mu[i]*x_pixel_mu[i]; y_pixel_mu_squared=y_pixel_mu[i]*y_pixel_mu[i]; xy_mu=x_pixel_mu[i]*y_pixel_mu[i]; xy_sigmas=xy_sigma[i]-xy_mu; x_pixel_sigmas_squared=x_pixel_sigma_squared[i]-x_pixel_mu_squared; y_pixel_sigmas_squared=y_pixel_sigma_squared[i]-y_pixel_mu_squared; ssim=((2.0*xy_mu+c1)*(2.0*xy_sigmas+c2))/ ((x_pixel_mu_squared+y_pixel_mu_squared+c1)* (x_pixel_sigmas_squared+y_pixel_sigmas_squared+c2)); channel_distortion[i]+=ssim; channel_distortion[CompositePixelChannel]+=ssim; } p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif area++; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetStructuralSimilarityDistortion) #endif for (i=0; i <= MaxPixelChannels; i++) distortion[i]+=channel_distortion[i]; } image_view=DestroyCacheView(image_view); reconstruct_view=DestroyCacheView(reconstruct_view); for (j=0; j < (ssize_t) GetPixelChannels(image); j++) { PixelChannel channel = GetPixelChannelChannel(image,j); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits == UndefinedPixelTrait) || ((traits & UpdatePixelTrait) == 0)) continue; distortion[j]/=area; } distortion[CompositePixelChannel]/=area; distortion[CompositePixelChannel]/=(double) GetImageChannels(image); kernel_info=DestroyKernelInfo(kernel_info); return(status); } static MagickBooleanType 
GetStructuralDisimilarityDistortion(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { MagickBooleanType status; ssize_t i; status=GetStructuralSimilarityDistortion(image,reconstruct_image, distortion,exception); for (i=0; i <= MaxPixelChannels; i++) distortion[i]=(1.0-(distortion[i]))/2.0; return(status); } MagickExport MagickBooleanType GetImageDistortion(Image *image, const Image *reconstruct_image,const MetricType metric,double *distortion, ExceptionInfo *exception) { double *channel_distortion; MagickBooleanType status; size_t length; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(reconstruct_image != (const Image *) NULL); assert(reconstruct_image->signature == MagickCoreSignature); assert(distortion != (double *) NULL); *distortion=0.0; if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); /* Get image distortion. */ length=MaxPixelChannels+1UL; channel_distortion=(double *) AcquireQuantumMemory(length, sizeof(*channel_distortion)); if (channel_distortion == (double *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); (void) memset(channel_distortion,0,length* sizeof(*channel_distortion)); switch (metric) { case AbsoluteErrorMetric: { status=GetAbsoluteDistortion(image,reconstruct_image,channel_distortion, exception); break; } case FuzzErrorMetric: { status=GetFuzzDistortion(image,reconstruct_image,channel_distortion, exception); break; } case MeanAbsoluteErrorMetric: { status=GetMeanAbsoluteDistortion(image,reconstruct_image, channel_distortion,exception); break; } case MeanErrorPerPixelErrorMetric: { status=GetMeanErrorPerPixel(image,reconstruct_image,channel_distortion, exception); break; } case MeanSquaredErrorMetric: { status=GetMeanSquaredDistortion(image,reconstruct_image, channel_distortion,exception); break; } case NormalizedCrossCorrelationErrorMetric: default: { status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image, channel_distortion,exception); break; } case PeakAbsoluteErrorMetric: { status=GetPeakAbsoluteDistortion(image,reconstruct_image, channel_distortion,exception); break; } case PeakSignalToNoiseRatioErrorMetric: { status=GetPeakSignalToNoiseRatio(image,reconstruct_image, channel_distortion,exception); break; } case PerceptualHashErrorMetric: { status=GetPerceptualHashDistortion(image,reconstruct_image, channel_distortion,exception); break; } case RootMeanSquaredErrorMetric: { status=GetRootMeanSquaredDistortion(image,reconstruct_image, channel_distortion,exception); break; } case StructuralSimilarityErrorMetric: { status=GetStructuralSimilarityDistortion(image,reconstruct_image, channel_distortion,exception); break; } case StructuralDissimilarityErrorMetric: { status=GetStructuralDisimilarityDistortion(image,reconstruct_image, channel_distortion,exception); break; } } *distortion=channel_distortion[CompositePixelChannel]; channel_distortion=(double *) RelinquishMagickMemory(channel_distortion); (void) FormatImageProperty(image,"distortion","%.*g",GetMagickPrecision(), *distortion); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e D i s t o r t i o n s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageDistortions() compares the pixel 
channels of an image to a % reconstructed image and returns the specified distortion metric for each % channel. % % The format of the GetImageDistortions method is: % % double *GetImageDistortions(const Image *image, % const Image *reconstruct_image,const MetricType metric, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o reconstruct_image: the reconstruct image. % % o metric: the metric. % % o exception: return any errors or warnings in this structure. % */ MagickExport double *GetImageDistortions(Image *image, const Image *reconstruct_image,const MetricType metric, ExceptionInfo *exception) { double *channel_distortion; MagickBooleanType status; size_t length; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(reconstruct_image != (const Image *) NULL); assert(reconstruct_image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); /* Get image distortion. */ length=MaxPixelChannels+1UL; channel_distortion=(double *) AcquireQuantumMemory(length, sizeof(*channel_distortion)); if (channel_distortion == (double *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); (void) memset(channel_distortion,0,length* sizeof(*channel_distortion)); status=MagickTrue; switch (metric) { case AbsoluteErrorMetric: { status=GetAbsoluteDistortion(image,reconstruct_image,channel_distortion, exception); break; } case FuzzErrorMetric: { status=GetFuzzDistortion(image,reconstruct_image,channel_distortion, exception); break; } case MeanAbsoluteErrorMetric: { status=GetMeanAbsoluteDistortion(image,reconstruct_image, channel_distortion,exception); break; } case MeanErrorPerPixelErrorMetric: { status=GetMeanErrorPerPixel(image,reconstruct_image,channel_distortion, exception); break; } case MeanSquaredErrorMetric: { status=GetMeanSquaredDistortion(image,reconstruct_image, channel_distortion,exception); break; } case NormalizedCrossCorrelationErrorMetric: default: { status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image, channel_distortion,exception); break; } case PeakAbsoluteErrorMetric: { status=GetPeakAbsoluteDistortion(image,reconstruct_image, channel_distortion,exception); break; } case PeakSignalToNoiseRatioErrorMetric: { status=GetPeakSignalToNoiseRatio(image,reconstruct_image, channel_distortion,exception); break; } case PerceptualHashErrorMetric: { status=GetRootMeanSquaredDistortion(image,reconstruct_image, channel_distortion,exception); break; } case RootMeanSquaredErrorMetric: { status=GetRootMeanSquaredDistortion(image,reconstruct_image, channel_distortion,exception); break; } case StructuralSimilarityErrorMetric: { status=GetStructuralSimilarityDistortion(image,reconstruct_image, channel_distortion,exception); break; } case StructuralDissimilarityErrorMetric: { status=GetStructuralDisimilarityDistortion(image,reconstruct_image, channel_distortion,exception); break; } } if (status == MagickFalse) { channel_distortion=(double *) RelinquishMagickMemory(channel_distortion); return((double *) NULL); } return(channel_distortion); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s I m a g e s E q u a l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsImagesEqual() compare the pixels of 
two images and returns immediately % if any pixel is not identical. % % The format of the IsImagesEqual method is: % % MagickBooleanType IsImagesEqual(const Image *image, % const Image *reconstruct_image,ExceptionInfo *exception) % % A description of each parameter follows. % % o image: the image. % % o reconstruct_image: the reconstruct image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType IsImagesEqual(const Image *image, const Image *reconstruct_image,ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; size_t columns, rows; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(reconstruct_image != (const Image *) NULL); assert(reconstruct_image->signature == MagickCoreSignature); rows=MagickMax(image->rows,reconstruct_image->rows); columns=MagickMax(image->columns,reconstruct_image->columns); image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); for (y=0; y < (ssize_t) rows; y++) { const Quantum *magick_restrict p, *magick_restrict q; ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) break; for (x=0; x < (ssize_t) columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; distance=fabs((double) (p[i]-(double) GetPixelChannel(reconstruct_image, channel,q))); if (distance >= MagickEpsilon) break; } if (i < (ssize_t) GetPixelChannels(image)) break; p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } if (x < (ssize_t) columns) break; } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); return(y < (ssize_t) rows ? MagickFalse : MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e C o l o r M e t r i c % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageColorMetric() measures the difference between colors at each pixel % location of two images. A value other than 0 means the colors match % exactly. Otherwise an error measure is computed by summing over all % pixels in an image the distance squared in RGB space between each image % pixel and its corresponding pixel in the reconstruct image. The error % measure is assigned to these image members: % % o mean_error_per_pixel: The mean error for any single pixel in % the image. % % o normalized_mean_error: The normalized mean quantization error for % any single pixel in the image. This distance measure is normalized to % a range between 0 and 1. It is independent of the range of red, green, % and blue values in the image. % % o normalized_maximum_error: The normalized maximum quantization % error for any single pixel in the image. This distance measure is % normalized to a range between 0 and 1. It is independent of the range % of red, green, and blue values in your image. 
% % A small normalized mean square error, accessed as % image->normalized_mean_error, suggests the images are very similar in % spatial layout and color. % % The format of the SetImageColorMetric method is: % % MagickBooleanType SetImageColorMetric(Image *image, % const Image *reconstruct_image,ExceptionInfo *exception) % % A description of each parameter follows. % % o image: the image. % % o reconstruct_image: the reconstruct image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType SetImageColorMetric(Image *image, const Image *reconstruct_image,ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; double area, maximum_error, mean_error, mean_error_per_pixel; MagickBooleanType status; size_t columns, rows; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(reconstruct_image != (const Image *) NULL); assert(reconstruct_image->signature == MagickCoreSignature); area=0.0; maximum_error=0.0; mean_error_per_pixel=0.0; mean_error=0.0; rows=MagickMax(image->rows,reconstruct_image->rows); columns=MagickMax(image->columns,reconstruct_image->columns); image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); for (y=0; y < (ssize_t) rows; y++) { const Quantum *magick_restrict p, *magick_restrict q; ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) break; for (x=0; x < (ssize_t) columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; distance=fabs((double) (p[i]-(double) GetPixelChannel(reconstruct_image, channel,q))); if (distance >= MagickEpsilon) { mean_error_per_pixel+=distance; mean_error+=distance*distance; if (distance > maximum_error) maximum_error=distance; } area++; } p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); image->error.mean_error_per_pixel=(double) (mean_error_per_pixel/area); image->error.normalized_mean_error=(double) (QuantumScale*QuantumScale* mean_error/area); image->error.normalized_maximum_error=(double) (QuantumScale*maximum_error); status=image->error.mean_error_per_pixel == 0.0 ? MagickTrue : MagickFalse; return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S i m i l a r i t y I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SimilarityImage() compares the reference image of the image and returns the % best match offset. In addition, it returns a similarity image such that an % exact match location is completely white and if none of the pixels match, % black, otherwise some gray level in-between. 
% % The format of the SimilarityImageImage method is: % % Image *SimilarityImage(const Image *image,const Image *reference, % const MetricType metric,const double similarity_threshold, % RectangleInfo *offset,double *similarity,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o reference: find an area of the image that closely resembles this image. % % o metric: the metric. % % o similarity_threshold: minimum distortion for (sub)image match. % % o offset: the best match offset of the reference image within the image. % % o similarity: the computed similarity between the images. % % o exception: return any errors or warnings in this structure. % */ #if defined(MAGICKCORE_HDRI_SUPPORT) && defined(MAGICKCORE_FFTW_DELEGATE) static Image *CrossCorrelationImage(const Image *alpha_image, const Image *beta_image,ExceptionInfo *exception) { Image *clone_image, *complex_conjugate, *complex_multiplication, *cross_correlation, *fft_images; /* Take the FFT of beta image. */ clone_image=CloneImage(beta_image,0,0,MagickTrue,exception); if (clone_image == (Image *) NULL) return(clone_image); (void) SetImageArtifact(clone_image,"fourier:normalize","inverse"); fft_images=ForwardFourierTransformImage(clone_image,MagickFalse, exception); clone_image=DestroyImageList(clone_image); if (fft_images == (Image *) NULL) return(fft_images); /* Take the complex conjugate of beta image. */ complex_conjugate=ComplexImages(fft_images,ConjugateComplexOperator, exception); fft_images=DestroyImageList(fft_images); if (complex_conjugate == (Image *) NULL) return(complex_conjugate); /* Take the FFT of the alpha image. */ clone_image=CloneImage(alpha_image,0,0,MagickTrue,exception); if (clone_image == (Image *) NULL) { complex_conjugate=DestroyImageList(complex_conjugate); return(clone_image); } (void) SetImageArtifact(clone_image,"fourier:normalize","inverse"); fft_images=ForwardFourierTransformImage(clone_image,MagickFalse,exception); clone_image=DestroyImageList(clone_image); if (fft_images == (Image *) NULL) { complex_conjugate=DestroyImageList(complex_conjugate); return(fft_images); } complex_conjugate->next->next=fft_images; /* Do complex multiplication. */ (void) SetImageArtifact(complex_conjugate,"compose:clamp","false"); complex_multiplication=ComplexImages(complex_conjugate, MultiplyComplexOperator,exception); complex_conjugate=DestroyImageList(complex_conjugate); if (fft_images == (Image *) NULL) return(fft_images); /* Do the IFT and return the cross-correlation result. */ cross_correlation=InverseFourierTransformImage(complex_multiplication, complex_multiplication->next,MagickFalse,exception); complex_multiplication=DestroyImageList(complex_multiplication); return(cross_correlation); } static Image *NCCDivideImage(const Image *alpha_image,const Image *beta_image, ExceptionInfo *exception) { CacheView *alpha_view, *beta_view; Image *divide_image; MagickBooleanType status; ssize_t y; /* Divide one image into another. 
*/ divide_image=CloneImage(alpha_image,0,0,MagickTrue,exception); if (divide_image == (Image *) NULL) return(divide_image); status=MagickTrue; alpha_view=AcquireAuthenticCacheView(divide_image,exception); beta_view=AcquireVirtualCacheView(beta_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(beta_image,divide_image,divide_image->rows,1) #endif for (y=0; y < (ssize_t) divide_image->rows; y++) { const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(beta_view,0,y,beta_image->columns,1, exception); q=GetCacheViewAuthenticPixels(alpha_view,0,y,divide_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) divide_image->columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(divide_image); i++) { PixelChannel channel = GetPixelChannelChannel(divide_image,i); PixelTrait traits = GetPixelChannelTraits(divide_image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if (fabs(p[i]) >= MagickEpsilon) q[i]*=PerceptibleReciprocal(QuantumScale*p[i]); } p+=GetPixelChannels(beta_image); q+=GetPixelChannels(divide_image); } if (SyncCacheViewAuthenticPixels(alpha_view,exception) == MagickFalse) status=MagickFalse; } beta_view=DestroyCacheView(beta_view); alpha_view=DestroyCacheView(alpha_view); if (status == MagickFalse) divide_image=DestroyImage(divide_image); return(divide_image); } static MagickBooleanType NCCMaximaImage(const Image *image,double *maxima, RectangleInfo *offset,ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; ssize_t y; /* Identify the maxima value in the image and its location. */ status=MagickTrue; *maxima=0.0; offset->x=0; offset->y=0; image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *magick_restrict p; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double sum = 0.0; ssize_t channels = 0, i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; sum+=p[i]; channels++; } if ((channels != 0) && ((sum/channels) > *maxima)) { *maxima=sum/channels; offset->x=x; offset->y=y; } p+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status); } static MagickBooleanType NCCMultiplyImage(Image *image,const double factor, const ChannelStatistics *channel_statistics,ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; ssize_t y; /* Multiply each pixel by a factor. 
*/ status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if (channel_statistics != (const ChannelStatistics *) NULL) q[i]*=QuantumScale*channel_statistics[channel].standard_deviation; q[i]*=factor; } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status); } static Image *NCCSquareImage(const Image *image,ExceptionInfo *exception) { CacheView *image_view; Image *square_image; MagickBooleanType status; ssize_t y; /* Square each pixel in the image. */ square_image=CloneImage(image,0,0,MagickTrue,exception); if (square_image == (Image *) NULL) return(square_image); status=MagickTrue; image_view=AcquireAuthenticCacheView(square_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(square_image,square_image,square_image->rows,1) #endif for (y=0; y < (ssize_t) square_image->rows; y++) { Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,square_image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) square_image->columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(square_image); i++) { PixelChannel channel = GetPixelChannelChannel(square_image,i); PixelTrait traits = GetPixelChannelTraits(square_image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[i]*=QuantumScale*q[i]; } q+=GetPixelChannels(square_image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (status == MagickFalse) square_image=DestroyImage(square_image); return(square_image); } static Image *NCCSubtractImageMean(const Image *alpha_image, const Image *beta_image,const ChannelStatistics *channel_statistics, ExceptionInfo *exception) { CacheView *beta_view, *image_view; Image *gamma_image; MagickBooleanType status; ssize_t y; /* Subtract the image mean and pad. 
*/ gamma_image=CloneImage(beta_image,alpha_image->columns,alpha_image->rows, MagickTrue,exception); if (gamma_image == (Image *) NULL) return(gamma_image); status=MagickTrue; image_view=AcquireAuthenticCacheView(gamma_image,exception); beta_view=AcquireVirtualCacheView(beta_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(beta_image,gamma_image,gamma_image->rows,1) #endif for (y=0; y < (ssize_t) gamma_image->rows; y++) { const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(beta_view,0,y,beta_image->columns,1, exception); q=GetCacheViewAuthenticPixels(image_view,0,y,gamma_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) gamma_image->columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(gamma_image); i++) { PixelChannel channel = GetPixelChannelChannel(gamma_image,i); PixelTrait traits = GetPixelChannelTraits(gamma_image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if ((x >= (ssize_t) beta_image->columns) || (y >= (ssize_t) beta_image->rows)) q[i]=(Quantum) 0; else q[i]=p[i]-channel_statistics[channel].mean; } p+=GetPixelChannels(beta_image); q+=GetPixelChannels(gamma_image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } beta_view=DestroyCacheView(beta_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) gamma_image=DestroyImage(gamma_image); return(gamma_image); } static Image *NCCUnityImage(const Image *alpha_image,const Image *beta_image, ExceptionInfo *exception) { CacheView *image_view; Image *unity_image; MagickBooleanType status; ssize_t y; /* Create a padded unity image. */ unity_image=CloneImage(alpha_image,alpha_image->columns,alpha_image->rows, MagickTrue,exception); if (unity_image == (Image *) NULL) return(unity_image); status=MagickTrue; image_view=AcquireAuthenticCacheView(unity_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(unity_image,unity_image,unity_image->rows,1) #endif for (y=0; y < (ssize_t) unity_image->rows; y++) { Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,unity_image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) unity_image->columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(unity_image); i++) { PixelChannel channel = GetPixelChannelChannel(unity_image,i); PixelTrait traits = GetPixelChannelTraits(unity_image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[i]=QuantumRange; if ((x >= (ssize_t) beta_image->columns) || (y >= (ssize_t) beta_image->rows)) q[i]=(Quantum) 0; } q+=GetPixelChannels(unity_image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (status == MagickFalse) unity_image=DestroyImage(unity_image); return(unity_image); } static Image *NCCVarianceImage(Image *alpha_image,const Image *beta_image, ExceptionInfo *exception) { CacheView *beta_view, *image_view; Image *variance_image; MagickBooleanType status; ssize_t y; /* Compute the variance of the two images. 
*/ variance_image=CloneImage(alpha_image,0,0,MagickTrue,exception); if (variance_image == (Image *) NULL) return(variance_image); status=MagickTrue; image_view=AcquireAuthenticCacheView(variance_image,exception); beta_view=AcquireVirtualCacheView(beta_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(beta_image,variance_image,variance_image->rows,1) #endif for (y=0; y < (ssize_t) variance_image->rows; y++) { const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(beta_view,0,y,beta_image->columns,1, exception); q=GetCacheViewAuthenticPixels(image_view,0,y,variance_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) variance_image->columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(variance_image); i++) { PixelChannel channel = GetPixelChannelChannel(variance_image,i); PixelTrait traits = GetPixelChannelTraits(variance_image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[i]=ClampToQuantum((QuantumRange*sqrt(fabs((double) QuantumScale* (q[i]-p[i])))))/sqrt((double) QuantumRange); } p+=GetPixelChannels(beta_image); q+=GetPixelChannels(variance_image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } beta_view=DestroyCacheView(beta_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) variance_image=DestroyImage(variance_image); return(variance_image); } static Image *NCCSimilarityImage(const Image *image,const Image *reference, const MetricType metric,const double similarity_threshold, RectangleInfo *offset,double *similarity_metric,ExceptionInfo *exception) { #define DestroySimilarityResources() \ { \ if (channel_statistics != (ChannelStatistics *) NULL) \ channel_statistics=(ChannelStatistics *) \ RelinquishMagickMemory(channel_statistics); \ if (beta_image != (Image *) NULL) \ beta_image=DestroyImage(beta_image); \ if (gamma_image != (Image *) NULL) \ gamma_image=DestroyImage(gamma_image); \ if (ncc_image != (Image *) NULL) \ ncc_image=DestroyImage(ncc_image); \ if (normalize_image != (Image *) NULL) \ normalize_image=DestroyImage(normalize_image); \ if (square_image != (Image *) NULL) \ square_image=DestroyImage(square_image); \ if (unity_image != (Image *) NULL) \ unity_image=DestroyImage(unity_image); \ } #define ThrowSimilarityException() \ { \ DestroySimilarityResources() \ return((Image *) NULL); \ } ChannelStatistics *channel_statistics = (ChannelStatistics *) NULL; double maxima = 0.0; Image *beta_image = (Image *) NULL, *correlation_image = (Image *) NULL, *gamma_image = (Image *) NULL, *ncc_image = (Image *) NULL, *normalize_image = (Image *) NULL, *square_image = (Image *) NULL, *unity_image = (Image *) NULL; MagickBooleanType status; RectangleInfo geometry; /* Accelerated correlation-based image similary using FFT local statistics. Contributed by Fred Weinhaus. */ square_image=NCCSquareImage(image,exception); if (square_image == (Image *) NULL) ThrowSimilarityException(); unity_image=NCCUnityImage(image,reference,exception); if (unity_image == (Image *) NULL) ThrowSimilarityException(); /* Compute the cross correlation of the square and unity images. 
*/ ncc_image=CrossCorrelationImage(square_image,unity_image,exception); square_image=DestroyImage(square_image); \ if (ncc_image == (Image *) NULL) ThrowSimilarityException(); status=NCCMultiplyImage(ncc_image,(double) QuantumRange*reference->columns* reference->rows,(const ChannelStatistics *) NULL,exception); if (status == MagickFalse) ThrowSimilarityException(); /* Compute the cross correlation of the source and unity images. */ gamma_image=CrossCorrelationImage(image,unity_image,exception); unity_image=DestroyImage(unity_image); if (gamma_image == (Image *) NULL) ThrowSimilarityException(); square_image=NCCSquareImage(gamma_image,exception); gamma_image=DestroyImage(gamma_image); status=NCCMultiplyImage(square_image,(double) QuantumRange, (const ChannelStatistics *) NULL,exception); if (status == MagickFalse) ThrowSimilarityException(); /* Compute the variance of the two images. */ gamma_image=NCCVarianceImage(ncc_image,square_image,exception); square_image=DestroyImage(square_image); ncc_image=DestroyImage(ncc_image); if (gamma_image == (Image *) NULL) ThrowSimilarityException(); channel_statistics=GetImageStatistics(reference,exception); if (channel_statistics == (ChannelStatistics *) NULL) ThrowSimilarityException(); /* Subtract the image mean. */ status=NCCMultiplyImage(gamma_image,1.0,channel_statistics,exception); if (status == MagickFalse) ThrowSimilarityException(); normalize_image=NCCSubtractImageMean(image,reference,channel_statistics, exception); if (normalize_image == (Image *) NULL) ThrowSimilarityException(); ncc_image=CrossCorrelationImage(image,normalize_image,exception); normalize_image=DestroyImage(normalize_image); if (ncc_image == (Image *) NULL) ThrowSimilarityException(); /* Divide the two images. */ beta_image=NCCDivideImage(ncc_image,gamma_image,exception); ncc_image=DestroyImage(ncc_image); gamma_image=DestroyImage(gamma_image); if (beta_image == (Image *) NULL) ThrowSimilarityException(); (void) ResetImagePage(beta_image,"0x0+0+0"); SetGeometry(image,&geometry); geometry.width=image->columns-reference->columns; geometry.height=image->rows-reference->rows; /* Crop padding. */ correlation_image=CropImage(beta_image,&geometry,exception); beta_image=DestroyImage(beta_image); if (correlation_image == (Image *) NULL) ThrowSimilarityException(); (void) ResetImagePage(correlation_image,"0x0+0+0"); /* Identify the maxima value in the image and its location. 
*/ status=GrayscaleImage(correlation_image,AveragePixelIntensityMethod, exception); if (status == MagickFalse) ThrowSimilarityException(); status=NCCMaximaImage(correlation_image,&maxima,offset,exception); if (status == MagickFalse) { correlation_image=DestroyImage(correlation_image); ThrowSimilarityException(); } *similarity_metric=1.0-QuantumScale*maxima; DestroySimilarityResources(); return(correlation_image); } #endif static double GetSimilarityMetric(const Image *image,const Image *reference, const MetricType metric,const ssize_t x_offset,const ssize_t y_offset, ExceptionInfo *exception) { double distortion; Image *similarity_image; MagickBooleanType status; RectangleInfo geometry; SetGeometry(reference,&geometry); geometry.x=x_offset; geometry.y=y_offset; similarity_image=CropImage(image,&geometry,exception); if (similarity_image == (Image *) NULL) return(0.0); distortion=0.0; status=GetImageDistortion(similarity_image,reference,metric,&distortion, exception); similarity_image=DestroyImage(similarity_image); if (status == MagickFalse) return(0.0); return(distortion); } MagickExport Image *SimilarityImage(const Image *image,const Image *reference, const MetricType metric,const double similarity_threshold, RectangleInfo *offset,double *similarity_metric,ExceptionInfo *exception) { #define SimilarityImageTag "Similarity/Image" CacheView *similarity_view; Image *similarity_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); assert(offset != (RectangleInfo *) NULL); SetGeometry(reference,offset); *similarity_metric=MagickMaximumValue; #if defined(MAGICKCORE_HDRI_SUPPORT) && defined(MAGICKCORE_FFTW_DELEGATE) { const char *artifact = GetImageArtifact(image,"compare:accelerate-ncc"); MagickBooleanType accelerate = (artifact != (const char *) NULL) && (IsStringTrue(artifact) == MagickFalse) ? MagickFalse : MagickTrue; if ((accelerate != MagickFalse) && (metric == NormalizedCrossCorrelationErrorMetric)) { similarity_image=NCCSimilarityImage(image,reference,metric, similarity_threshold,offset,similarity_metric,exception); return(similarity_image); } } #endif similarity_image=CloneImage(image,image->columns-reference->columns+1, image->rows-reference->rows+1,MagickTrue,exception); if (similarity_image == (Image *) NULL) return((Image *) NULL); status=SetImageStorageClass(similarity_image,DirectClass,exception); if (status == MagickFalse) { similarity_image=DestroyImage(similarity_image); return((Image *) NULL); } (void) SetImageAlphaChannel(similarity_image,DeactivateAlphaChannel, exception); /* Measure similarity of reference image against image. 
*/ status=MagickTrue; progress=0; similarity_view=AcquireAuthenticCacheView(similarity_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) \ shared(progress,status,similarity_metric) \ magick_number_threads(image,image,image->rows-reference->rows+1,1) #endif for (y=0; y < (ssize_t) (image->rows-reference->rows+1); y++) { double similarity; Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp flush(similarity_metric) #endif if (*similarity_metric <= similarity_threshold) continue; q=GetCacheViewAuthenticPixels(similarity_view,0,y,similarity_image->columns, 1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) (image->columns-reference->columns+1); x++) { ssize_t i; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp flush(similarity_metric) #endif if (*similarity_metric <= similarity_threshold) break; similarity=GetSimilarityMetric(image,reference,metric,x,y,exception); if ((metric == NormalizedCrossCorrelationErrorMetric) || (metric == UndefinedErrorMetric)) similarity=1.0-similarity; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SimilarityImage) #endif if (similarity < *similarity_metric) { offset->x=x; offset->y=y; *similarity_metric=similarity; } if (metric == PerceptualHashErrorMetric) similarity=MagickMin(0.01*similarity,1.0); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait similarity_traits=GetPixelChannelTraits(similarity_image, channel); if ((traits == UndefinedPixelTrait) || (similarity_traits == UndefinedPixelTrait) || ((similarity_traits & UpdatePixelTrait) == 0)) continue; SetPixelChannel(similarity_image,channel,ClampToQuantum(QuantumRange- QuantumRange*similarity),q); } q+=GetPixelChannels(similarity_image); } if (SyncCacheViewAuthenticPixels(similarity_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,SimilarityImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } similarity_view=DestroyCacheView(similarity_view); if (status == MagickFalse) similarity_image=DestroyImage(similarity_image); return(similarity_image); }
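/*
  Illustrative usage sketch (not part of the ImageMagick source above): a
  minimal, hedged example of how SimilarityImage(), whose interface is
  documented earlier in this file, might be called from a standalone
  MagickCore program.  The file names "target.png" and "reference.png" are
  hypothetical placeholders, and error handling is kept to a minimum.
*/
#include <stdio.h>
#include <MagickCore/MagickCore.h>

int main(int argc, char **argv)
{
  double similarity_metric = 0.0;
  ExceptionInfo *exception;
  Image *target, *reference, *similarity;
  ImageInfo *image_info;
  RectangleInfo offset;

  (void) argc;
  MagickCoreGenesis(argv[0], MagickTrue);
  exception = AcquireExceptionInfo();
  image_info = AcquireImageInfo();
  (void) CopyMagickString(image_info->filename, "target.png", MagickPathExtent);
  target = ReadImage(image_info, exception);
  (void) CopyMagickString(image_info->filename, "reference.png", MagickPathExtent);
  reference = ReadImage(image_info, exception);
  if ((target != (Image *) NULL) && (reference != (Image *) NULL))
    {
      /* A similarity_threshold of 0.0 stops the scan early only on an exact match. */
      similarity = SimilarityImage(target, reference, RootMeanSquaredErrorMetric,
        0.0, &offset, &similarity_metric, exception);
      if (similarity != (Image *) NULL)
        {
          (void) printf("best match at +%ld+%ld, distortion %g\n",
            (long) offset.x, (long) offset.y, similarity_metric);
          similarity = DestroyImage(similarity);
        }
    }
  if (reference != (Image *) NULL)
    reference = DestroyImage(reference);
  if (target != (Image *) NULL)
    target = DestroyImage(target);
  image_info = DestroyImageInfo(image_info);
  exception = DestroyExceptionInfo(exception);
  MagickCoreTerminus();
  return 0;
}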
GB_binop__bxor_uint32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__bxor_uint32) // A.*B function (eWiseMult): GB (_AemultB_08__bxor_uint32) // A.*B function (eWiseMult): GB (_AemultB_02__bxor_uint32) // A.*B function (eWiseMult): GB (_AemultB_04__bxor_uint32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__bxor_uint32) // A*D function (colscale): GB (_AxD__bxor_uint32) // D*A function (rowscale): GB (_DxB__bxor_uint32) // C+=B function (dense accum): GB (_Cdense_accumB__bxor_uint32) // C+=b function (dense accum): GB (_Cdense_accumb__bxor_uint32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bxor_uint32) // C=scalar+B GB (_bind1st__bxor_uint32) // C=scalar+B' GB (_bind1st_tran__bxor_uint32) // C=A+scalar GB (_bind2nd__bxor_uint32) // C=A'+scalar GB (_bind2nd_tran__bxor_uint32) // C type: uint32_t // A type: uint32_t // A pattern? 0 // B type: uint32_t // B pattern? 0 // BinaryOp: cij = (aij) ^ (bij) #define GB_ATYPE \ uint32_t #define GB_BTYPE \ uint32_t #define GB_CTYPE \ uint32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint32_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint32_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x) ^ (y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BXOR || GxB_NO_UINT32 || GxB_NO_BXOR_UINT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__bxor_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__bxor_uint32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__bxor_uint32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint32_t uint32_t bwork = (*((uint32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__bxor_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *restrict Cx = (uint32_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__bxor_uint32) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *restrict Cx = (uint32_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__bxor_uint32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint32_t alpha_scalar ; uint32_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint32_t *) 
alpha_scalar_in)) ; beta_scalar = (*((uint32_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__bxor_uint32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__bxor_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__bxor_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__bxor_uint32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__bxor_uint32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t x = (*((uint32_t *) x_input)) ; uint32_t *Bx = (uint32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint32_t bij = GBX (Bx, p, false) ; Cx [p] = (x) ^ (bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__bxor_uint32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t *Ax = (uint32_t *) Ax_input ; uint32_t y = (*((uint32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint32_t aij = GBX (Ax, p, false) ; Cx [p] = (aij) ^ (y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x) ^ (aij) ; \ } GrB_Info GB (_bind1st_tran__bxor_uint32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int 
nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t x = (*((const uint32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij) ^ (y) ; \ } GrB_Info GB (_bind2nd_tran__bxor_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t y = (*((const uint32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
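/*
  Illustrative sketch (not part of the generated GraphBLAS kernel above): a
  plain-C analogue of what the _bind2nd kernel computes for this operator,
  namely Cx [p] = Ax [p] ^ y over the entries of a uint32_t array, where
  GB_BINOP(z,x,y,i,j) expands to z = (x) ^ (y).  It uses no GraphBLAS
  internals (GBB/GBX, bitmaps, iso handling, and the OpenMP parallel loop are
  all omitted), so it mirrors only the arithmetic, not the real kernel's
  sparsity logic.
*/
#include <stdint.h>
#include <stdio.h>

static void bxor_uint32_bind2nd (uint32_t *Cx, const uint32_t *Ax,
    uint32_t y, int64_t anz)
{
    for (int64_t p = 0 ; p < anz ; p++)
    {
        /* the BXOR_UINT32 binary operator: cij = aij ^ y */
        Cx [p] = Ax [p] ^ y ;
    }
}

int main (void)
{
    uint32_t Ax [4] = { 0x0F, 0xF0, 0xFF, 0x00 } ;
    uint32_t Cx [4] ;
    bxor_uint32_bind2nd (Cx, Ax, 0xFFu, 4) ;
    for (int p = 0 ; p < 4 ; p++)
    {
        printf ("Cx [%d] = 0x%02X\n", p, Cx [p]) ;
    }
    return (0) ;
}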
libperf.c
/** * Copyright (C) Mellanox Technologies Ltd. 2001-2019. ALL RIGHTS RESERVED. * Copyright (C) UT-Battelle, LLC. 2015. ALL RIGHTS RESERVED. * Copyright (C) The University of Tennessee and The University * of Tennessee Research Foundation. 2015-2016. ALL RIGHTS RESERVED. * Copyright (C) ARM Ltd. 2017. ALL RIGHTS RESERVED. * See file LICENSE for terms. */ #include <ucs/debug/log.h> #include <ucs/arch/bitops.h> #include <ucs/sys/module.h> #include <string.h> #include <malloc.h> #include <tools/perf/lib/libperf_int.h> #include <unistd.h> #if _OPENMP #include <omp.h> #endif /* _OPENMP */ #define ATOMIC_OP_CONFIG(_size, _op32, _op64, _op, _msg, _params, _status) \ _status = __get_atomic_flag((_size), (_op32), (_op64), (_op)); \ if (_status != UCS_OK) { \ ucs_error("%s/%s does not support atomic %s for message size %zu bytes", \ (_params)->uct.tl_name, (_params)->uct.dev_name, \ (_msg)[_op], (_size)); \ return _status; \ } #define ATOMIC_OP_CHECK(_size, _attr, _required, _params, _msg) \ if (!ucs_test_all_flags(_attr, _required)) { \ if ((_params)->flags & UCX_PERF_TEST_FLAG_VERBOSE) { \ ucs_error("%s/%s does not support required "#_size"-bit atomic: %s", \ (_params)->uct.tl_name, (_params)->uct.dev_name, \ (_msg)[ucs_ffs64(~(_attr) & (_required))]); \ } \ return UCS_ERR_UNSUPPORTED; \ } typedef struct { union { struct { size_t dev_addr_len; size_t iface_addr_len; size_t ep_addr_len; } uct; struct { size_t addr_len; } ucp; }; size_t rkey_size; unsigned long recv_buffer; } ucx_perf_ep_info_t; const ucx_perf_allocator_t* ucx_perf_mem_type_allocators[UCT_MD_MEM_TYPE_LAST]; static const char *perf_iface_ops[] = { [ucs_ilog2(UCT_IFACE_FLAG_AM_SHORT)] = "am short", [ucs_ilog2(UCT_IFACE_FLAG_AM_BCOPY)] = "am bcopy", [ucs_ilog2(UCT_IFACE_FLAG_AM_ZCOPY)] = "am zcopy", [ucs_ilog2(UCT_IFACE_FLAG_PUT_SHORT)] = "put short", [ucs_ilog2(UCT_IFACE_FLAG_PUT_BCOPY)] = "put bcopy", [ucs_ilog2(UCT_IFACE_FLAG_PUT_ZCOPY)] = "put zcopy", [ucs_ilog2(UCT_IFACE_FLAG_GET_SHORT)] = "get short", [ucs_ilog2(UCT_IFACE_FLAG_GET_BCOPY)] = "get bcopy", [ucs_ilog2(UCT_IFACE_FLAG_GET_ZCOPY)] = "get zcopy", [ucs_ilog2(UCT_IFACE_FLAG_ERRHANDLE_PEER_FAILURE)] = "peer failure handler", [ucs_ilog2(UCT_IFACE_FLAG_CONNECT_TO_IFACE)] = "connect to iface", [ucs_ilog2(UCT_IFACE_FLAG_CONNECT_TO_EP)] = "connect to ep", [ucs_ilog2(UCT_IFACE_FLAG_AM_DUP)] = "full reliability", [ucs_ilog2(UCT_IFACE_FLAG_CB_SYNC)] = "sync callback", [ucs_ilog2(UCT_IFACE_FLAG_CB_ASYNC)] = "async callback", [ucs_ilog2(UCT_IFACE_FLAG_EVENT_SEND_COMP)] = "send completion event", [ucs_ilog2(UCT_IFACE_FLAG_EVENT_RECV)] = "tag or active message event", [ucs_ilog2(UCT_IFACE_FLAG_EVENT_RECV_SIG)] = "signaled message event", [ucs_ilog2(UCT_IFACE_FLAG_PENDING)] = "pending", [ucs_ilog2(UCT_IFACE_FLAG_TAG_EAGER_SHORT)] = "tag eager short", [ucs_ilog2(UCT_IFACE_FLAG_TAG_EAGER_BCOPY)] = "tag eager bcopy", [ucs_ilog2(UCT_IFACE_FLAG_TAG_EAGER_ZCOPY)] = "tag eager zcopy", [ucs_ilog2(UCT_IFACE_FLAG_TAG_RNDV_ZCOPY)] = "tag rndv zcopy" }; static const char *perf_atomic_op[] = { [UCT_ATOMIC_OP_ADD] = "add", [UCT_ATOMIC_OP_AND] = "and", [UCT_ATOMIC_OP_OR] = "or" , [UCT_ATOMIC_OP_XOR] = "xor" }; static const char *perf_atomic_fop[] = { [UCT_ATOMIC_OP_ADD] = "fetch-add", [UCT_ATOMIC_OP_AND] = "fetch-and", [UCT_ATOMIC_OP_OR] = "fetch-or", [UCT_ATOMIC_OP_XOR] = "fetch-xor", [UCT_ATOMIC_OP_SWAP] = "swap", [UCT_ATOMIC_OP_CSWAP] = "cswap" }; /* * This Quickselect routine is based on the algorithm described in * "Numerical recipes in C", Second Edition, * Cambridge University Press, 1992, 
Section 8.5, ISBN 0-521-43108-5 * This code by Nicolas Devillard - 1998. Public domain. */ static ucs_time_t __find_median_quick_select(ucs_time_t arr[], int n) { int low, high ; int median; int middle, ll, hh; #define ELEM_SWAP(a,b) { register ucs_time_t t=(a);(a)=(b);(b)=t; } low = 0 ; high = n-1 ; median = (low + high) / 2; for (;;) { if (high <= low) /* One element only */ return arr[median] ; if (high == low + 1) { /* Two elements only */ if (arr[low] > arr[high]) ELEM_SWAP(arr[low], arr[high]) ; return arr[median] ; } /* Find median of low, middle and high items; swap into position low */ middle = (low + high) / 2; if (arr[middle] > arr[high]) ELEM_SWAP(arr[middle], arr[high]) ; if (arr[low] > arr[high]) ELEM_SWAP(arr[low], arr[high]) ; if (arr[middle] > arr[low]) ELEM_SWAP(arr[middle], arr[low]) ; /* Swap low item (now in position middle) into position (low+1) */ ELEM_SWAP(arr[middle], arr[low+1]) ; /* Nibble from each end towards middle, swapping items when stuck */ ll = low + 1; hh = high; for (;;) { do ll++; while (arr[low] > arr[ll]) ; do hh--; while (arr[hh] > arr[low]) ; if (hh < ll) break; ELEM_SWAP(arr[ll], arr[hh]) ; } /* Swap middle item (in position low) back into correct position */ ELEM_SWAP(arr[low], arr[hh]) ; /* Re-set active partition */ if (hh <= median) low = ll; if (hh >= median) high = hh - 1; } } static ucs_status_t uct_perf_test_alloc_mem(ucx_perf_context_t *perf) { ucx_perf_params_t *params = &perf->params; ucs_status_t status; unsigned flags; size_t buffer_size; if ((UCT_PERF_DATA_LAYOUT_ZCOPY == params->uct.data_layout) && params->iov_stride) { buffer_size = params->msg_size_cnt * params->iov_stride; } else { buffer_size = ucx_perf_get_message_size(params); } /* TODO use params->alignment */ flags = (params->flags & UCX_PERF_TEST_FLAG_MAP_NONBLOCK) ? UCT_MD_MEM_FLAG_NONBLOCK : 0; flags |= UCT_MD_MEM_ACCESS_ALL; /* Allocate send buffer memory */ status = uct_iface_mem_alloc(perf->uct.iface, buffer_size * params->thread_count, flags, "perftest", &perf->uct.send_mem); if (status != UCS_OK) { ucs_error("Failed allocate send buffer: %s", ucs_status_string(status)); goto err; } ucs_assert(perf->uct.send_mem.md == perf->uct.md); perf->send_buffer = perf->uct.send_mem.address; /* Allocate receive buffer memory */ status = uct_iface_mem_alloc(perf->uct.iface, buffer_size * params->thread_count, flags, "perftest", &perf->uct.recv_mem); if (status != UCS_OK) { ucs_error("Failed allocate receive buffer: %s", ucs_status_string(status)); goto err_free_send; } ucs_assert(perf->uct.recv_mem.md == perf->uct.md); perf->recv_buffer = perf->uct.recv_mem.address; /* Allocate IOV datatype memory */ perf->params.msg_size_cnt = params->msg_size_cnt; perf->uct.iov = malloc(sizeof(*perf->uct.iov) * perf->params.msg_size_cnt * params->thread_count); if (NULL == perf->uct.iov) { status = UCS_ERR_NO_MEMORY; ucs_error("Failed allocate send IOV(%lu) buffer: %s", perf->params.msg_size_cnt, ucs_status_string(status)); goto err_free_send; } perf->offset = 0; ucs_debug("allocated memory. 
Send buffer %p, Recv buffer %p", perf->send_buffer, perf->recv_buffer); return UCS_OK; err_free_send: uct_iface_mem_free(&perf->uct.send_mem); err: return status; } static void uct_perf_test_free_mem(ucx_perf_context_t *perf) { uct_iface_mem_free(&perf->uct.send_mem); uct_iface_mem_free(&perf->uct.recv_mem); free(perf->uct.iov); } void ucx_perf_test_start_clock(ucx_perf_context_t *perf) { ucs_time_t start_time = ucs_get_time(); perf->start_time_acc = ucs_get_accurate_time(); perf->end_time = (perf->params.max_time == 0.0) ? UINT64_MAX : ucs_time_from_sec(perf->params.max_time) + start_time; perf->prev_time = start_time; perf->prev.time = start_time; perf->prev.time_acc = perf->start_time_acc; perf->current.time_acc = perf->start_time_acc; } /* Initialize/reset all parameters that could be modified by the warm-up run */ static void ucx_perf_test_prepare_new_run(ucx_perf_context_t *perf, ucx_perf_params_t *params) { unsigned i; perf->max_iter = (perf->params.max_iter == 0) ? UINT64_MAX : perf->params.max_iter; perf->report_interval = ucs_time_from_sec(perf->params.report_interval); perf->current.time = 0; perf->current.msgs = 0; perf->current.bytes = 0; perf->current.iters = 0; perf->prev.msgs = 0; perf->prev.bytes = 0; perf->prev.iters = 0; perf->timing_queue_head = 0; for (i = 0; i < TIMING_QUEUE_SIZE; ++i) { perf->timing_queue[i] = 0; } ucx_perf_test_start_clock(perf); } static void ucx_perf_test_init(ucx_perf_context_t *perf, ucx_perf_params_t *params) { perf->params = *params; perf->offset = 0; perf->allocator = ucx_perf_mem_type_allocators[params->mem_type]; ucx_perf_test_prepare_new_run(perf, params); } void ucx_perf_calc_result(ucx_perf_context_t *perf, ucx_perf_result_t *result) { ucs_time_t median; double factor; if (perf->params.test_type == UCX_PERF_TEST_TYPE_PINGPONG) { factor = 2.0; } else { factor = 1.0; } result->iters = perf->current.iters; result->bytes = perf->current.bytes; result->elapsed_time = perf->current.time_acc - perf->start_time_acc; /* Latency */ median = __find_median_quick_select(perf->timing_queue, TIMING_QUEUE_SIZE); result->latency.typical = ucs_time_to_sec(median) / factor; result->latency.moment_average = (perf->current.time_acc - perf->prev.time_acc) / (perf->current.iters - perf->prev.iters) / factor; result->latency.total_average = (perf->current.time_acc - perf->start_time_acc) / perf->current.iters / factor; /* Bandwidth */ result->bandwidth.typical = 0.0; // Undefined result->bandwidth.moment_average = (perf->current.bytes - perf->prev.bytes) / (perf->current.time_acc - perf->prev.time_acc) * factor; result->bandwidth.total_average = perf->current.bytes / (perf->current.time_acc - perf->start_time_acc) * factor; /* Packet rate */ result->msgrate.typical = 0.0; // Undefined result->msgrate.moment_average = (perf->current.msgs - perf->prev.msgs) / (perf->current.time_acc - perf->prev.time_acc) * factor; result->msgrate.total_average = perf->current.msgs / (perf->current.time_acc - perf->start_time_acc) * factor; } static ucs_status_t ucx_perf_test_check_params(ucx_perf_params_t *params) { size_t it; if (ucx_perf_get_message_size(params) < 1) { if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("Message size too small, need to be at least 1"); } return UCS_ERR_INVALID_PARAM; } if (params->max_outstanding < 1) { if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("max_outstanding, need to be at least 1"); } return UCS_ERR_INVALID_PARAM; } /* check if particular message size fit into stride size */ if (params->iov_stride) { for (it = 0; 
it < params->msg_size_cnt; ++it) { if (params->msg_size_list[it] > params->iov_stride) { if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("Buffer size %lu bigger than stride %lu", params->msg_size_list[it], params->iov_stride); } return UCS_ERR_INVALID_PARAM; } } } return UCS_OK; } void uct_perf_iface_flush_b(ucx_perf_context_t *perf) { ucs_status_t status; do { status = uct_iface_flush(perf->uct.iface, 0, NULL); uct_worker_progress(perf->uct.worker); } while (status == UCS_INPROGRESS); } static inline uint64_t __get_flag(uct_perf_data_layout_t layout, uint64_t short_f, uint64_t bcopy_f, uint64_t zcopy_f) { return (layout == UCT_PERF_DATA_LAYOUT_SHORT) ? short_f : (layout == UCT_PERF_DATA_LAYOUT_BCOPY) ? bcopy_f : (layout == UCT_PERF_DATA_LAYOUT_ZCOPY) ? zcopy_f : 0; } static inline ucs_status_t __get_atomic_flag(size_t size, uint64_t *op32, uint64_t *op64, uint64_t op) { if (size == sizeof(uint32_t)) { *op32 = UCS_BIT(op); return UCS_OK; } else if (size == sizeof(uint64_t)) { *op64 = UCS_BIT(op); return UCS_OK; } return UCS_ERR_UNSUPPORTED; } static inline size_t __get_max_size(uct_perf_data_layout_t layout, size_t short_m, size_t bcopy_m, uint64_t zcopy_m) { return (layout == UCT_PERF_DATA_LAYOUT_SHORT) ? short_m : (layout == UCT_PERF_DATA_LAYOUT_BCOPY) ? bcopy_m : (layout == UCT_PERF_DATA_LAYOUT_ZCOPY) ? zcopy_m : 0; } static ucs_status_t uct_perf_test_check_capabilities(ucx_perf_params_t *params, uct_iface_h iface) { uint64_t required_flags = 0; uint64_t atomic_op32 = 0; uint64_t atomic_op64 = 0; uint64_t atomic_fop32 = 0; uint64_t atomic_fop64 = 0; uct_iface_attr_t attr; ucs_status_t status; size_t min_size, max_size, max_iov, message_size; status = uct_iface_query(iface, &attr); if (status != UCS_OK) { return status; } min_size = 0; max_iov = 1; message_size = ucx_perf_get_message_size(params); switch (params->command) { case UCX_PERF_CMD_AM: required_flags = __get_flag(params->uct.data_layout, UCT_IFACE_FLAG_AM_SHORT, UCT_IFACE_FLAG_AM_BCOPY, UCT_IFACE_FLAG_AM_ZCOPY); required_flags |= UCT_IFACE_FLAG_CB_SYNC; min_size = __get_max_size(params->uct.data_layout, 0, 0, attr.cap.am.min_zcopy); max_size = __get_max_size(params->uct.data_layout, attr.cap.am.max_short, attr.cap.am.max_bcopy, attr.cap.am.max_zcopy); max_iov = attr.cap.am.max_iov; break; case UCX_PERF_CMD_PUT: required_flags = __get_flag(params->uct.data_layout, UCT_IFACE_FLAG_PUT_SHORT, UCT_IFACE_FLAG_PUT_BCOPY, UCT_IFACE_FLAG_PUT_ZCOPY); min_size = __get_max_size(params->uct.data_layout, 0, 0, attr.cap.put.min_zcopy); max_size = __get_max_size(params->uct.data_layout, attr.cap.put.max_short, attr.cap.put.max_bcopy, attr.cap.put.max_zcopy); max_iov = attr.cap.put.max_iov; break; case UCX_PERF_CMD_GET: required_flags = __get_flag(params->uct.data_layout, UCT_IFACE_FLAG_GET_SHORT, UCT_IFACE_FLAG_GET_BCOPY, UCT_IFACE_FLAG_GET_ZCOPY); min_size = __get_max_size(params->uct.data_layout, 0, 0, attr.cap.get.min_zcopy); max_size = __get_max_size(params->uct.data_layout, attr.cap.get.max_short, attr.cap.get.max_bcopy, attr.cap.get.max_zcopy); max_iov = attr.cap.get.max_iov; break; case UCX_PERF_CMD_ADD: ATOMIC_OP_CONFIG(message_size, &atomic_op32, &atomic_op64, UCT_ATOMIC_OP_ADD, perf_atomic_op, params, status); max_size = 8; break; case UCX_PERF_CMD_FADD: ATOMIC_OP_CONFIG(message_size, &atomic_fop32, &atomic_fop64, UCT_ATOMIC_OP_ADD, perf_atomic_fop, params, status); max_size = 8; break; case UCX_PERF_CMD_SWAP: ATOMIC_OP_CONFIG(message_size, &atomic_fop32, &atomic_fop64, UCT_ATOMIC_OP_SWAP, perf_atomic_fop, params, 
status); max_size = 8; break; case UCX_PERF_CMD_CSWAP: ATOMIC_OP_CONFIG(message_size, &atomic_fop32, &atomic_fop64, UCT_ATOMIC_OP_CSWAP, perf_atomic_fop, params, status); max_size = 8; break; default: if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("Invalid test command"); } return UCS_ERR_INVALID_PARAM; } status = ucx_perf_test_check_params(params); if (status != UCS_OK) { return status; } /* check atomics first */ ATOMIC_OP_CHECK(32, attr.cap.atomic32.op_flags, atomic_op32, params, perf_atomic_op); ATOMIC_OP_CHECK(64, attr.cap.atomic64.op_flags, atomic_op64, params, perf_atomic_op); ATOMIC_OP_CHECK(32, attr.cap.atomic32.fop_flags, atomic_fop32, params, perf_atomic_fop); ATOMIC_OP_CHECK(64, attr.cap.atomic64.fop_flags, atomic_fop64, params, perf_atomic_fop); /* check iface flags */ if (!(atomic_op32 | atomic_op64 | atomic_fop32 | atomic_fop64) && (!ucs_test_all_flags(attr.cap.flags, required_flags) || !required_flags)) { if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("%s/%s does not support operation %s", params->uct.tl_name, params->uct.dev_name, perf_iface_ops[ucs_ffs64(~attr.cap.flags & required_flags)]); } return UCS_ERR_UNSUPPORTED; } if (message_size < min_size) { if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("Message size (%zu) is smaller than min supported (%zu)", message_size, min_size); } return UCS_ERR_UNSUPPORTED; } if (message_size > max_size) { if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("Message size (%zu) is larger than max supported (%zu)", message_size, max_size); } return UCS_ERR_UNSUPPORTED; } if (params->command == UCX_PERF_CMD_AM) { if ((params->uct.data_layout == UCT_PERF_DATA_LAYOUT_SHORT) && (params->am_hdr_size != sizeof(uint64_t))) { if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("Short AM header size must be 8 bytes"); } return UCS_ERR_INVALID_PARAM; } if ((params->uct.data_layout == UCT_PERF_DATA_LAYOUT_ZCOPY) && (params->am_hdr_size > attr.cap.am.max_hdr)) { if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("AM header size (%zu) is larger than max supported (%zu)", params->am_hdr_size, attr.cap.am.max_hdr); } return UCS_ERR_UNSUPPORTED; } if (params->am_hdr_size > message_size) { if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("AM header size (%zu) is larger than message size (%zu)", params->am_hdr_size, message_size); } return UCS_ERR_INVALID_PARAM; } if (params->uct.fc_window > UCT_PERF_TEST_MAX_FC_WINDOW) { if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("AM flow-control window (%d) too large (should be <= %d)", params->uct.fc_window, UCT_PERF_TEST_MAX_FC_WINDOW); } return UCS_ERR_INVALID_PARAM; } if ((params->flags & UCX_PERF_TEST_FLAG_ONE_SIDED) && (params->flags & UCX_PERF_TEST_FLAG_VERBOSE)) { ucs_warn("Running active-message test with on-sided progress"); } } if (UCT_PERF_DATA_LAYOUT_ZCOPY == params->uct.data_layout) { if (params->msg_size_cnt > max_iov) { if ((params->flags & UCX_PERF_TEST_FLAG_VERBOSE) || !params->msg_size_cnt) { ucs_error("Wrong number of IOV entries. 
Requested is %lu, " "should be in the range 1...%lu", params->msg_size_cnt, max_iov); } return UCS_ERR_UNSUPPORTED; } /* if msg_size_cnt == 1 the message size checked above */ if ((UCX_PERF_CMD_AM == params->command) && (params->msg_size_cnt > 1)) { if (params->am_hdr_size > params->msg_size_list[0]) { if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("AM header size (%lu) larger than the first IOV " "message size (%lu)", params->am_hdr_size, params->msg_size_list[0]); } return UCS_ERR_INVALID_PARAM; } } } return UCS_OK; } static ucs_status_t uct_perf_test_setup_endpoints(ucx_perf_context_t *perf) { const size_t buffer_size = 2048; ucx_perf_ep_info_t info, *remote_info; unsigned group_size, i, group_index; uct_device_addr_t *dev_addr; uct_iface_addr_t *iface_addr; uct_ep_addr_t *ep_addr; uct_iface_attr_t iface_attr; uct_md_attr_t md_attr; uct_ep_params_t ep_params; void *rkey_buffer; ucs_status_t status; struct iovec vec[5]; void *buffer; void *req; buffer = malloc(buffer_size); if (buffer == NULL) { ucs_error("Failed to allocate RTE buffer"); status = UCS_ERR_NO_MEMORY; goto err; } status = uct_iface_query(perf->uct.iface, &iface_attr); if (status != UCS_OK) { ucs_error("Failed to uct_iface_query: %s", ucs_status_string(status)); goto err_free; } status = uct_md_query(perf->uct.md, &md_attr); if (status != UCS_OK) { ucs_error("Failed to uct_md_query: %s", ucs_status_string(status)); goto err_free; } if (md_attr.cap.flags & (UCT_MD_FLAG_ALLOC|UCT_MD_FLAG_REG)) { info.rkey_size = md_attr.rkey_packed_size; } else { info.rkey_size = 0; } info.uct.dev_addr_len = iface_attr.device_addr_len; info.uct.iface_addr_len = iface_attr.iface_addr_len; info.uct.ep_addr_len = iface_attr.ep_addr_len; info.recv_buffer = (uintptr_t)perf->recv_buffer; rkey_buffer = buffer; dev_addr = (void*)rkey_buffer + info.rkey_size; iface_addr = (void*)dev_addr + info.uct.dev_addr_len; ep_addr = (void*)iface_addr + info.uct.iface_addr_len; ucs_assert_always((void*)ep_addr + info.uct.ep_addr_len <= buffer + buffer_size); status = uct_iface_get_device_address(perf->uct.iface, dev_addr); if (status != UCS_OK) { ucs_error("Failed to uct_iface_get_device_address: %s", ucs_status_string(status)); goto err_free; } status = uct_iface_get_address(perf->uct.iface, iface_addr); if (status != UCS_OK) { ucs_error("Failed to uct_iface_get_address: %s", ucs_status_string(status)); goto err_free; } if (info.rkey_size > 0) { memset(rkey_buffer, 0, info.rkey_size); status = uct_md_mkey_pack(perf->uct.md, perf->uct.recv_mem.memh, rkey_buffer); if (status != UCS_OK) { ucs_error("Failed to uct_rkey_pack: %s", ucs_status_string(status)); goto err_free; } } group_size = rte_call(perf, group_size); group_index = rte_call(perf, group_index); perf->uct.peers = calloc(group_size, sizeof(*perf->uct.peers)); if (perf->uct.peers == NULL) { goto err_free; } ep_params.field_mask = UCT_EP_PARAM_FIELD_IFACE; ep_params.iface = perf->uct.iface; if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_EP) { for (i = 0; i < group_size; ++i) { if (i == group_index) { continue; } status = uct_ep_create(&ep_params, &perf->uct.peers[i].ep); if (status != UCS_OK) { ucs_error("Failed to uct_ep_create: %s", ucs_status_string(status)); goto err_destroy_eps; } status = uct_ep_get_address(perf->uct.peers[i].ep, ep_addr); if (status != UCS_OK) { ucs_error("Failed to uct_ep_get_address: %s", ucs_status_string(status)); goto err_destroy_eps; } } } else if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_IFACE) { ep_params.field_mask |= UCT_EP_PARAM_FIELD_DEV_ADDR | 
UCT_EP_PARAM_FIELD_IFACE_ADDR; } vec[0].iov_base = &info; vec[0].iov_len = sizeof(info); vec[1].iov_base = buffer; vec[1].iov_len = info.rkey_size + info.uct.dev_addr_len + info.uct.iface_addr_len + info.uct.ep_addr_len; rte_call(perf, post_vec, vec, 2, &req); rte_call(perf, exchange_vec, req); for (i = 0; i < group_size; ++i) { if (i == group_index) { continue; } rte_call(perf, recv, i, buffer, buffer_size, req); remote_info = buffer; rkey_buffer = remote_info + 1; dev_addr = (void*)rkey_buffer + remote_info->rkey_size; iface_addr = (void*)dev_addr + remote_info->uct.dev_addr_len; ep_addr = (void*)iface_addr + remote_info->uct.iface_addr_len; perf->uct.peers[i].remote_addr = remote_info->recv_buffer; if (!uct_iface_is_reachable(perf->uct.iface, dev_addr, remote_info->uct.iface_addr_len ? iface_addr : NULL)) { ucs_error("Destination is unreachable"); status = UCS_ERR_UNREACHABLE; goto err_destroy_eps; } if (remote_info->rkey_size > 0) { status = uct_rkey_unpack(rkey_buffer, &perf->uct.peers[i].rkey); if (status != UCS_OK) { ucs_error("Failed to uct_rkey_unpack: %s", ucs_status_string(status)); goto err_destroy_eps; } } else { perf->uct.peers[i].rkey.handle = NULL; perf->uct.peers[i].rkey.type = NULL; perf->uct.peers[i].rkey.rkey = UCT_INVALID_RKEY; } if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_EP) { status = uct_ep_connect_to_ep(perf->uct.peers[i].ep, dev_addr, ep_addr); } else if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_IFACE) { ep_params.dev_addr = dev_addr; ep_params.iface_addr = iface_addr; status = uct_ep_create(&ep_params, &perf->uct.peers[i].ep); } else { status = UCS_ERR_UNSUPPORTED; } if (status != UCS_OK) { ucs_error("Failed to connect endpoint: %s", ucs_status_string(status)); goto err_destroy_eps; } } uct_perf_iface_flush_b(perf); free(buffer); uct_perf_barrier(perf); return UCS_OK; err_destroy_eps: for (i = 0; i < group_size; ++i) { if (perf->uct.peers[i].rkey.type != NULL) { uct_rkey_release(&perf->uct.peers[i].rkey); } if (perf->uct.peers[i].ep != NULL) { uct_ep_destroy(perf->uct.peers[i].ep); } } free(perf->uct.peers); err_free: free(buffer); err: return status; } static void uct_perf_test_cleanup_endpoints(ucx_perf_context_t *perf) { unsigned group_size, group_index, i; uct_perf_barrier(perf); uct_iface_set_am_handler(perf->uct.iface, UCT_PERF_TEST_AM_ID, NULL, NULL, 0); group_size = rte_call(perf, group_size); group_index = rte_call(perf, group_index); for (i = 0; i < group_size; ++i) { if (i != group_index) { if (perf->uct.peers[i].rkey.rkey != UCT_INVALID_RKEY) { uct_rkey_release(&perf->uct.peers[i].rkey); } if (perf->uct.peers[i].ep) { uct_ep_destroy(perf->uct.peers[i].ep); } } } free(perf->uct.peers); } static ucs_status_t ucp_perf_test_fill_params(ucx_perf_params_t *params, ucp_params_t *ucp_params) { ucs_status_t status, message_size; message_size = ucx_perf_get_message_size(params); switch (params->command) { case UCX_PERF_CMD_PUT: case UCX_PERF_CMD_GET: ucp_params->features |= UCP_FEATURE_RMA; break; case UCX_PERF_CMD_ADD: case UCX_PERF_CMD_FADD: case UCX_PERF_CMD_SWAP: case UCX_PERF_CMD_CSWAP: if (message_size == sizeof(uint32_t)) { ucp_params->features |= UCP_FEATURE_AMO32; } else if (message_size == sizeof(uint64_t)) { ucp_params->features |= UCP_FEATURE_AMO64; } else { if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("Atomic size should be either 32 or 64 bit"); } return UCS_ERR_INVALID_PARAM; } break; case UCX_PERF_CMD_TAG: case UCX_PERF_CMD_TAG_SYNC: ucp_params->features |= UCP_FEATURE_TAG; ucp_params->field_mask |= 
UCP_PARAM_FIELD_REQUEST_SIZE; ucp_params->request_size = sizeof(ucp_perf_request_t); break; case UCX_PERF_CMD_STREAM: ucp_params->features |= UCP_FEATURE_STREAM; ucp_params->field_mask |= UCP_PARAM_FIELD_REQUEST_SIZE; ucp_params->request_size = sizeof(ucp_perf_request_t); break; default: if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("Invalid test command"); } return UCS_ERR_INVALID_PARAM; } status = ucx_perf_test_check_params(params); if (status != UCS_OK) { return status; } return UCS_OK; } static ucs_status_t ucp_perf_test_alloc_iov_mem(ucp_perf_datatype_t datatype, size_t iovcnt, unsigned thread_count, ucp_dt_iov_t **iov_p) { ucp_dt_iov_t *iov; if (UCP_PERF_DATATYPE_IOV == datatype) { iov = malloc(sizeof(*iov) * iovcnt * thread_count); if (NULL == iov) { ucs_error("Failed allocate IOV buffer with iovcnt=%lu", iovcnt); return UCS_ERR_NO_MEMORY; } *iov_p = iov; } return UCS_OK; } static ucs_status_t ucp_perf_test_alloc_host(ucx_perf_context_t *perf, size_t length, void **address_p, ucp_mem_h *memh, int non_blk_flag) { ucp_mem_map_params_t mem_map_params; ucp_mem_attr_t mem_attr; ucs_status_t status; mem_map_params.field_mask = UCP_MEM_MAP_PARAM_FIELD_ADDRESS | UCP_MEM_MAP_PARAM_FIELD_LENGTH | UCP_MEM_MAP_PARAM_FIELD_FLAGS; mem_map_params.address = *address_p; mem_map_params.length = length; mem_map_params.flags = UCP_MEM_MAP_ALLOCATE; if (perf->params.flags & UCX_PERF_TEST_FLAG_MAP_NONBLOCK) { mem_map_params.flags |= non_blk_flag; } status = ucp_mem_map(perf->ucp.context, &mem_map_params, memh); if (status != UCS_OK) { goto err; } mem_attr.field_mask = UCP_MEM_ATTR_FIELD_ADDRESS; status = ucp_mem_query(*memh, &mem_attr); if (status != UCS_OK) { goto err; } *address_p = mem_attr.address; return UCS_OK; err: return status; } static void ucp_perf_test_free_host(ucx_perf_context_t *perf, void *address, ucp_mem_h memh) { ucs_status_t status; status = ucp_mem_unmap(perf->ucp.context, memh); if (status != UCS_OK) { ucs_warn("ucp_mem_unmap() failed: %s", ucs_status_string(status)); } } static ucs_status_t ucp_perf_test_alloc_mem(ucx_perf_context_t *perf) { ucx_perf_params_t *params = &perf->params; ucs_status_t status; size_t buffer_size; if (params->iov_stride) { buffer_size = params->msg_size_cnt * params->iov_stride; } else { buffer_size = ucx_perf_get_message_size(params); } /* Allocate send buffer memory */ perf->send_buffer = NULL; status = perf->allocator->ucp_alloc(perf, buffer_size * params->thread_count, &perf->send_buffer, &perf->ucp.send_memh, UCP_MEM_MAP_NONBLOCK); if (status != UCS_OK) { goto err; } /* Allocate receive buffer memory */ perf->recv_buffer = NULL; status = perf->allocator->ucp_alloc(perf, buffer_size * params->thread_count, &perf->recv_buffer, &perf->ucp.recv_memh, 0); if (status != UCS_OK) { goto err_free_send_buffer; } /* Allocate IOV datatype memory */ perf->ucp.send_iov = NULL; status = ucp_perf_test_alloc_iov_mem(params->ucp.send_datatype, perf->params.msg_size_cnt, params->thread_count, &perf->ucp.send_iov); if (UCS_OK != status) { goto err_free_buffers; } perf->ucp.recv_iov = NULL; status = ucp_perf_test_alloc_iov_mem(params->ucp.recv_datatype, perf->params.msg_size_cnt, params->thread_count, &perf->ucp.recv_iov); if (UCS_OK != status) { goto err_free_send_iov_buffers; } return UCS_OK; err_free_send_iov_buffers: free(perf->ucp.send_iov); err_free_buffers: perf->allocator->ucp_free(perf, perf->recv_buffer, perf->ucp.recv_memh); err_free_send_buffer: perf->allocator->ucp_free(perf, perf->send_buffer, perf->ucp.send_memh); err: return 
UCS_ERR_NO_MEMORY; } static void ucp_perf_test_free_mem(ucx_perf_context_t *perf) { free(perf->ucp.recv_iov); free(perf->ucp.send_iov); perf->allocator->ucp_free(perf, perf->recv_buffer, perf->ucp.recv_memh); perf->allocator->ucp_free(perf, perf->send_buffer, perf->ucp.send_memh); } static void ucp_perf_test_destroy_eps(ucx_perf_context_t* perf, unsigned group_size) { ucs_status_ptr_t *reqs; ucp_tag_recv_info_t info; ucs_status_t status; unsigned i; reqs = calloc(sizeof(*reqs), group_size); for (i = 0; i < group_size; ++i) { if (perf->ucp.peers[i].rkey != NULL) { ucp_rkey_destroy(perf->ucp.peers[i].rkey); } if (perf->ucp.peers[i].ep != NULL) { reqs[i] = ucp_disconnect_nb(perf->ucp.peers[i].ep); } } for (i = 0; i < group_size; ++i) { if (!UCS_PTR_IS_PTR(reqs[i])) { continue; } do { ucp_worker_progress(perf->ucp.worker); status = ucp_request_test(reqs[i], &info); } while (status == UCS_INPROGRESS); ucp_request_release(reqs[i]); } free(reqs); free(perf->ucp.peers); } static ucs_status_t ucp_perf_test_exchange_status(ucx_perf_context_t *perf, ucs_status_t status) { unsigned group_size = rte_call(perf, group_size); ucs_status_t collective_status = status; struct iovec vec; void *req = NULL; unsigned i; vec.iov_base = &status; vec.iov_len = sizeof(status); rte_call(perf, post_vec, &vec, 1, &req); rte_call(perf, exchange_vec, req); for (i = 0; i < group_size; ++i) { rte_call(perf, recv, i, &status, sizeof(status), req); if (status != UCS_OK) { collective_status = status; } } return collective_status; } static ucs_status_t ucp_perf_test_setup_endpoints(ucx_perf_context_t *perf, uint64_t features) { const size_t buffer_size = 2048; ucx_perf_ep_info_t info, *remote_info; unsigned group_size, i, group_index; ucp_address_t *address; size_t address_length = 0; ucp_ep_params_t ep_params; ucs_status_t status; struct iovec vec[3]; void *rkey_buffer; void *req = NULL; void *buffer; group_size = rte_call(perf, group_size); group_index = rte_call(perf, group_index); status = ucp_worker_get_address(perf->ucp.worker, &address, &address_length); if (status != UCS_OK) { if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("ucp_worker_get_address() failed: %s", ucs_status_string(status)); } goto err; } info.ucp.addr_len = address_length; info.recv_buffer = (uintptr_t)perf->recv_buffer; vec[0].iov_base = &info; vec[0].iov_len = sizeof(info); vec[1].iov_base = address; vec[1].iov_len = address_length; if (features & (UCP_FEATURE_RMA|UCP_FEATURE_AMO32|UCP_FEATURE_AMO64)) { status = ucp_rkey_pack(perf->ucp.context, perf->ucp.recv_memh, &rkey_buffer, &info.rkey_size); if (status != UCS_OK) { if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("ucp_rkey_pack() failed: %s", ucs_status_string(status)); } ucp_worker_release_address(perf->ucp.worker, address); goto err; } vec[2].iov_base = rkey_buffer; vec[2].iov_len = info.rkey_size; rte_call(perf, post_vec, vec, 3, &req); ucp_rkey_buffer_release(rkey_buffer); } else { info.rkey_size = 0; rte_call(perf, post_vec, vec, 2, &req); } ucp_worker_release_address(perf->ucp.worker, address); rte_call(perf, exchange_vec, req); perf->ucp.peers = calloc(group_size, sizeof(*perf->uct.peers)); if (perf->ucp.peers == NULL) { goto err; } buffer = malloc(buffer_size); if (buffer == NULL) { ucs_error("Failed to allocate RTE receive buffer"); status = UCS_ERR_NO_MEMORY; goto err_destroy_eps; } for (i = 0; i < group_size; ++i) { if (i == group_index) { continue; } rte_call(perf, recv, i, buffer, buffer_size, req); remote_info = buffer; address = (void*)(remote_info 
+ 1); rkey_buffer = (void*)address + remote_info->ucp.addr_len; perf->ucp.peers[i].remote_addr = remote_info->recv_buffer; ep_params.field_mask = UCP_EP_PARAM_FIELD_REMOTE_ADDRESS; ep_params.address = address; status = ucp_ep_create(perf->ucp.worker, &ep_params, &perf->ucp.peers[i].ep); if (status != UCS_OK) { if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("ucp_ep_create() failed: %s", ucs_status_string(status)); } goto err_free_buffer; } if (remote_info->rkey_size > 0) { status = ucp_ep_rkey_unpack(perf->ucp.peers[i].ep, rkey_buffer, &perf->ucp.peers[i].rkey); if (status != UCS_OK) { if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_fatal("ucp_rkey_unpack() failed: %s", ucs_status_string(status)); } goto err_free_buffer; } } else { perf->ucp.peers[i].rkey = NULL; } } free(buffer); status = ucp_perf_test_exchange_status(perf, UCS_OK); if (status != UCS_OK) { ucp_perf_test_destroy_eps(perf, group_size); } /* force wireup completion */ status = ucp_worker_flush(perf->ucp.worker); if (status != UCS_OK) { ucs_warn("ucp_worker_flush() failed: %s", ucs_status_string(status)); } return status; err_free_buffer: free(buffer); err_destroy_eps: ucp_perf_test_destroy_eps(perf, group_size); err: (void)ucp_perf_test_exchange_status(perf, status); return status; } static void ucp_perf_test_cleanup_endpoints(ucx_perf_context_t *perf) { unsigned group_size; ucp_perf_barrier(perf); group_size = rte_call(perf, group_size); ucp_perf_test_destroy_eps(perf, group_size); } static void ucx_perf_set_warmup(ucx_perf_context_t* perf, ucx_perf_params_t* params) { perf->max_iter = ucs_min(params->warmup_iter, params->max_iter / 10); perf->report_interval = -1; } static ucs_status_t uct_perf_create_md(ucx_perf_context_t *perf) { uct_md_resource_desc_t *md_resources; uct_tl_resource_desc_t *tl_resources; unsigned i, num_md_resources; unsigned j, num_tl_resources; ucs_status_t status; uct_md_h md; uct_md_config_t *md_config; status = uct_query_md_resources(&md_resources, &num_md_resources); if (status != UCS_OK) { goto out; } for (i = 0; i < num_md_resources; ++i) { status = uct_md_config_read(md_resources[i].md_name, NULL, NULL, &md_config); if (status != UCS_OK) { goto out_release_md_resources; } status = uct_md_open(md_resources[i].md_name, md_config, &md); uct_config_release(md_config); if (status != UCS_OK) { goto out_release_md_resources; } status = uct_md_query_tl_resources(md, &tl_resources, &num_tl_resources); if (status != UCS_OK) { uct_md_close(md); goto out_release_md_resources; } for (j = 0; j < num_tl_resources; ++j) { if (!strcmp(perf->params.uct.tl_name, tl_resources[j].tl_name) && !strcmp(perf->params.uct.dev_name, tl_resources[j].dev_name)) { uct_release_tl_resource_list(tl_resources); perf->uct.md = md; status = UCS_OK; goto out_release_md_resources; } } uct_md_close(md); uct_release_tl_resource_list(tl_resources); } ucs_error("Cannot use transport %s on device %s", perf->params.uct.tl_name, perf->params.uct.dev_name); status = UCS_ERR_NO_DEVICE; out_release_md_resources: uct_release_md_resource_list(md_resources); out: return status; } void uct_perf_barrier(ucx_perf_context_t *perf) { rte_call(perf, barrier, (void(*)(void*))uct_worker_progress, (void*)perf->uct.worker); } void ucp_perf_barrier(ucx_perf_context_t *perf) { rte_call(perf, barrier, (void(*)(void*))ucp_worker_progress, (void*)perf->ucp.worker); } static ucs_status_t uct_perf_setup(ucx_perf_context_t *perf) { ucx_perf_params_t *params = &perf->params; uct_iface_config_t *iface_config; ucs_status_t status; 
uct_iface_params_t iface_params = { .field_mask = UCT_IFACE_PARAM_FIELD_OPEN_MODE | UCT_IFACE_PARAM_FIELD_STATS_ROOT | UCT_IFACE_PARAM_FIELD_RX_HEADROOM | UCT_IFACE_PARAM_FIELD_CPU_MASK, .open_mode = UCT_IFACE_OPEN_MODE_DEVICE, .mode.device.tl_name = params->uct.tl_name, .mode.device.dev_name = params->uct.dev_name, .stats_root = ucs_stats_get_root(), .rx_headroom = 0 }; UCS_CPU_ZERO(&iface_params.cpu_mask); status = ucs_async_context_init(&perf->uct.async, params->async_mode); if (status != UCS_OK) { goto out; } status = uct_worker_create(&perf->uct.async, params->thread_mode, &perf->uct.worker); if (status != UCS_OK) { goto out_cleanup_async; } status = uct_perf_create_md(perf); if (status != UCS_OK) { goto out_destroy_worker; } status = uct_md_iface_config_read(perf->uct.md, params->uct.tl_name, NULL, NULL, &iface_config); if (status != UCS_OK) { goto out_destroy_md; } status = uct_iface_open(perf->uct.md, perf->uct.worker, &iface_params, iface_config, &perf->uct.iface); uct_config_release(iface_config); if (status != UCS_OK) { ucs_error("Failed to open iface: %s", ucs_status_string(status)); goto out_destroy_md; } status = uct_perf_test_check_capabilities(params, perf->uct.iface); /* sync status across all processes */ status = ucp_perf_test_exchange_status(perf, status); if (status != UCS_OK) { goto out_iface_close; } status = uct_perf_test_alloc_mem(perf); if (status != UCS_OK) { goto out_iface_close; } /* Enable progress before `uct_iface_flush` and `uct_worker_progress` called * to give a chance to finish connection for some tranports (ib/ud, tcp). * They may return UCS_INPROGRESS from `uct_iface_flush` when connections are * in progress */ uct_iface_progress_enable(perf->uct.iface, UCT_PROGRESS_SEND | UCT_PROGRESS_RECV); status = uct_perf_test_setup_endpoints(perf); if (status != UCS_OK) { ucs_error("Failed to setup endpoints: %s", ucs_status_string(status)); goto out_free_mem; } return UCS_OK; out_free_mem: uct_perf_test_free_mem(perf); out_iface_close: uct_iface_close(perf->uct.iface); out_destroy_md: uct_md_close(perf->uct.md); out_destroy_worker: uct_worker_destroy(perf->uct.worker); out_cleanup_async: ucs_async_context_cleanup(&perf->uct.async); out: return status; } static void uct_perf_cleanup(ucx_perf_context_t *perf) { uct_perf_test_cleanup_endpoints(perf); uct_perf_test_free_mem(perf); uct_iface_close(perf->uct.iface); uct_md_close(perf->uct.md); uct_worker_destroy(perf->uct.worker); ucs_async_context_cleanup(&perf->uct.async); } static ucs_status_t ucp_perf_setup(ucx_perf_context_t *perf) { ucp_params_t ucp_params; ucp_worker_params_t worker_params; ucp_config_t *config; ucs_status_t status; ucp_params.field_mask = UCP_PARAM_FIELD_FEATURES; ucp_params.features = 0; status = ucp_perf_test_fill_params(&perf->params, &ucp_params); if (status != UCS_OK) { goto err; } status = ucp_config_read(NULL, NULL, &config); if (status != UCS_OK) { goto err; } status = ucp_init(&ucp_params, config, &perf->ucp.context); ucp_config_release(config); if (status != UCS_OK) { goto err; } worker_params.field_mask = UCP_WORKER_PARAM_FIELD_THREAD_MODE; worker_params.thread_mode = perf->params.thread_mode; status = ucp_worker_create(perf->ucp.context, &worker_params, &perf->ucp.worker); if (status != UCS_OK) { goto err_cleanup; } status = ucp_perf_test_alloc_mem(perf); if (status != UCS_OK) { ucs_warn("ucp test failed to alocate memory"); goto err_destroy_worker; } status = ucp_perf_test_setup_endpoints(perf, ucp_params.features); if (status != UCS_OK) { if (perf->params.flags & 
UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("Failed to setup endpoints: %s", ucs_status_string(status)); } goto err_free_mem; } return UCS_OK; err_free_mem: ucp_perf_test_free_mem(perf); err_destroy_worker: ucp_worker_destroy(perf->ucp.worker); err_cleanup: ucp_cleanup(perf->ucp.context); err: return status; } static void ucp_perf_cleanup(ucx_perf_context_t *perf) { ucp_perf_test_cleanup_endpoints(perf); ucp_perf_barrier(perf); ucp_perf_test_free_mem(perf); ucp_worker_destroy(perf->ucp.worker); ucp_cleanup(perf->ucp.context); } static struct { ucs_status_t (*setup)(ucx_perf_context_t *perf); void (*cleanup)(ucx_perf_context_t *perf); ucs_status_t (*run)(ucx_perf_context_t *perf); void (*barrier)(ucx_perf_context_t *perf); } ucx_perf_funcs[] = { [UCX_PERF_API_UCT] = {uct_perf_setup, uct_perf_cleanup, uct_perf_test_dispatch, uct_perf_barrier}, [UCX_PERF_API_UCP] = {ucp_perf_setup, ucp_perf_cleanup, ucp_perf_test_dispatch, ucp_perf_barrier} }; static int ucx_perf_thread_spawn(ucx_perf_context_t *perf, ucx_perf_result_t* result); ucs_status_t ucx_perf_run(ucx_perf_params_t *params, ucx_perf_result_t *result) { ucx_perf_context_t *perf; ucs_status_t status; ucx_perf_global_init(); if (params->command == UCX_PERF_CMD_LAST) { ucs_error("Test is not selected"); status = UCS_ERR_INVALID_PARAM; goto out; } if ((params->api != UCX_PERF_API_UCT) && (params->api != UCX_PERF_API_UCP)) { ucs_error("Invalid test API parameter (should be UCT or UCP)"); status = UCS_ERR_INVALID_PARAM; goto out; } perf = malloc(sizeof(*perf)); if (perf == NULL) { status = UCS_ERR_NO_MEMORY; goto out; } ucx_perf_test_init(perf, params); if (perf->allocator == NULL) { ucs_error("Unsupported memory type"); status = UCS_ERR_UNSUPPORTED; goto out_free; } status = perf->allocator->init(perf); if (status != UCS_OK) { goto out_free; } status = ucx_perf_funcs[params->api].setup(perf); if (status != UCS_OK) { goto out_free; } if (UCS_THREAD_MODE_SINGLE == params->thread_mode) { if (params->warmup_iter > 0) { ucx_perf_set_warmup(perf, params); status = ucx_perf_funcs[params->api].run(perf); if (status != UCS_OK) { goto out_cleanup; } ucx_perf_funcs[params->api].barrier(perf); ucx_perf_test_prepare_new_run(perf, params); } /* Run test */ status = ucx_perf_funcs[params->api].run(perf); ucx_perf_funcs[params->api].barrier(perf); if (status == UCS_OK) { ucx_perf_calc_result(perf, result); rte_call(perf, report, result, perf->params.report_arg, 1); } } else { status = ucx_perf_thread_spawn(perf, result); } out_cleanup: ucx_perf_funcs[params->api].cleanup(perf); out_free: free(perf); out: return status; } #if _OPENMP /* multiple threads sharing the same worker/iface */ typedef struct { pthread_t pt; int tid; int ntid; ucs_status_t* statuses; ucx_perf_context_t perf; ucx_perf_result_t result; } ucx_perf_thread_context_t; static void* ucx_perf_thread_run_test(void* arg) { ucx_perf_thread_context_t* tctx = (ucx_perf_thread_context_t*) arg; ucx_perf_result_t* result = &tctx->result; ucx_perf_context_t* perf = &tctx->perf; ucx_perf_params_t* params = &perf->params; ucs_status_t* statuses = tctx->statuses; int tid = tctx->tid; int i; if (params->warmup_iter > 0) { ucx_perf_set_warmup(perf, params); statuses[tid] = ucx_perf_funcs[params->api].run(perf); ucx_perf_funcs[params->api].barrier(perf); for (i = 0; i < tctx->ntid; i++) { if (UCS_OK != statuses[i]) { goto out; } } ucx_perf_test_prepare_new_run(perf, params); } /* Run test */ #pragma omp barrier statuses[tid] = ucx_perf_funcs[params->api].run(perf); ucx_perf_funcs[params->api].barrier(perf); for 
(i = 0; i < tctx->ntid; i++) { if (UCS_OK != statuses[i]) { goto out; } } #pragma omp master { /* Assuming all threads are fairly treated, reporting only tid==0 TODO: aggregate reports */ ucx_perf_calc_result(perf, result); rte_call(perf, report, result, perf->params.report_arg, 1); } out: return &statuses[tid]; } static int ucx_perf_thread_spawn(ucx_perf_context_t *perf, ucx_perf_result_t* result) { ucx_perf_thread_context_t* tctx; ucs_status_t* statuses; size_t message_size; ucs_status_t status; int ti, nti; message_size = ucx_perf_get_message_size(&perf->params); omp_set_num_threads(perf->params.thread_count); nti = perf->params.thread_count; tctx = calloc(nti, sizeof(ucx_perf_thread_context_t)); statuses = calloc(nti, sizeof(ucs_status_t)); if ((tctx == NULL) || (statuses == NULL)) { status = UCS_ERR_NO_MEMORY; goto out_free; } #pragma omp parallel private(ti) { ti = omp_get_thread_num(); tctx[ti].tid = ti; tctx[ti].ntid = nti; tctx[ti].statuses = statuses; tctx[ti].perf = *perf; /* Doctor the src and dst buffers to make them thread specific */ tctx[ti].perf.send_buffer += ti * message_size; tctx[ti].perf.recv_buffer += ti * message_size; tctx[ti].perf.offset = ti * message_size; ucx_perf_thread_run_test((void*)&tctx[ti]); } status = UCS_OK; for (ti = 0; ti < nti; ti++) { if (UCS_OK != statuses[ti]) { ucs_error("Thread %d failed to run test: %s", tctx[ti].tid, ucs_status_string(statuses[ti])); status = statuses[ti]; } } out_free: free(statuses); free(tctx); return status; } #else static int ucx_perf_thread_spawn(ucx_perf_context_t *perf, ucx_perf_result_t* result) { ucs_error("Invalid test parameter (thread mode requested without OpenMP capabilities)"); return UCS_ERR_INVALID_PARAM; } #endif /* _OPENMP */ void ucx_perf_global_init() { static ucx_perf_allocator_t host_allocator = { .init = ucs_empty_function_return_success, .ucp_alloc = ucp_perf_test_alloc_host, .ucp_free = ucp_perf_test_free_host, .memset = memset }; UCS_MODULE_FRAMEWORK_DECLARE(ucx_perftest); ucx_perf_mem_type_allocators[UCT_MD_MEM_TYPE_HOST] = &host_allocator; /* FIXME Memtype allocator modules must be loaded to global scope, otherwise * alloc hooks, which are using dlsym() to get pointer to original function, * do not work. Need to use bistro for memtype hooks to fix it. */ UCS_MODULE_FRAMEWORK_LOAD(ucx_perftest, UCS_MODULE_LOAD_FLAG_GLOBAL); }
7619.c
/* POLYBENCH/GPU-OPENMP
 *
 * This file is a part of the Polybench/GPU-OpenMP suite
 *
 * Contact:
 * William Killian <killian@udel.edu>
 *
 * Copyright 2013, The University of Delaware
 */
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>

/* Include polybench common header. */
#include <polybench.h>

/* Include benchmark-specific header. */
/* Default data type is double, default size is 4096x4096. */
#include "convolution-2d.h"

/* Array initialization. */
static void init_array (int ni, int nj,
                        DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj))
{
  // printf("Initializing Array\n");
  int i, j;

  for (i = 0; i < ni; i++)
    for (j = 0; j < nj; j++) {
      A[i][j] = ((DATA_TYPE) (i + j) / nj);
    }
}

/* DCE code. Must scan the entire live-out data.
   Can be used also to check the correctness of the output. */
static void print_array(int ni, int nj,
                        DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;

  for (i = 0; i < ni; i++)
    for (j = 0; j < nj; j++) {
      fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]);
      if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n");
    }
  fprintf(stderr, "\n");
}

/* Main computational kernel. The whole function will be timed,
   including the call and return. */
static void kernel_conv2d(int ni, int nj,
                          DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj),
                          DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;

#pragma scop
#pragma omp parallel for simd schedule(static, 28)
  for (i = 1; i < _PB_NI - 1; ++i) {
    for (j = 1; j < _PB_NJ - 1; ++j) {
      B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1]
              + -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1]
              + 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1];
    }
  }
#pragma endscop
  // printf("Kernel computation complete !!\n");
}

int main(int argc, char** argv)
{
  /* Retrieve problem size. */
  int ni = NI;
  int nj = NJ;

  /* Variable declaration/allocation. */
  POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj);
  POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj);

  /* Initialize array(s). */
  init_array (ni, nj, POLYBENCH_ARRAY(A));

  /* Start timer. */
  //polybench_start_instruments;
  polybench_timer_start();

  /* Run kernel. */
  kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B));

  /* Stop and print timer. */
  polybench_timer_stop();
  polybench_timer_print();
  //polybench_stop_instruments;
  //polybench_print_instruments;

  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B)));

  /* Be clean. */
  POLYBENCH_FREE_ARRAY(A);
  POLYBENCH_FREE_ARRAY(B);

  return 0;
}
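/* Reference note (not part of the original benchmark file): kernel_conv2d above
   applies a fixed 3x3 stencil.  Written as a coefficient matrix, with rows
   i-1..i+1 and columns j-1..j+1, the weights taken from the loop body are:

       [  0.2   0.5  -0.8 ]
       [ -0.3   0.6  -0.9 ]
       [  0.4   0.7   0.1 ]
*/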
QuadtreeCartesianEuclid.h
/*
 * Quadtree.h
 *
 *  Created on: 21.05.2014
 *      Author: Moritz v. Looz (moritz.looz-corswarem@kit.edu)
 */

#ifndef QUADTREECARTESIANEUCLID_H_
#define QUADTREECARTESIANEUCLID_H_

#include <vector>
#include <memory>
#include <cmath>
#include <omp.h>
#include <functional>

#include "QuadNodeCartesianEuclid.h"

namespace NetworKit {

template <class T>
class QuadtreeCartesianEuclid {
    friend class QuadTreeCartesianEuclidGTest;
public:
    /**
     * @param lower Lower corner of the managed area
     * @param upper Upper corner of the managed area (the upper limit is open)
     * @param theoreticalSplit If true, split cells to get the same area in each child cell. Default is false
     * @param capacity How many points can inhabit a leaf cell before it is split up?
     */
    QuadtreeCartesianEuclid(Point<double> lower = Point<double>({0.0, 0.0}),
                            Point<double> upper = Point<double>({1.0, 1.0}),
                            bool theoreticalSplit = false, count capacity = 1000) {
        assert(lower.getDimensions() == upper.getDimensions());
        root = QuadNodeCartesianEuclid<T>(lower, upper, capacity, theoreticalSplit);
        this->lower = lower;
        this->upper = upper;
    }

    QuadtreeCartesianEuclid(const vector<Point<double> > &positions, const vector<T> &content,
                            bool theoreticalSplit = false, count capacity = 1000) {
        const count n = positions.size();
        assert(content.size() == n);
        assert(n > 0);
        this->dimension = positions[0].getDimensions();
        vector<double> lowerValue(dimension);
        vector<double> upperValue(dimension);
        for (index d = 0; d < dimension; d++) {
            lowerValue[d] = positions[0].at(d);
            upperValue[d] = positions[0].at(d);
        }

        for (Point<double> pos : positions) {
            assert(pos.getDimensions() == dimension);
            for (index d = 0; d < dimension; d++) {
                if (pos[d] < lowerValue[d]) lowerValue[d] = pos[d];
                if (pos[d] > upperValue[d]) upperValue[d] = pos[d];
            }
        }

        //the upper limit is open, so it needs to be above the points
        for (index d = 0; d < dimension; d++) {
            upperValue[d] = std::nextafter(upperValue[d], std::numeric_limits<double>::max());
        }
        this->lower = Point<double>(lowerValue);
        this->upper = Point<double>(upperValue);

        root = QuadNodeCartesianEuclid<T>(lower, upper, capacity, theoreticalSplit);
        for (index i = 0; i < n; i++) {
            assert(content[i] < n);
            root.addContent(content[i], positions[i]);
        }
    }

    /**
     * @param newcomer content to be added at point pos
     * @param pos Cartesian coordinates of the new element
     */
    void addContent(T newcomer, Point<double> pos) {
        root.addContent(newcomer, pos);
    }

    /**
     * @param toRemove content to be removed at point pos
     * @param pos Cartesian coordinates of the element
     */
    bool removeContent(T toRemove, Point<double> pos) {
        return root.removeContent(toRemove, pos);
    }

    /**
     * Get all elements, regardless of position
     *
     * @return vector<T> of elements
     */
    vector<T> getElements() const {
        return root.getElements();
    }

    void extractCoordinates(vector<Point<double> > &posContainer) const {
        root.getCoordinates(posContainer);
    }

    void getElementsInEuclideanCircle(const Point<double> circleCenter, const double radius,
                                      vector<T> &circleDenizens) const {
        root.getElementsInEuclideanCircle(circleCenter, radius, circleDenizens);
    }

    template<typename L>
    count getElementsProbabilistically(Point<double> euQuery, L prob, vector<T> &circleDenizens) {
        return root.getElementsProbabilistically(euQuery, prob, circleDenizens);
    }

    void recount() {
        root.recount();
    }

    count size() const {
        return root.size();
    }

    count height() const {
        return root.height();
    }

    count countLeaves() const {
        return root.countLeaves();
    }

    index indexSubtree(index nextID) {
        return root.indexSubtree(nextID);
    }

    index getCellID(Point<double> pos) const {
        return root.getCellID(pos);
    }

    void reindex() {
        #pragma omp parallel
        {
            #pragma omp single nowait
            {
                root.reindex(0);
            }
        }
    }

    /**
     * trims the vectors used to hold the content in the leaf cells. Reduces memory usage, makes changes slower
     */
    void trim() {
        root.trim();
    }

private:
    QuadNodeCartesianEuclid<T> root;
    Point<double> lower;
    Point<double> upper;
    count dimension;
};
}

#endif /* QUADTREECARTESIANEUCLID_H_ */
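// Minimal usage sketch (illustrative only, not part of the header above).  The
// point coordinates, ids, and query radius are made-up values for the example;
// it assumes NetworKit's Point<double> and the count/index typedefs pulled in
// by the included headers.
//
//   std::vector<NetworKit::Point<double>> pos = {
//       NetworKit::Point<double>({0.1, 0.2}),
//       NetworKit::Point<double>({0.4, 0.4}),
//       NetworKit::Point<double>({0.9, 0.7})
//   };
//   std::vector<int> ids = {0, 1, 2};                  // content[i] must be < n
//   NetworKit::QuadtreeCartesianEuclid<int> tree(pos, ids);
//   std::vector<int> hits;
//   tree.getElementsInEuclideanCircle(NetworKit::Point<double>({0.4, 0.4}), 0.2, hits);
//   // hits now holds the ids of all points within radius 0.2 of (0.4, 0.4)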
GB_binop__ne_uint16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__ne_uint16) // A.*B function (eWiseMult): GB (_AemultB_08__ne_uint16) // A.*B function (eWiseMult): GB (_AemultB_02__ne_uint16) // A.*B function (eWiseMult): GB (_AemultB_04__ne_uint16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__ne_uint16) // A*D function (colscale): GB (_AxD__ne_uint16) // D*A function (rowscale): GB (_DxB__ne_uint16) // C+=B function (dense accum): GB (_Cdense_accumB__ne_uint16) // C+=b function (dense accum): GB (_Cdense_accumb__ne_uint16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ne_uint16) // C=scalar+B GB (_bind1st__ne_uint16) // C=scalar+B' GB (_bind1st_tran__ne_uint16) // C=A+scalar GB (_bind2nd__ne_uint16) // C=A'+scalar GB (_bind2nd_tran__ne_uint16) // C type: bool // A type: uint16_t // A pattern? 0 // B type: uint16_t // B pattern? 0 // BinaryOp: cij = (aij != bij) #define GB_ATYPE \ uint16_t #define GB_BTYPE \ uint16_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint16_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint16_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x != y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_NE || GxB_NO_UINT16 || GxB_NO_NE_UINT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__ne_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__ne_uint16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__ne_uint16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type uint16_t uint16_t bwork = (*((uint16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__ne_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__ne_uint16) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__ne_uint16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint16_t alpha_scalar ; uint16_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint16_t *) alpha_scalar_in)) 
; beta_scalar = (*((uint16_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__ne_uint16) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__ne_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__ne_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__ne_uint16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__ne_uint16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; uint16_t x = (*((uint16_t *) x_input)) ; uint16_t *Bx = (uint16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint16_t bij = GBX (Bx, p, false) ; Cx [p] = (x != bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__ne_uint16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; uint16_t *Ax = (uint16_t *) Ax_input ; uint16_t y = (*((uint16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint16_t aij = GBX (Ax, p, false) ; Cx [p] = (aij != y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x != aij) ; \ } GrB_Info GB (_bind1st_tran__ne_uint16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) 
{ // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t x = (*((const uint16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij != y) ; \ } GrB_Info GB (_bind2nd_tran__ne_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t y = (*((const uint16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
vdi_fmt_plug.c
/* VirtualBox (VDI) volume support to John The Ripper * * Written by JimF <jfoug at openwall.net> in 2015. No copyright * is claimed, and the software is hereby placed in the public domain. * In case this attempt to disclaim copyright and place the software in the * public domain is deemed null and void, then the software is * Copyright (c) 2015 JimF and it is hereby released to the * general public under the following terms: * * Redistribution and use in source and binary forms, with or without * modification, are permitted. * * There's ABSOLUTELY NO WARRANTY, express or implied. * * information about this algorithm taken from: * http://www.sinfocol.org/archivos/2015/07/VBOXDIECracker.phps */ #include "arch.h" #if FMT_EXTERNS_H extern struct fmt_main fmt_vdi; #elif FMT_REGISTERS_H john_register_one(&fmt_vdi); #else #include "aes_xts.h" #include <string.h> #include "misc.h" #include "memory.h" #include "common.h" #include "formats.h" #include "crc32.h" #include "johnswap.h" #include "base64_convert.h" #include "pbkdf2_hmac_sha256.h" #ifdef _OPENMP #include <omp.h> #ifndef OMP_SCALE #ifdef __MIC__ #define OMP_SCALE 16 #else #define OMP_SCALE 4 #endif // __MIC__ #endif // OMP_SCALE #endif // _OPENMP #include "memdbg.h" #define PLAINTEXT_LENGTH 125 #define SALT_SIZE sizeof(struct vdi_salt) #define SALT_ALIGN 4 #define BINARY_SIZE 32 #define BINARY_ALIGN 4 #define MAX_SALT_LEN 32 #define MAX_KEY_LEN 64 #define FORMAT_LABEL "vdi" #define FORMAT_NAME "VirtualBox-VDI AES_XTS" #define FORMAT_TAG "$vdi$" #define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1) #define ALGORITHM_NAME "PBKDF2-SHA256 " SHA256_ALGORITHM_NAME " + AES_XTS" #if SSE_GROUP_SZ_SHA256 #define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA256 #define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA256 #else #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #endif static unsigned char (*key_buffer)[PLAINTEXT_LENGTH + 1]; static unsigned char (*crypt_out)[MAX_SALT_LEN]; static struct fmt_tests tests[] = { // The 'jtr' test hashed were made with VirtualBox. 
The others were made with pass_gen.pl {"$vdi$aes-xts256$sha256$2000$2000$64$32$709f6df123f1ccb126ea1f3e565beb78d39cafdc98e0daa2e42cc43cef11f786$0340f137136ad54f59f4b24ef0bf35240e140dfd56bbc19ce70aee6575f0aabf$0a27e178f47a0b05a752d6e917b89ef4205c6ae76705c34858390f8afa6cf03a45d98fab53b76d8d1c68507e7810633db4b83501a2496b7e443eccb53dbc8473$7ac5f4ad6286406e84af31fd36881cf558d375ae29085b08e6f65ebfd15376ca", "jtr"}, {"$vdi$aes-xts256$sha256$2000$2000$64$32$d72ee0aecd496b084117bb8d87f5e37de71973518a2ef992c895907a09b73b83$afb33e56a7f81b1e3db70f599b62ecf3d223405abb63bcf569bb29acab9c81a6$3b3769fd3cfaf8e11f67fdc9d54aed8c8962a769f3f66cb2b9cb8700c01a66e6b1c996fdee9727188c765bde224047b8ced7a9b5f5381e7ad7271a9cbf049fde$1c5bca64cbedd76802eddc3e6ffd834e8c1f1ff1157de6ae6feb3740051e2cfa", "password"}, {"$vdi$aes-xts256$sha256$2000$2000$64$32$a4e4480927153ecbb7509afb8d49468e62c8bb22aaab458f4115bff63364de41$c69605220d1ed03618f0236a88e225db1ec69e7a95dbe63ee00778cc8b91424e$0a1de9c85452fafd19ceb0821a115c7fec6fab4ef51fc57fabc25bf973417684a78683267513923f88231a6efd2442ce9279f2a5614d4cfcb930b5ef413f34c3$d79ea5522ad79fc409bbcd1e8a2bb75e16a53e1eef940b4fe954cee1c2491fd2", "ripper"}, {"$vdi$aes-xts256$sha256$2000$2000$64$32$450ce441592003821931e73ea314dcd0effff1b74b61a8fc4046573d0f448057$18c48e3d7677bc9471607cec83d036b963f23f7bb16f09ea438395b61dcf14d5$c4893bce14fa3a1f915004b9ec0fd6a7215ddebdd2ca4bc2b4ec164253c2f2319685a8f8245ec8e2d9e7a53c6aec5fd2d4ca7ba510ffc7456a72285d40ce7d35$793e58317b9bf6318d1b4cef1e05f5a8579a50fb7efde884ea68b096b7043aad", "john"}, {"$vdi$aes-xts256$sha256$2000$2000$64$32$472476df7d16f80d612d4c9ff35678a2011605dc98b76b6d78632859c259d5d0$aa89f9bea1139da6ace97e13c823d713030fda0c8c55ad2fcea358746cc0b4cc$507aaf7c9e00b492042072a17b3975fc88e30e1d5927e63cb335c630b7b873e4c9af2df63c95b42896e15bb33c37c9f572a65f97441b3707ce5d81c521dfd30e$111004a8d9167b55ff5db510cc136f2bceacf4a9f50807742e2bbc110847174e", "really long password with ! stuff!!! ;)"}, // some aes-128 samples They run exactly same speed as the AES-256 hashes. {"$vdi$aes-xts128$sha256$2000$2000$32$32$d3fd2bb27734f25918ac726717b192091253441c4bc71a814d0a6483e73325ea$ef560858b4c068bd8d994cdf038f51cb1b9f59335d72cb874e79a13c5b6aa84a$79ff000f7638d39b0d02ad08dfcede8740087e334e98022465a380bdf78fff13$302f4c4f58c0dee9676dfdaf3ada9e3d7ec4b5bfc7e6565c941f4ec7337368d4", "jtr"}, {"$vdi$aes-xts128$sha256$2000$2000$32$32$16894e7496bac97bc467faa3efe5a3ba009e1591990c9422e4352bfb39ead4d6$00780af3703680b63239b61d0395e9ff673ee843d7a77d61541e0fdc096c49d1$72434a81a27bb1cd85be529600c3620e4eeed45d12f8ef337cc51c040306be7d$4a5b2129577289a8a0f6a93d7a578cd248d158bc70d6ab89f5ccf31704812e85", "blowhard"}, {"$vdi$aes-xts128$sha256$2000$2000$32$32$4e9d103c944479a4e2b2e33d4757e11fc1a7263ba3b2e99d9ad4bc9aeb7f9337$ade43b6eb1d878f0a5532070fb81697a8164ff7b9798e35649df465068ae7e81$f1e443252c872e305eda848d05676a20af8df405262984b39baf0f0aa1b48247$2601e9e08d19ca20745a6a33f74259bdca06014455370b0bb6b79eb0c5e60581", "foobar"}, {NULL} }; static struct vdi_salt { unsigned char salt1[MAX_SALT_LEN]; unsigned char salt2[MAX_SALT_LEN]; unsigned char encr[MAX_KEY_LEN]; int crypt_type; // 1, 256, 384, 512 for the pbkdf2 algo (currently ONLY 256 implemented, so that is all we handle right now). 
int evp_type; // 128 or 256 for AES-128XTS or AES-256XTS int rounds1; int rounds2; int keylen; int saltlen; } *psalt; static void init(struct fmt_main *self) { #ifdef _OPENMP int omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif key_buffer = mem_calloc(self->params.max_keys_per_crypt, sizeof(*key_buffer)); crypt_out = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_out)); } static void done(void) { MEM_FREE(key_buffer); MEM_FREE(crypt_out); } static int valid(char* ciphertext, struct fmt_main *self) { char *ctcopy, *keeptr; int keylen; int saltlen; char *p; if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN)) return 0; ctcopy = strdup(ciphertext + FORMAT_TAG_LEN); keeptr = ctcopy; if ((p = strtokm(ctcopy, "$")) == NULL) /* decr type*/ goto err; if (strcmp(p, "aes-xts256") && strcmp(p, "aes-xts128")) goto err; if ((p = strtokm(NULL, "$")) == NULL) /* pbkdf2 algo */ goto err; //if (strcmp(p, "sha1") && strcmp(p, "sha256") && strcmp(p, "sha384") && strcmp(p, "sha512")) if (strcmp(p, "sha256")) goto err; if ((p = strtokm(NULL, "$")) == NULL) /* pbkdf2-1 iterations */ goto err; if (!isdec(p)) goto err; if ((p = strtokm(NULL, "$")) == NULL) /* pbkdf2-2 iterations */ goto err; if (!isdec(p)) goto err; if ((p = strtokm(NULL, "$")) == NULL) /* key length */ goto err; if (!isdec(p)) goto err; keylen = atoi(p); if(keylen > MAX_KEY_LEN) goto err; if ((p = strtokm(NULL, "$")) == NULL) /* salt length */ goto err; if (!isdec(p)) goto err; saltlen = atoi(p); if(saltlen > MAX_SALT_LEN) goto err; if ((p = strtokm(NULL, "$")) == NULL) /* salt1 */ goto err; if(strlen(p) != saltlen * 2) goto err; if(!ishexlc(p)) goto err; if ((p = strtokm(NULL, "$")) == NULL) /* salt2 */ goto err; if(strlen(p) != saltlen * 2) goto err; if(!ishexlc(p)) goto err; if ((p = strtokm(NULL, "$")) == NULL) /* encr_key */ goto err; if(strlen(p) != keylen * 2) goto err; if(!ishexlc(p)) goto err; if ((p = strtokm(NULL, "$")) == NULL) /* final_result */ goto err; if(strlen(p) != saltlen * 2) goto err; if(!ishexlc(p)) goto err; if ((p = strtokm(NULL, "$")) != NULL) goto err; MEM_FREE(keeptr); return 1; err: MEM_FREE(keeptr); return 0; } static void set_salt(void *salt) { psalt = salt; } static void* get_salt(char *ciphertext) { static char buf[sizeof(struct vdi_salt)+4]; struct vdi_salt *s = (struct vdi_salt *)mem_align(buf, 4); char *ctcopy, *keeptr; char *p; memset(buf, 0, sizeof(buf)); ctcopy = strdup(ciphertext + FORMAT_TAG_LEN); keeptr = ctcopy; p = strtokm(ctcopy, "$"); /* decr type*/ s->evp_type = !strcmp(p, "aes-xts128") ? 
128 : 256; p = strtokm(NULL, "$"); /* pbkdf2 algo */ s->crypt_type = 256; /* right now, we ONLY handle pbkdf2-sha256 */ p = strtokm(NULL, "$"); /* pbkdf2-1 iterations */ s->rounds1 = atoi(p); p = strtokm(NULL, "$"); /* pbkdf2-2 iterations */ s->rounds2 = atoi(p); p = strtokm(NULL, "$"); /* key length */ s->keylen = atoi(p); p = strtokm(NULL, "$"); /* salt length */ s->saltlen = atoi(p); p = strtokm(NULL, "$"); /* salt1 */ base64_convert(p, e_b64_hex, s->saltlen*2, s->salt1, e_b64_raw, s->saltlen, 0, 0); p = strtokm(NULL, "$"); /* salt2 */ base64_convert(p, e_b64_hex, s->saltlen*2, s->salt2, e_b64_raw, s->saltlen, 0, 0); p = strtokm(NULL, "$"); /* encr_key */ base64_convert(p, e_b64_hex, s->keylen*2, s->encr, e_b64_raw, s->keylen, 0, 0); MEM_FREE(keeptr); return s; } static int crypt_all(int *pcount, struct db_salt *salt) { int i; int inc=1; const int count = *pcount; #if SSE_GROUP_SZ_SHA256 inc = SSE_GROUP_SZ_SHA256; #endif #ifdef _OPENMP #pragma omp parallel for #endif for(i = 0; i < count; i += inc) { unsigned char key[MAX_KEY_LEN]; #if SSE_GROUP_SZ_SHA256 unsigned char Keys[SSE_GROUP_SZ_SHA256][MAX_KEY_LEN]; unsigned char Decr[SSE_GROUP_SZ_SHA256][MAX_KEY_LEN]; #else unsigned char Decr[1][MAX_KEY_LEN]; int ksz = strlen((char *)key_buffer[i]); #endif int j; #if SSE_GROUP_SZ_SHA256 int lens[SSE_GROUP_SZ_SHA256]; unsigned char *pin[SSE_GROUP_SZ_SHA256]; union { unsigned char *pout[SSE_GROUP_SZ_SHA256]; unsigned char *poutc; } x; for (j = 0; j < SSE_GROUP_SZ_SHA256; ++j) { lens[j] = strlen((char*)(key_buffer[i+j])); pin[j] = key_buffer[i+j]; x.pout[j] = Keys[j]; } pbkdf2_sha256_sse((const unsigned char **)pin, lens, psalt->salt1, psalt->saltlen, psalt->rounds1, &(x.poutc), psalt->keylen, 0); #else pbkdf2_sha256((const unsigned char*)key_buffer[i], ksz, psalt->salt1, psalt->saltlen, psalt->rounds1, key, psalt->keylen, 0); #endif for (j = 0; j < inc; ++j) { #if SSE_GROUP_SZ_SHA256 memcpy(key, Keys[j], sizeof(key)); #endif // Try to decrypt using AES AES_XTS_decrypt(key, Decr[j], psalt->encr, psalt->keylen, psalt->evp_type); } #if SSE_GROUP_SZ_SHA256 for (j = 0; j < SSE_GROUP_SZ_SHA256; ++j) { lens[j] = psalt->keylen; pin[j] = Decr[j]; x.pout[j] = crypt_out[i+j]; } pbkdf2_sha256_sse((const unsigned char **)pin, lens, psalt->salt2, psalt->saltlen, psalt->rounds2, &(x.poutc), psalt->saltlen, 0); #else pbkdf2_sha256(Decr[0], psalt->keylen, psalt->salt2, psalt->saltlen, psalt->rounds2, crypt_out[i], psalt->saltlen, 0); #endif } return count; } static int cmp_all(void *binary, int count) { int index = 0; for (; index < count; index++) if (!memcmp(binary, crypt_out[index], 4)) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], BINARY_SIZE); } static int cmp_exact(char *source, int index) { return 1; } static void set_key(char* key, int index) { strcpy((char*)(key_buffer[index]), key); } static char *get_key(int index) { return (char*)(key_buffer[index]); } static int salt_hash(void *salt) { unsigned v=0, i; unsigned char *psalt = (unsigned char *)salt; psalt += 40; // skips us to the salt stuff. 
for (i = 0; i < 64; ++i) { v *= 11; v += psalt[i]; } return v & (SALT_HASH_SIZE - 1); } static void *binary(char *ciphertext) { static ARCH_WORD_32 full[MAX_SALT_LEN / 4]; unsigned char *realcipher = (unsigned char*)full; ciphertext = strrchr(ciphertext, '$') + 1; base64_convert(ciphertext, e_b64_hex, strlen(ciphertext), realcipher, e_b64_raw, MAX_SALT_LEN, 0, 0); return (void*)realcipher; } struct fmt_main fmt_vdi = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, "", // BENCHMARK_COMMENT -1, // BENCHMARK_LENGTH 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, { NULL }, { FORMAT_TAG }, tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, binary, get_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash }, salt_hash, NULL, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { fmt_default_get_hash }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
rose_regression01.c
/*
 * Contributed by Jeff Keasler
 *
 * Liao 2/10/2010
 */
#include "omp.h"

typedef double real8;

void foo(real8 *a, real8 *b, real8 *c, real8 *d, int len)
{
  int icol;
  int jrow;
  int l;

  for (l = 0; l <= len - 1; l += 1) {
    int l8 = l * 8;
    real8 e = d[l * 3 + 0];
    real8 f = d[l * 3 + 1];
    real8 g = d[l * 3 + 2];
    real8 h = b[l];
    real8 tmp[8];

#pragma omp parallel for private (icol) firstprivate (e,f,g)
    for (icol = 0; icol <= 7; icol += 1) {
      tmp[icol] = e * c[(icol + l8) * 4 + 1] + f * c[(icol + l8) * 4 + 2] + g * c[(icol + l8) * 4 + 3];
    }

    for (jrow = 0; jrow <= 7; jrow += 1) {
      real8 hj1 = h * c[(jrow + l8) * 4];
#pragma omp parallel for private (icol) firstprivate (hj1)
      for (icol = 0; icol <= 7; icol += 1) {
        a[icol + (jrow + l8) * 8] += hj1 * tmp[icol];
      }
    }
  }
}
udr-1.c
/* { dg-do compile } */
/* { dg-options "-fopenmp" } */

struct S {};
void foo (void *, void *);
void bar (void *, void *);
void baz (void *);

#pragma omp declare reduction (+: struct S: foo (&omp_out, &omp_in)) initializer (bar (&omp_priv, &omp_orig))

void
test (void)
{
  struct S a, b[10];
  #pragma omp parallel reduction (+: a)
  baz (&a);
}
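/* For reference only (not part of the GCC testcase above): the declare-reduction
   combiner invokes foo(&omp_out, &omp_in) to merge a thread-private copy into the
   result, and the initializer invokes bar(&omp_priv, &omp_orig) to seed each
   private copy.  A hypothetical payload-carrying variant could look like the
   sketch below; the `val` member and the function bodies are illustrative
   assumptions, not taken from the testcase.

   struct S { int val; };
   void foo (void *out, void *in)    { ((struct S *)out)->val += ((struct S *)in)->val; }
   void bar (void *priv, void *orig) { ((struct S *)priv)->val = 0; (void)orig; }
*/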
oldoffice_fmt_plug.c
/* * MS Office 97-2003 cracker patch for JtR. Hacked together during May of * 2012 by Dhiru Kholia <dhiru.kholia at gmail.com>. * * This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com> * Copyright (c) 2014, magnum * Copyright (c) 2009, David Leblanc (http://offcrypto.codeplex.com/) * * License: Microsoft Public License (MS-PL) */ #if FMT_EXTERNS_H extern struct fmt_main fmt_oldoffice; #elif FMT_REGISTERS_H john_register_one(&fmt_oldoffice); #else #include <string.h> #include <errno.h> #ifdef _OPENMP #include <omp.h> #endif #include "md5.h" #include "rc4.h" #include "stdint.h" #include "sha.h" #include "arch.h" #include "misc.h" #include "common.h" #include "formats.h" #include "params.h" #include "options.h" #include "unicode.h" #include "dyna_salt.h" #include "memdbg.h" #ifndef OMP_SCALE #define OMP_SCALE 256 #endif #define FORMAT_LABEL "oldoffice" #define FORMAT_NAME "MS Office <= 2003" #define ALGORITHM_NAME "MD5/SHA1 RC4 32/" ARCH_BITS_STR #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1000 #define PLAINTEXT_LENGTH 64 #define BINARY_SIZE 0 #define BINARY_ALIGN MEM_ALIGN_NONE #define SALT_SIZE sizeof(dyna_salt*) #define SALT_ALIGN MEM_ALIGN_WORD #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #define CIPHERTEXT_LENGTH (TAG_LEN + 120) #define FORMAT_TAG "$oldoffice$" #define TAG_LEN (sizeof(FORMAT_TAG) - 1) static struct fmt_tests oo_tests[] = { {"$oldoffice$1*de17a7f3c3ff03a39937ba9666d6e952*2374d5b6ce7449f57c9f252f9f9b53d2*e60e1185f7aecedba262f869c0236f81", "test"}, {"$oldoffice$0*e40b4fdade5be6be329c4238e2099b8a*259590322b55f7a3c38cb96b5864e72d*2e6516bfaf981770fe6819a34998295d", "123456789012345"}, {"$oldoffice$4*163ae8c43577b94902f58d0106b29205*87deff24175c2414cb1b2abdd30855a3*4182446a527fe4648dffa792d55ae7a15edfc4fb", "Google123"}, /* Meet-in-the-middle candidate produced with oclHashcat -m9710 */ /* Real pw is "hashcat", one collision is "zvDtu!" 
*/ {"", "zvDtu!", {"", "$oldoffice$1*d6aabb63363188b9b73a88efb9c9152e*afbbb9254764273f8f4fad9a5d82981f*6f09fd2eafc4ade522b5f2bee0eaf66d","f2ab1219ae"} }, #if PLAINTEXT_LENGTH >= 24 /* 2003-RC4-40bit-MS-Base-Crypto-1.0_myhovercraftisfullofeels_.doc */ {"$oldoffice$3*9f32522fe9bcb69b12f39d3c24b39b2f*fac8b91a8a578468ae7001df4947558f*f2e267a5bea45736b52d6d1051eca1b935eabf3a", "myhovercraftisfullofeels"}, /* Test-RC4-40bit-MS-Base-DSS_myhovercraftisfullofeels_.doc */ {"$oldoffice$3*095b777a73a10fb6bcd3e48d50f8f8c5*36902daab0d0f38f587a84b24bd40dce*25db453f79e8cbe4da1844822b88f6ce18a5edd2", "myhovercraftisfullofeels"}, /* 2003-RC4-40bit-MS-Base-DH-SChan_myhovercraftisfullofeels_.doc */ {"$oldoffice$3*284bc91cb64bc847a7a44bc7bf34fb69*1f8c589c6fcbd43c42b2bc6fff4fd12b*2bc7d8e866c9ea40526d3c0a59e2d37d8ded3550", "myhovercraftisfullofeels"}, /* Test-RC4-128bit-MS-Strong-Crypto_myhovercraftisfullofeels_.doc */ {"$oldoffice$4*a58b39c30a06832ee664c1db48d17304*986a45cc9e17e062f05ceec37ec0db17*fe0c130ef374088f3fec1979aed4d67459a6eb9a", "myhovercraftisfullofeels"}, /* 2003-RC4-40bit-MS-Base-1.0_myhovercraftisfullofeels_.xls */ {"$oldoffice$3*f426041b2eba9745d30c7949801f7d3a*888b34927e5f31e2703cc4ce86a6fd78*ff66200812fd06c1ba43ec2be9f3390addb20096", "myhovercraftisfullofeels"}, #endif /* the following hash was extracted from Proc2356.ppt (manually + by oldoffice2john.py */ {"$oldoffice$3*DB575DDA2E450AB3DFDF77A2E9B3D4C7*AB183C4C8B5E5DD7B9F3AF8AE5FFF31A*B63594447FAE7D4945D2DAFD113FD8C9F6191BF5", "crypto"}, {"$oldoffice$3*3fbf56a18b026e25815cbea85a16036c*216562ea03b4165b54cfaabe89d36596*91308b40297b7ce31af2e8c57c6407994b205590", "openwall"}, {NULL} }; /* Password encoded in UCS-2 */ static UTF16 (*saved_key)[PLAINTEXT_LENGTH + 1]; /* UCS-2 password length, in octets */ static int *saved_len; /* Last hash with this salt and plain */ static unsigned char (*mitm_key)[16]; static unsigned char (*rc4_key)[16]; static int any_cracked, *cracked; static size_t cracked_size; static int new_keys; typedef struct { dyna_salt dsalt; int type; unsigned char salt[16]; unsigned char verifier[16]; /* or encryptedVerifier */ unsigned char verifierHash[20]; /* or encryptedVerifierHash */ unsigned int has_mitm; unsigned char mitm[5]; /* Meet-in-the-middle hint, if we have one */ } custom_salt; static struct { int ct_hash; unsigned char mitm[10]; } mitm_catcher; static custom_salt cs; static custom_salt *cur_salt = &cs; static void init(struct fmt_main *self) { #ifdef _OPENMP int omp_t = 1; omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif if (options.target_enc == UTF_8) self->params.plaintext_length = 3 * PLAINTEXT_LENGTH > 125 ? 
125 : 3 * PLAINTEXT_LENGTH; saved_key = mem_alloc(self->params.max_keys_per_crypt * sizeof(*saved_key)); saved_len = mem_alloc(self->params.max_keys_per_crypt * sizeof(*saved_len)); mitm_key = mem_alloc(self->params.max_keys_per_crypt * sizeof(*mitm_key)); rc4_key = mem_alloc(self->params.max_keys_per_crypt * sizeof(*rc4_key)); any_cracked = 0; cracked_size = sizeof(*cracked) * self->params.max_keys_per_crypt; cracked = mem_calloc(1, cracked_size); } static void done(void) { MEM_FREE(cracked); MEM_FREE(rc4_key); MEM_FREE(mitm_key); MEM_FREE(saved_len); MEM_FREE(saved_key); } /* Based on ldr_cracked_hash from loader.c */ #define HASH_LOG 30 #define HASH_SIZE (1 << HASH_LOG) static int hex_hash(char *ciphertext) { unsigned int hash, extra; unsigned char *p = (unsigned char *)ciphertext; hash = p[0] | 0x20; /* ASCII case insensitive */ if (!hash) goto out; extra = p[1] | 0x20; if (!extra) goto out; p += 2; while (*p) { hash <<= 1; extra <<= 1; hash += p[0] | 0x20; if (!p[1]) break; extra += p[1] | 0x20; p += 2; if (hash & 0xe0000000) { hash ^= hash >> HASH_LOG; extra ^= extra >> (HASH_LOG - 1); hash &= HASH_SIZE - 1; } } hash -= extra; hash ^= extra << (HASH_LOG / 2); hash ^= hash >> HASH_LOG; hash &= HASH_SIZE - 1; out: return hash; } static int valid(char *ciphertext, struct fmt_main *self) { char *ctcopy, *ptr, *keeptr; int type; if (strncmp(ciphertext, FORMAT_TAG, TAG_LEN)) return 0; if (strlen(ciphertext) > CIPHERTEXT_LENGTH) return 0; if (!(ctcopy = strdup(ciphertext))) return 0; keeptr = ctcopy; ctcopy += TAG_LEN; if (!(ptr = strtokm(ctcopy, "*"))) /* type */ goto error; type = atoi(ptr); if (type < 0 || type > 4) goto error; if (!(ptr = strtokm(NULL, "*"))) /* salt */ goto error; if (hexlen(ptr) != 32) goto error; if (!(ptr = strtokm(NULL, "*"))) /* verifier */ goto error; if (hexlen(ptr) != 32) goto error; if (!(ptr = strtokm(NULL, "*"))) /* verifier hash */ goto error; if (type < 3 && hexlen(ptr) != 32) goto error; else if (type >= 3 && hexlen(ptr) != 40) goto error; /* * Deprecated field: mitm hash (40-bit RC4). The new way to put it is in the * uid field, like hashcat's example hash. 
*/ if (type <= 3 && (ptr = strtokm(NULL, "*"))) { if (hexlen(ptr) != 10) goto error; } MEM_FREE(keeptr); return 1; error: MEM_FREE(keeptr); return 0; } /* uid field may contain a meet-in-the-middle hash */ static char *prepare(char *split_fields[10], struct fmt_main *self) { if (split_fields[0] && valid(split_fields[0], self) && split_fields[1] && hexlen(split_fields[1]) == 10) { mitm_catcher.ct_hash = hex_hash(split_fields[0]); memcpy(mitm_catcher.mitm, split_fields[1], 10); return split_fields[0]; } else if (valid(split_fields[1], self) && split_fields[2] && hexlen(split_fields[2]) == 10) { mitm_catcher.ct_hash = hex_hash(split_fields[1]); memcpy(mitm_catcher.mitm, split_fields[2], 10); } return split_fields[1]; } static char *split(char *ciphertext, int index, struct fmt_main *self) { static char out[CIPHERTEXT_LENGTH]; char *p; strnzcpy(out, ciphertext, sizeof(out)); strlwr(out); /* Drop legacy embedded MITM hash */ if ((p = strrchr(out, '*')) && hexlen(&p[1]) == 10) *p = 0; return out; } static void *get_salt(char *ciphertext) { static void *ptr; char *ctcopy = strdup(ciphertext); char *keeptr = ctcopy; char *p; int i; memset(&cs, 0, sizeof(cs)); ctcopy += TAG_LEN; /* skip over "$oldoffice$" */ p = strtokm(ctcopy, "*"); cs.type = atoi(p); p = strtokm(NULL, "*"); for (i = 0; i < 16; i++) cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; p = strtokm(NULL, "*"); for (i = 0; i < 16; i++) cs.verifier[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; p = strtokm(NULL, "*"); if (cs.type < 3) { for (i = 0; i < 16; i++) cs.verifierHash[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; } else { for (i = 0; i < 20; i++) cs.verifierHash[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; } if ((p = strtokm(NULL, "*"))) { /* Deprecated field */ cs.has_mitm = 1; for (i = 0; i < 5; i++) cs.mitm[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; } else if (hex_hash(ciphertext) == mitm_catcher.ct_hash) { cs.has_mitm = 1; for (i = 0; i < 5; i++) cs.mitm[i] = atoi16[ARCH_INDEX(mitm_catcher.mitm[i * 2])] * 16 + atoi16[ARCH_INDEX(mitm_catcher.mitm[i * 2 + 1])]; } else cs.has_mitm = 0; MEM_FREE(keeptr); cs.dsalt.salt_cmp_offset = SALT_CMP_OFF(custom_salt, type); cs.dsalt.salt_cmp_size = SALT_CMP_SIZE(custom_salt, type, has_mitm, 0); cs.dsalt.salt_alloc_needs_free = 0; ptr = mem_alloc_copy(&cs, sizeof(custom_salt), MEM_ALIGN_WORD); return &ptr; } static char *source(char *source, void *binary) { static char Buf[CIPHERTEXT_LENGTH]; unsigned char *cpi, *cp = (unsigned char*)Buf; int i, len; extern volatile int bench_running; cp += sprintf(Buf, "%s%d*", FORMAT_TAG, cur_salt->type); cpi = cur_salt->salt; for (i = 0; i < 16; i++) { *cp++ = itoa16[*cpi >> 4]; *cp++ = itoa16[*cpi & 0xf]; cpi++; } *cp++ = '*'; cpi = cur_salt->verifier; for (i = 0; i < 16; i++) { *cp++ = itoa16[*cpi >> 4]; *cp++ = itoa16[*cpi & 0xf]; cpi++; } *cp++ = '*'; len = (cur_salt->type < 3) ? 
16 : 20; cpi = cur_salt->verifierHash; for (i = 0; i < len; i++) { *cp++ = itoa16[*cpi >> 4]; *cp++ = itoa16[*cpi & 0xf]; cpi++; } *cp = 0; if (cur_salt->type < 4 && cur_salt->has_mitm && !bench_running) { static int last; char out[11]; if (last != hex_hash(Buf)) { last = hex_hash(Buf); cpi = cur_salt->mitm; for (i = 0; i < 5; i++) { out[2 * i + 0] = itoa16[*cpi >> 4]; out[2 * i + 1] = itoa16[*cpi & 0xf]; cpi++; } out[10] = 0; fprintf(stderr, "MITM key: %s\n", out); } } return Buf; } static void set_salt(void *salt) { if (memcmp(cur_salt->salt, (*(custom_salt**)salt)->salt, 16)) new_keys = 1; cur_salt = *(custom_salt**)salt; } static int salt_compare(const void *x, const void *y) { int c; c = memcmp((*(custom_salt**)x)->salt, (*(custom_salt**)y)->salt, 16); if (c) return c; c = dyna_salt_cmp((void*)x, (void*)y, SALT_SIZE); return c; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; if (any_cracked) { memset(cracked, 0, cracked_size); any_cracked = 0; } #ifdef _OPENMP #pragma omp parallel for #endif for (index = 0; index < count; index++) { int i; RC4_KEY key; if (cur_salt->type < 3) { MD5_CTX ctx; unsigned char pwdHash[16]; unsigned char hashBuf[21 * 16]; if (new_keys) { unsigned char key_hash[16]; MD5_Init(&ctx); MD5_Update(&ctx, saved_key[index], saved_len[index]); MD5_Final(key_hash, &ctx); for (i = 0; i < 16; i++) { memcpy(hashBuf + i * 21, key_hash, 5); memcpy(hashBuf + i * 21 + 5, cur_salt->salt, 16); } MD5_Init(&ctx); MD5_Update(&ctx, hashBuf, 21 * 16); MD5_Final(mitm_key[index], &ctx); } // Early reject if we got a hint if (cur_salt->has_mitm && memcmp(mitm_key[index], cur_salt->mitm, 5)) continue; if (new_keys) { memcpy(hashBuf, mitm_key[index], 5); memset(hashBuf + 5, 0, 4); MD5_Init(&ctx); MD5_Update(&ctx, hashBuf, 9); MD5_Final(rc4_key[index], &ctx); } RC4_set_key(&key, 16, rc4_key[index]); /* rc4Key */ RC4(&key, 16, cur_salt->verifier, hashBuf); /* encryptedVerifier */ RC4(&key, 16, cur_salt->verifierHash, hashBuf + 16); /* encryptedVerifierHash */ /* hash the decrypted verifier */ MD5_Init(&ctx); MD5_Update(&ctx, hashBuf, 16); MD5_Final(pwdHash, &ctx); if (!memcmp(pwdHash, hashBuf + 16, 16)) { #ifdef _OPENMP #pragma omp critical #endif { any_cracked = cracked[index] = 1; cur_salt->has_mitm = 1; memcpy(cur_salt->mitm, mitm_key[index], 5); } } } else { SHA_CTX ctx; unsigned char H0[24]; unsigned char Hfinal[20]; unsigned char DecryptedVerifier[16]; unsigned char DecryptedVerifierHash[20]; if (new_keys) { unsigned char key_hash[20]; SHA1_Init(&ctx); SHA1_Update(&ctx, cur_salt->salt, 16); SHA1_Update(&ctx, saved_key[index], saved_len[index]); SHA1_Final(H0, &ctx); memset(&H0[20], 0, 4); SHA1_Init(&ctx); SHA1_Update(&ctx, H0, 24); SHA1_Final(key_hash, &ctx); if (cur_salt->type < 4) { memcpy(mitm_key[index], key_hash, 5); memset(&mitm_key[index][5], 0, 11); } else memcpy(mitm_key[index], key_hash, 16); } // Early reject if we got a hint if (cur_salt->has_mitm && memcmp(mitm_key[index], cur_salt->mitm, 5)) continue; RC4_set_key(&key, 16, mitm_key[index]); /* dek */ RC4(&key, 16, cur_salt->verifier, DecryptedVerifier); RC4(&key, 16, cur_salt->verifierHash, DecryptedVerifierHash); SHA1_Init(&ctx); SHA1_Update(&ctx, DecryptedVerifier, 16); SHA1_Final(Hfinal, &ctx); if (!memcmp(Hfinal, DecryptedVerifierHash, 16)) { #ifdef _OPENMP #pragma omp critical #endif { any_cracked = cracked[index] = 1; if (cur_salt->type < 4) { cur_salt->has_mitm = 1; memcpy(cur_salt->mitm, mitm_key[index], 5); } } } } } new_keys = 0; return count; } static int 
cmp_all(void *binary, int count) { return any_cracked; } static int cmp_one(void *binary, int index) { return cracked[index]; } static int cmp_exact(char *source, int index) { return 1; } static void set_key(char *key, int index) { /* convert key to UTF-16LE */ saved_len[index] = enc_to_utf16(saved_key[index], PLAINTEXT_LENGTH, (UTF8*)key, strlen(key)); if (saved_len[index] < 0) saved_len[index] = strlen16(saved_key[index]); saved_len[index] <<= 1; new_keys = 1; } static char *get_key(int index) { return (char*)utf16_to_enc(saved_key[index]); } static unsigned int oo_hash_type(void *salt) { custom_salt *my_salt; my_salt = *(custom_salt**)salt; return (unsigned int) my_salt->type; } struct fmt_main fmt_oldoffice = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_UNICODE | FMT_UTF8 | FMT_SPLIT_UNIFIES_CASE | FMT_DYNA_SALT, { "hash type", }, oo_tests }, { init, done, fmt_default_reset, prepare, valid, split, fmt_default_binary, get_salt, { oo_hash_type, }, source, { fmt_default_binary_hash }, fmt_default_dyna_salt_hash, salt_compare, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { fmt_default_get_hash }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
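/*
 * Editor's note -- illustrative sketch only, not part of the original plugin.
 * The helper below condenses the type 0/1 (MD5 + RC4-40) verifier check that
 * crypt_all() performs above into a standalone function. The function name
 * check_verifier_md5_rc4() and the use of the OpenSSL <openssl/md5.h> /
 * <openssl/rc4.h> headers are assumptions for illustration; the plugin itself
 * uses its bundled md5.h/rc4.h with the same call signatures. UTF-16LE key
 * conversion, MITM hints and OpenMP handling are intentionally omitted.
 */
#if 0 /* keep the sketch out of any real build */
#include <string.h>
#include <openssl/md5.h>
#include <openssl/rc4.h>

static int check_verifier_md5_rc4(const unsigned char *pw_utf16le, int pw_bytes,
                                  const unsigned char salt[16],
                                  const unsigned char verifier[16],
                                  const unsigned char verifier_hash[16])
{
	MD5_CTX ctx;
	RC4_KEY key;
	unsigned char pw_hash[16], mitm[16], rc4key[16];
	unsigned char buf[21 * 16], dec[32], chk[16];
	int i;

	/* H = MD5(password); 40-bit key material = MD5(16 x (H[0..4] || salt)) */
	MD5_Init(&ctx);
	MD5_Update(&ctx, pw_utf16le, pw_bytes);
	MD5_Final(pw_hash, &ctx);
	for (i = 0; i < 16; i++) {
		memcpy(buf + i * 21, pw_hash, 5);
		memcpy(buf + i * 21 + 5, salt, 16);
	}
	MD5_Init(&ctx);
	MD5_Update(&ctx, buf, 21 * 16);
	MD5_Final(mitm, &ctx);

	/* RC4 key = MD5(first 5 bytes of key material || 4 zero bytes) */
	memcpy(buf, mitm, 5);
	memset(buf + 5, 0, 4);
	MD5_Init(&ctx);
	MD5_Update(&ctx, buf, 9);
	MD5_Final(rc4key, &ctx);

	/* Decrypt verifier and verifierHash with one RC4 stream and compare
	 * MD5(decrypted verifier) against the decrypted verifierHash. */
	RC4_set_key(&key, 16, rc4key);
	RC4(&key, 16, verifier, dec);
	RC4(&key, 16, verifier_hash, dec + 16);
	MD5_Init(&ctx);
	MD5_Update(&ctx, dec, 16);
	MD5_Final(chk, &ctx);
	return !memcmp(chk, dec + 16, 16);
}
#endif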
sparsemat.c
#include "ghost/config.h" #include "ghost/types.h" #include "ghost/sparsemat.h" #include "ghost/context.h" #include "ghost/util.h" #include "ghost/locality.h" #include "ghost/log.h" #include "ghost/machine.h" #include "ghost/bincrs.h" #include "ghost/matrixmarket.h" #include "ghost/instr.h" #include "ghost/constants.h" #include "ghost/kacz_hybrid_split.h" //#include "ghost/kacz_split_analytical.h" #include "ghost/rcm_dissection.h" #include <libgen.h> #include <math.h> #include <limits.h> const ghost_sparsemat_src_rowfunc GHOST_SPARSEMAT_SRC_ROWFUNC_INITIALIZER = { .func = NULL, .maxrowlen = 0, .base = 0, .flags = GHOST_SPARSEMAT_ROWFUNC_DEFAULT, .arg = NULL, .gnrows = 0, .gncols = 0, .funcinit = NULL }; const ghost_sparsemat_traits GHOST_SPARSEMAT_TRAITS_INITIALIZER = { .flags = GHOST_SPARSEMAT_DEFAULT, .symmetry = GHOST_SPARSEMAT_SYMM_GENERAL, .T = 1, .C = 32, .scotchStrat = (char*)GHOST_SCOTCH_STRAT_DEFAULT, .sortScope = 1, .datatype = GHOST_DT_NONE, .opt_blockvec_width = 0 }; static const char * ghost_sparsemat_formatName(ghost_sparsemat *mat); static ghost_error ghost_sparsemat_split(ghost_sparsemat *mat); #ifdef GHOST_HAVE_CUDA static ghost_error ghost_sparsemat_upload(ghost_sparsemat *mat); #endif static ghost_error ghost_set_kacz_ratio(ghost_context *ctx, ghost_sparsemat *mat); const ghost_spmv_opts GHOST_SPMV_OPTS_INITIALIZER = { .flags = GHOST_SPMV_DEFAULT, .alpha = NULL, .beta = NULL, .gamma = NULL, .delta = NULL, .eta = NULL, .dot = NULL, .z = NULL, .blocksz = GHOST_LIDX_MAX }; ghost_error ghost_sparsemat_create(ghost_sparsemat ** mat, ghost_context *context, ghost_sparsemat_traits *traits, int nTraits) { GHOST_FUNC_ENTER(GHOST_FUNCTYPE_SETUP); UNUSED(nTraits); ghost_error ret = GHOST_SUCCESS; if( traits->C < 1 || (traits->C & (-traits->C)) != traits->C) { GHOST_ERROR_LOG("Only powers of two are allowed for the chunk height C"); return GHOST_ERR_INVALID_ARG; } GHOST_CALL_GOTO(ghost_malloc((void **)mat,sizeof(ghost_sparsemat)),err,ret); (*mat)->traits = traits[0]; if (nTraits == 3) { (*mat)->splittraits[0] = traits[1]; (*mat)->splittraits[1] = traits[2]; } else { (*mat)->splittraits[0] = traits[0]; (*mat)->splittraits[1] = traits[0]; } (*mat)->context = context; if (context) { context->nmats++; } (*mat)->localPart = NULL; (*mat)->remotePart = NULL; (*mat)->name = "Sparse matrix"; (*mat)->col_orig = NULL; (*mat)->nzDist = NULL; (*mat)->avgRowBand = 0.; (*mat)->avgAvgRowBand = 0.; (*mat)->smartRowBand = 0.; (*mat)->maxRowLen = 0; (*mat)->nMaxRows = 0; (*mat)->variance = 0.; (*mat)->deviation = 0.; (*mat)->cv = 0.; // (*mat)->ncols = context->col_map->dim; (*mat)->nEnts = 0; //(*mat)->nnz = 0; // TODO do this in the actual sorting function /* if ((*mat)->traits.sortScope == GHOST_SPARSEMAT_SORT_GLOBAL) { (*mat)->traits.sortScope = (*mat)->context->row_map->gdim; } else if ((*mat)->traits.sortScope == GHOST_SPARSEMAT_SORT_LOCAL) { (*mat)->traits.sortScope = (*mat)->context->row_map->dim; } */ // Note: Datatpye check and elSize computation moved to creation // functions ghost_sparsemat_init_* (*mat)->elSize = 0; if (!((*mat)->traits.flags & (GHOST_SPARSEMAT_HOST | GHOST_SPARSEMAT_DEVICE))) { // no placement specified GHOST_DEBUG_LOG(2,"Setting matrix placement"); ghost_type ghost_type; GHOST_CALL_GOTO(ghost_type_get(&ghost_type),err,ret); if (ghost_type == GHOST_TYPE_CUDA) { (*mat)->traits.flags |= (ghost_sparsemat_flags)GHOST_SPARSEMAT_DEVICE; } else { (*mat)->traits.flags |= (ghost_sparsemat_flags)GHOST_SPARSEMAT_HOST; } } ghost_type ghost_type; 
GHOST_CALL_RETURN(ghost_type_get(&ghost_type)); (*mat)->val = NULL; (*mat)->col = NULL; (*mat)->chunkMin = NULL; (*mat)->chunkLen = NULL; (*mat)->chunkLenPadded = NULL; (*mat)->rowLen = NULL; (*mat)->rowLen2 = NULL; (*mat)->rowLen4 = NULL; (*mat)->rowLenPadded = NULL; (*mat)->chunkStart = NULL; goto out; err: GHOST_ERROR_LOG("Error. Free'ing resources"); free(*mat); *mat = NULL; out: GHOST_FUNC_EXIT(GHOST_FUNCTYPE_SETUP); return ret; } /** * @brief Sort the entries in a given row physically to have increasing * column indices. * * @param[inout] col The column indices of the row. * @param[inout] val The values of the row. * @param[in] valSize The size of one entry. * @param[in] rowlen The length of the row. * @param[in] stride The stride between successive elements in the row (1 * for CRS, C for SELL-C). * * @return ::GHOST_SUCCESS on success or an error indicator. */ static ghost_error ghost_sparsemat_sortrow(ghost_gidx *col, char *val, size_t valSize, ghost_lidx rowlen, ghost_lidx stride, ghost_lidx row, bool diagfirst) { GHOST_FUNC_ENTER(GHOST_FUNCTYPE_INITIALIZATION); ghost_lidx n; ghost_lidx c; ghost_lidx swpcol; char swpval[valSize]; // set diag's column to -1 such that it gets sorted to the beginning if (diagfirst) { for (c=0; c<rowlen; c++) { if (col[c*stride] == row) { col[c*stride] = -1; } } } // bubble sort in row for (n=rowlen; n>1; n--) { for (c=0; c<n-1; c++) { if (col[c*stride] > col[(c+1)*stride]) { swpcol = col[c*stride]; col[c*stride] = col[(c+1)*stride]; col[(c+1)*stride] = swpcol; memcpy(&swpval,&val[c*stride*valSize],valSize); memcpy(&val[c*stride*valSize],&val[(c+1)*stride*valSize],valSize); memcpy(&val[(c+1)*stride*valSize],&swpval,valSize); } } } // re-set diag's column to the correct value if (diagfirst) { col[0] = row; } GHOST_FUNC_EXIT(GHOST_FUNCTYPE_INITIALIZATION); return GHOST_SUCCESS; } //calculates bandwidth of the matrix mat with possible permutations applied from ctx and stores the information in ctx static ghost_error ghost_calculate_bw(ghost_context *ctx, ghost_sparsemat *mat) { GHOST_INSTR_START("calculate badwidth"); ghost_error ret = GHOST_SUCCESS; int me; GHOST_CALL_GOTO(ghost_rank(&me,ctx->mpicomm),err,ret); ghost_gidx lower_bw = 0, upper_bw = 0, max_col=0; #pragma omp parallel for reduction(max:lower_bw) reduction(max:upper_bw) reduction(max:max_col) for (ghost_lidx i=0; i<ctx->row_map->ldim[me]; i++) { ghost_lidx orig_row = i; if (ctx->row_map->loc_perm) { orig_row = ctx->row_map->loc_perm_inv[i]; } ghost_lidx * col = &mat->col[mat->chunkStart[orig_row]]; ghost_lidx orig_row_len = mat->chunkStart[orig_row+1]-mat->chunkStart[orig_row]; ghost_gidx start_col = INT_MAX; ghost_gidx end_col = 0; if(ctx->row_map->loc_perm){ if(ctx->col_map->loc_perm == NULL) { for(int j=0; j<orig_row_len; ++j) { start_col = MIN(start_col, ctx->row_map->loc_perm[col[j]]); end_col = MAX(end_col, ctx->row_map->loc_perm[col[j]]); } } else { for(int j=0; j<orig_row_len; ++j) { start_col = MIN(start_col, ctx->col_map->loc_perm[col[j]]); end_col = MAX(end_col, ctx->col_map->loc_perm[col[j]]); } } } else { for(int j=0; j<orig_row_len; ++j) { start_col = MIN(start_col, col[j]); end_col = MAX(end_col, col[j]); } } lower_bw = MAX(lower_bw, i-start_col); upper_bw = MAX(upper_bw, end_col - i); max_col = MAX(max_col, end_col); } ctx->lowerBandwidth = lower_bw; ctx->upperBandwidth = upper_bw; ctx->bandwidth = lower_bw + upper_bw; ctx->maxColRange = max_col; ctx->bandwidth = ctx->lowerBandwidth + ctx->upperBandwidth; GHOST_INFO_LOG("RANK<%d>: LOWER BANDWIDTH =%"PRGIDX", UPPER BANDWIDTH 
=%"PRGIDX", TOTAL BANDWIDTH =%"PRGIDX,me,ctx->lowerBandwidth,ctx->upperBandwidth,ctx->bandwidth); GHOST_INSTR_STOP("calculate bandwidth"); goto out; err: GHOST_ERROR_LOG("ERROR in Bandwidth Calculation"); return ret; out: return ret; } static ghost_error ghost_set_kacz_ratio(ghost_context *ctx, ghost_sparsemat *mat) { int nthread; #ifdef GHOST_HAVE_OPENMP #pragma omp parallel { #pragma omp master nthread = ghost_omp_nthread(); } #else nthread = 1; #endif ctx->kacz_setting.active_threads = nthread; ghost_calculate_bw(ctx,mat); ctx->kaczRatio = ((double)SPM_NROWS(mat))/ctx->bandwidth; return GHOST_SUCCESS; } ghost_error ghost_sparsemat_perm_global_cols(ghost_gidx *col, ghost_lidx ncols, ghost_context *context) { #ifdef GHOST_HAVE_MPI GHOST_FUNC_ENTER(GHOST_FUNCTYPE_INITIALIZATION|GHOST_FUNCTYPE_COMMUNICATION); int me, nprocs,i; ghost_rank(&me,context->mpicomm); ghost_nrank(&nprocs,context->mpicomm); for (i=0; i<nprocs; i++) { ghost_lidx nels = 0; if (i==me) { nels = ncols; } MPI_Bcast(&nels,1,ghost_mpi_dt_lidx,i,context->mpicomm); ghost_gidx *colsfromi; ghost_malloc((void **)&colsfromi,nels*sizeof(ghost_gidx)); if (i==me) { memcpy(colsfromi,col,nels*sizeof(ghost_gidx)); } MPI_Bcast(colsfromi,nels,ghost_mpi_dt_gidx,i,context->mpicomm); ghost_lidx el; for (el=0; el<nels; el++) { if ((colsfromi[el] >= context->row_map->goffs[me]) && (colsfromi[el] < (context->row_map->goffs[me]+context->row_map->ldim[me]))) { colsfromi[el] = context->row_map->glb_perm[colsfromi[el]-context->row_map->goffs[me]]; } else { colsfromi[el] = 0; } } if (i==me) { MPI_Reduce(MPI_IN_PLACE,colsfromi,nels,ghost_mpi_dt_gidx,MPI_MAX,i,context->mpicomm); } else { MPI_Reduce(colsfromi,NULL,nels,ghost_mpi_dt_gidx,MPI_MAX,i,context->mpicomm); } if (i==me) { if (context->row_map->loc_perm) { for (el=0; el<nels; el++) { if ((colsfromi[el] >= context->row_map->goffs[me]) && (colsfromi[el] < context->row_map->goffs[me]+context->row_map->ldim[me])) { col[el] = context->row_map->goffs[me] + context->row_map->loc_perm[colsfromi[el]-context->row_map->goffs[me]]; } else { col[el] = colsfromi[el]; } } } else { memcpy(col,colsfromi,nels*sizeof(ghost_gidx)); } } free(colsfromi); } GHOST_FUNC_EXIT(GHOST_FUNCTYPE_INITIALIZATION|GHOST_FUNCTYPE_COMMUNICATION); #else GHOST_ERROR_LOG("This function should not have been called without MPI!"); UNUSED(col); UNUSED(ncols); UNUSED(context); #endif return GHOST_SUCCESS; } ghost_error ghost_sparsemat_nrows(ghost_gidx *nrows, ghost_sparsemat *mat) { GHOST_FUNC_ENTER(GHOST_FUNCTYPE_UTIL); if (!nrows) { GHOST_ERROR_LOG("NULL pointer"); return GHOST_ERR_INVALID_ARG; } *nrows = mat->context->row_map->gdim; GHOST_FUNC_EXIT(GHOST_FUNCTYPE_UTIL); return GHOST_SUCCESS; } ghost_error ghost_sparsemat_nnz(ghost_gidx *nnz, ghost_sparsemat *mat) { GHOST_FUNC_ENTER(GHOST_FUNCTYPE_UTIL); if (!nnz) { GHOST_ERROR_LOG("NULL pointer"); return GHOST_ERR_INVALID_ARG; } /* ghost_gidx lnnz = SPM_NNZ(mat); * * #ifdef GHOST_HAVE_MPI * MPI_CALL_RETURN(MPI_Allreduce(&lnnz,nnz,1,ghost_mpi_dt_gidx,MPI_SUM,mat->context->mpicomm)); * #else *nnz = lnnz; * #endif */ *nnz = mat->context->gnnz; GHOST_FUNC_EXIT(GHOST_FUNCTYPE_UTIL); return GHOST_SUCCESS; } ghost_error ghost_sparsemat_info_string(char **str, ghost_sparsemat *mat) { GHOST_FUNC_ENTER(GHOST_FUNCTYPE_UTIL); GHOST_CALL_RETURN(ghost_malloc((void **)str,1)); memset(*str,'\0',1); int myrank; ghost_gidx nrows = 0; ghost_gidx nnz = 0; GHOST_CALL_RETURN(ghost_sparsemat_nrows(&nrows,mat)); GHOST_CALL_RETURN(ghost_sparsemat_nnz(&nnz,mat)); GHOST_CALL_RETURN(ghost_rank(&myrank, 
mat->context->mpicomm)); char *matrixLocation; if (mat->traits.flags & GHOST_SPARSEMAT_DEVICE) matrixLocation = "Device"; else if (mat->traits.flags & GHOST_SPARSEMAT_HOST) matrixLocation = "Host"; else matrixLocation = "Default"; ghost_header_string(str,"%s @ rank %d",mat->name,myrank); ghost_line_string(str,"Data type",NULL,"%s",ghost_datatype_string(mat->traits.datatype)); ghost_line_string(str,"Matrix location",NULL,"%s",matrixLocation); ghost_line_string(str,"Total number of rows",NULL,"%"PRGIDX,nrows); ghost_line_string(str,"Total number of nonzeros",NULL,"%"PRGIDX,nnz); ghost_line_string(str,"Avg. nonzeros per row",NULL,"%.3f",(double)nnz/nrows); ghost_line_string(str,"Bandwidth",NULL,"%"PRGIDX,mat->context->bandwidth); ghost_line_string(str,"Avg. row band",NULL,"%.3f",mat->avgRowBand); ghost_line_string(str,"Avg. avg. row band",NULL,"%.3f",mat->avgAvgRowBand); ghost_line_string(str,"Smart row band",NULL,"%.3f",mat->smartRowBand); ghost_line_string(str,"Local number of rows",NULL,"%"PRLIDX,SPM_NROWS(mat)); ghost_line_string(str,"Local number of rows (padded)",NULL,"%"PRLIDX,SPM_NROWSPAD(mat)); ghost_line_string(str,"Local number of nonzeros",NULL,"%"PRLIDX,SPM_NNZ(mat)); ghost_line_string(str,"Full matrix format",NULL,"%s",ghost_sparsemat_formatName(mat)); if (mat->localPart) { ghost_line_string(str,"Local matrix format",NULL,"%s",ghost_sparsemat_formatName(mat->localPart)); ghost_line_string(str,"Local matrix symmetry",NULL,"%s",ghost_sparsemat_symmetry_string(mat->localPart->traits.symmetry)); ghost_line_string(str,"Local matrix size","MB","%u",ghost_sparsemat_bytesize(mat->localPart)/(1024*1024)); } if (mat->remotePart) { ghost_line_string(str,"Remote matrix format",NULL,"%s",ghost_sparsemat_formatName(mat->remotePart)); ghost_line_string(str,"Remote matrix size","MB","%u",ghost_sparsemat_bytesize(mat->remotePart)/(1024*1024)); } ghost_line_string(str,"Full matrix size","MB","%u",ghost_sparsemat_bytesize(mat)/(1024*1024)); if (mat->context->row_map->loc_perm || mat->context->row_map->glb_perm) { ghost_line_string(str,"Permuted",NULL,"Yes"); if (mat->context->row_map->glb_perm) { if (mat->context->row_map->loc_perm) { ghost_line_string(str,"Permutation scope",NULL,"Global+local"); } else { ghost_line_string(str,"Permutation scope",NULL,"Global"); } if (mat->traits.flags & GHOST_SPARSEMAT_SCOTCHIFY) { ghost_line_string(str,"Global permutation strategy",NULL,"SCOTCH"); ghost_line_string(str,"SCOTCH ordering strategy",NULL,"%s",mat->traits.scotchStrat); } if (mat->traits.flags & GHOST_SPARSEMAT_ZOLTAN) { ghost_line_string(str,"Global permutation strategy",NULL,"ZOLTAN"); } } else if (mat->context->row_map->loc_perm) { ghost_line_string(str,"Permutation scope",NULL,"Local"); } if (mat->context->row_map->loc_perm) { if (mat->traits.sortScope > 1) { if (mat->traits.flags & GHOST_SPARSEMAT_RCM) { ghost_line_string(str,"Local permutation strategy",NULL,"RCM+Sorting"); } else { ghost_line_string(str,"Local permutation strategy",NULL,"Sorting"); } } else if (mat->traits.flags & GHOST_SPARSEMAT_RCM) { ghost_line_string(str,"Local permutation strategy",NULL,"RCM"); } } ghost_line_string(str,"Permuted column indices",NULL,"%s",mat->traits.flags&GHOST_SPARSEMAT_NOT_PERMUTE_COLS?"No":"Yes"); } else { ghost_line_string(str,"Permuted",NULL,"No"); } ghost_line_string(str,"Row length sorting scope (sigma)",NULL,"%d",mat->traits.sortScope); ghost_line_string(str,"Ascending columns in row",NULL,"%s",mat->traits.flags&GHOST_SPARSEMAT_NOT_SORT_COLS?"Maybe":"Yes"); ghost_line_string(str,"Max row length 
(# rows)",NULL,"%d (%d)",mat->maxRowLen,mat->nMaxRows); ghost_line_string(str,"Row length variance",NULL,"%f",mat->variance); ghost_line_string(str,"Row length standard deviation",NULL,"%f",mat->deviation); ghost_line_string(str,"Row length coefficient of variation",NULL,"%f",mat->cv); ghost_line_string(str,"Chunk height (C)",NULL,"%d",mat->traits.C); ghost_line_string(str,"Chunk occupancy (beta)",NULL,"%f",(double)(SPM_NNZ(mat))/(double)(mat->nEnts)); ghost_line_string(str,"Threads per row (T)",NULL,"%d",mat->traits.T); ghost_footer_string(str); GHOST_FUNC_EXIT(GHOST_FUNCTYPE_UTIL); return GHOST_SUCCESS; } ghost_error ghost_sparsematofile_header(ghost_sparsemat *mat, char *path) { GHOST_FUNC_ENTER(GHOST_FUNCTYPE_IO); ghost_gidx mnrows,mncols,mnnz; GHOST_CALL_RETURN(ghost_sparsemat_nrows(&mnrows,mat)); mncols = mnrows; GHOST_CALL_RETURN(ghost_sparsemat_nnz(&mnnz,mat)); int32_t endianess = ghost_machine_bigendian(); int32_t version = 1; int32_t base = 0; int32_t symmetry = GHOST_BINCRS_SYMM_GENERAL; int32_t datatype = mat->traits.datatype; int64_t nrows = (int64_t)mnrows; int64_t ncols = (int64_t)mncols; int64_t nnz = (int64_t)mnnz; size_t ret; FILE *filed; if ((filed = fopen64(path, "w")) == NULL){ GHOST_ERROR_LOG("Could not open binary CRS file %s: %s",path,strerror(errno)); return GHOST_ERR_IO; } if ((ret = fwrite(&endianess,sizeof(endianess),1,filed)) != 1) { GHOST_ERROR_LOG("fwrite failed: %zu",ret); fclose(filed); return GHOST_ERR_IO; } if ((ret = fwrite(&version,sizeof(version),1,filed)) != 1) { GHOST_ERROR_LOG("fwrite failed: %zu",ret); fclose(filed); return GHOST_ERR_IO; } if ((ret = fwrite(&base,sizeof(base),1,filed)) != 1) { GHOST_ERROR_LOG("fwrite failed: %zu",ret); fclose(filed); return GHOST_ERR_IO; } if ((ret = fwrite(&symmetry,sizeof(symmetry),1,filed)) != 1) { GHOST_ERROR_LOG("fwrite failed: %zu",ret); fclose(filed); return GHOST_ERR_IO; } if ((ret = fwrite(&datatype,sizeof(datatype),1,filed)) != 1) { GHOST_ERROR_LOG("fwrite failed: %zu",ret); fclose(filed); return GHOST_ERR_IO; } if ((ret = fwrite(&nrows,sizeof(nrows),1,filed)) != 1) { GHOST_ERROR_LOG("fwrite failed: %zu",ret); fclose(filed); return GHOST_ERR_IO; } if ((ret = fwrite(&ncols,sizeof(ncols),1,filed)) != 1) { GHOST_ERROR_LOG("fwrite failed: %zu",ret); fclose(filed); return GHOST_ERR_IO; } if ((ret = fwrite(&nnz,sizeof(nnz),1,filed)) != 1) { GHOST_ERROR_LOG("fwrite failed: %zu",ret); fclose(filed); return GHOST_ERR_IO; } fclose(filed); GHOST_FUNC_EXIT(GHOST_FUNCTYPE_IO); return GHOST_SUCCESS; } bool ghost_sparsemat_symmetry_valid(ghost_sparsemat_symmetry symmetry) { GHOST_FUNC_ENTER(GHOST_FUNCTYPE_UTIL); GHOST_FUNC_EXIT(GHOST_FUNCTYPE_UTIL); if ((symmetry & (ghost_sparsemat_symmetry)GHOST_SPARSEMAT_SYMM_GENERAL) && (symmetry & ~(ghost_sparsemat_symmetry)GHOST_SPARSEMAT_SYMM_GENERAL)) return 0; if ((symmetry & (ghost_sparsemat_symmetry)GHOST_SPARSEMAT_SYMM_SYMMETRIC) && (symmetry & ~(ghost_sparsemat_symmetry)GHOST_SPARSEMAT_SYMM_SYMMETRIC)) return 0; return 1; } const char * ghost_sparsemat_symmetry_string(ghost_sparsemat_symmetry symmetry) { GHOST_FUNC_ENTER(GHOST_FUNCTYPE_UTIL); GHOST_FUNC_EXIT(GHOST_FUNCTYPE_UTIL); if (symmetry & GHOST_SPARSEMAT_SYMM_GENERAL) return "General"; if (symmetry & GHOST_SPARSEMAT_SYMM_SYMMETRIC) return "Symmetric"; if (symmetry & GHOST_SPARSEMAT_SYMM_SKEW_SYMMETRIC) { if (symmetry & GHOST_SPARSEMAT_SYMM_HERMITIAN) return "Skew-hermitian"; else return "Skew-symmetric"; } else { if (symmetry & GHOST_SPARSEMAT_SYMM_HERMITIAN) return "Hermitian"; } return "Invalid"; } void 
ghost_sparsemat_destroy(ghost_sparsemat *mat) { if (!mat) { return; } GHOST_FUNC_ENTER(GHOST_FUNCTYPE_TEARDOWN); #ifdef GHOST_HAVE_CUDA if (mat->traits.flags & GHOST_SPARSEMAT_DEVICE) { ghost_cu_free(mat->cu_rowLen); ghost_cu_free(mat->cu_rowLenPadded); ghost_cu_free(mat->cu_col); ghost_cu_free(mat->cu_val); ghost_cu_free(mat->cu_chunkStart); ghost_cu_free(mat->cu_chunkLen); } #endif free(mat->val); mat->val = NULL; free(mat->col); mat->col = NULL; free(mat->chunkStart); mat->chunkStart = NULL; free(mat->chunkMin); mat->chunkMin = NULL; free(mat->chunkLen); mat->chunkLen = NULL; free(mat->chunkLenPadded); mat->chunkLenPadded = NULL; free(mat->rowLen); mat->rowLen = NULL; free(mat->rowLen2); mat->rowLen2 = NULL; free(mat->rowLen4); mat->rowLen4 = NULL; free(mat->rowLenPadded); mat->rowLenPadded = NULL; mat->context->nmats--; if (mat->context->nmats == 0) { ghost_context_destroy(mat->context); } if (mat->localPart) { ghost_sparsemat_destroy(mat->localPart); } if (mat->remotePart) { ghost_sparsemat_destroy(mat->remotePart); } free(mat->col_orig); mat->col_orig = NULL; free(mat); GHOST_FUNC_EXIT(GHOST_FUNCTYPE_TEARDOWN); } ghost_error ghost_sparsemat_init_bin(ghost_sparsemat *mat, char *path, ghost_mpi_comm mpicomm, double weight) { GHOST_PERFWARNING_LOG("The current implementation of binCRS read-in is " "inefficient in terms of memory consumption!"); GHOST_FUNC_ENTER(GHOST_FUNCTYPE_INITIALIZATION|GHOST_FUNCTYPE_IO); ghost_error ret = GHOST_SUCCESS; ghost_sparsemat_rowfunc_file_initargs args; ghost_gidx dim[2]; ghost_lidx bincrs_dt = 0; // or use args.dt directly... ghost_sparsemat_src_rowfunc src = GHOST_SPARSEMAT_SRC_ROWFUNC_INITIALIZER; src.func = &ghost_sparsemat_rowfunc_bincrs; src.arg = &args; args.mat = mat; args.filename = path; if (src.func(GHOST_SPARSEMAT_ROWFUNC_BINCRS_ROW_GETDIM,&bincrs_dt,dim,NULL,src.arg)) { GHOST_ERROR_LOG("Error in matrix creation function"); ret = GHOST_ERR_UNKNOWN; goto err; } // Apply file datatype only if still unspecified. if(mat->traits.datatype == GHOST_DT_NONE) mat->traits.datatype = (ghost_datatype)bincrs_dt; // Require valid datatype here. GHOST_CALL_GOTO(ghost_datatype_size(&mat->elSize,mat->traits.datatype),err,ret); args.dt = mat->traits.datatype; src.gnrows = dim[0]; src.gncols = dim[1]; src.maxrowlen = dim[1]; GHOST_CALL_GOTO(ghost_sparsemat_init_rowfunc(mat,&src,mpicomm,weight),err,ret); goto out; err: out: GHOST_FUNC_EXIT(GHOST_FUNCTYPE_INITIALIZATION|GHOST_FUNCTYPE_IO); return ret; } ghost_error ghost_sparsemat_init_mm(ghost_sparsemat *mat, char *path, ghost_mpi_comm mpicomm, double weight) { GHOST_FUNC_ENTER(GHOST_FUNCTYPE_INITIALIZATION|GHOST_FUNCTYPE_IO); ghost_error ret = GHOST_SUCCESS; ghost_sparsemat_rowfunc_file_initargs args; ghost_gidx dim[2]; ghost_lidx bincrs_dt = 0; ghost_sparsemat_src_rowfunc src = GHOST_SPARSEMAT_SRC_ROWFUNC_INITIALIZER; if (mat->traits.flags & GHOST_SPARSEMAT_TRANSPOSE_MM) { src.func = &ghost_sparsemat_rowfunc_mm_transpose; } else { src.func = &ghost_sparsemat_rowfunc_mm; } src.arg = &args; args.filename = path; args.mat = mat; if (src.func(GHOST_SPARSEMAT_ROWFUNC_MM_ROW_GETDIM,&bincrs_dt,dim,NULL,src.arg)) { GHOST_ERROR_LOG("Error in matrix creation function"); ret = GHOST_ERR_UNKNOWN; goto err; } // Construct final datatype. 
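// Matrix Market files carry no explicit binary datatype: default to double
// precision when the caller left traits.datatype unset, then OR in the flag
// reported by the reader (presumably the real-vs-complex bit), as done below.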
if(mat->traits.datatype == GHOST_DT_NONE) mat->traits.datatype = GHOST_DT_DOUBLE; if((mat->traits.datatype == GHOST_DT_DOUBLE) || (mat->traits.datatype == GHOST_DT_FLOAT)) mat->traits.datatype |= (ghost_datatype)bincrs_dt; GHOST_CALL_GOTO(ghost_datatype_size(&mat->elSize,mat->traits.datatype),err,ret); args.dt = mat->traits.datatype; src.gnrows = dim[0]; src.gncols = dim[1]; src.maxrowlen = dim[1]; GHOST_CALL_GOTO(ghost_sparsemat_init_rowfunc(mat,&src,mpicomm,weight),err,ret); goto out; err: out: GHOST_FUNC_EXIT(GHOST_FUNCTYPE_INITIALIZATION|GHOST_FUNCTYPE_IO); return ret; } extern inline int ghost_sparsemat_rowfunc_crs(ghost_gidx row, ghost_lidx *rowlen, ghost_gidx *col, void *val, void *arg); ghost_error ghost_sparsemat_init_crs(ghost_sparsemat *mat, ghost_gidx offs, ghost_lidx n, ghost_gidx *col, void *val, ghost_lidx *rpt, ghost_mpi_comm mpicomm, double weight) { GHOST_FUNC_ENTER(GHOST_FUNCTYPE_INITIALIZATION); ghost_error ret = GHOST_SUCCESS; ghost_sparsemat_rowfunc_crs_arg args; // Require valid datatpye here. GHOST_CALL_GOTO(ghost_datatype_size(&mat->elSize,mat->traits.datatype),err,ret); args.dtsize = mat->elSize; args.col = col; args.val = val; args.rpt = rpt; args.offs = offs; ghost_sparsemat_src_rowfunc src = GHOST_SPARSEMAT_SRC_ROWFUNC_INITIALIZER; src.func = &ghost_sparsemat_rowfunc_crs; src.arg = &args; src.maxrowlen = n; GHOST_CALL_GOTO(ghost_sparsemat_init_rowfunc(mat,&src,mpicomm,weight),err,ret); goto out; err: out: GHOST_FUNC_EXIT(GHOST_FUNCTYPE_INITIALIZATION); return ret; } static const char * ghost_sparsemat_formatName(ghost_sparsemat *mat) { GHOST_FUNC_ENTER(GHOST_FUNCTYPE_UTIL); GHOST_FUNC_EXIT(GHOST_FUNCTYPE_UTIL); // TODO format SELL-C-sigma UNUSED(mat); return "SELL"; } size_t ghost_sparsemat_bytesize (ghost_sparsemat *mat) { GHOST_FUNC_ENTER(GHOST_FUNCTYPE_UTIL); GHOST_FUNC_EXIT(GHOST_FUNCTYPE_UTIL); return (size_t)((SPM_NROWSPAD(mat)/mat->traits.C)*sizeof(ghost_lidx) + mat->nEnts*(sizeof(ghost_lidx)+mat->elSize)); } static ghost_error initHaloAvg(ghost_sparsemat *mat) { ghost_error ret = GHOST_SUCCESS; int me,nprocs; GHOST_CALL_GOTO(ghost_rank(&me,mat->context->mpicomm),err,ret); GHOST_CALL_GOTO(ghost_nrank(&nprocs, mat->context->mpicomm),err,ret); ghost_context *ctx = mat->context; ghost_lidx ctx_nrowspadded = ctx->col_map->dim; bool *compression_flag; int *temp_nrankspresent; //calculate rankspresent here and store it, no need to do this each time averaging is done GHOST_CALL_GOTO(ghost_malloc((void **)&temp_nrankspresent, ctx_nrowspadded*sizeof(int)),err,ret); GHOST_CALL_GOTO(ghost_malloc((void **)&compression_flag, ctx_nrowspadded*sizeof(bool)),err,ret); #pragma omp parallel for schedule(runtime) for (int i=0; i<ctx_nrowspadded; i++) { if(ctx->col_map->loc_perm) { if(ctx->col_map->loc_perm_inv[i]< ctx->row_map->dim ) { //This check is important since entsInCol has only lnrows(NO_DISTINCTION //might give seg fault else) the rest are halo anyway, not needed for local sums temp_nrankspresent[i] = ctx->entsInCol[ctx->col_map->loc_perm_inv[i]]?1:0; //this has also to be permuted since it was } else { //temp_nrankspresent[i] = 0;//ctx->entsInCol[i]?1:0; } } else { if(i < ctx->row_map->ldim[me]) { temp_nrankspresent[i] = ctx->entsInCol[i]?1:0; //this has also to be permuted since it was } else { temp_nrankspresent[i] = 0; } } compression_flag[i] = false; } ghost_lidx ndues = 0; for (int i=0; i<nprocs; i++) { if(ctx->row_map->loc_perm) { #pragma omp parallel for schedule(runtime) for (int d=0 ;d < ctx->dues[i]; d++) { 
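// Count this halo (due) entry for its permuted column and flag the column
// so it ends up in the compressed averaging structure built further down.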
temp_nrankspresent[ctx->col_map->loc_perm[ctx->duelist[i][d]]]++; compression_flag[ctx->col_map->loc_perm[ctx->duelist[i][d]]] = true; } } else { #pragma omp parallel for schedule(runtime) for (int d=0 ;d < ctx->dues[i]; d++) { temp_nrankspresent[ctx->duelist[i][d]]++; compression_flag[ctx->duelist[i][d]] = true; } } ndues += ctx->dues[i]; } ghost_lidx *temp_avg_ptr; GHOST_CALL_GOTO(ghost_malloc((void **)&temp_avg_ptr, ctx_nrowspadded*sizeof(ghost_lidx)),err,ret); ghost_lidx ctr = 0; //count number of elements for(ghost_lidx i=0; i<ctx_nrowspadded; ++i) { if(ctr==0 && compression_flag[i]==true){ temp_avg_ptr[ctr] = i; ctr += 1; } else if(ctr!=0 && (compression_flag[i-1]!=compression_flag[i])) { temp_avg_ptr[ctr] = i; ctr += 1; } else if(i==ctx_nrowspadded-1 && (compression_flag[i-1]==true)) { temp_avg_ptr[ctr] = i+1; ctr += 1; } } ghost_lidx totalElem = 0; for(ghost_lidx i=0; i<ctr/2; ++i) { totalElem += (temp_avg_ptr[2*i+1]-temp_avg_ptr[2*i]); } ctx->nChunkAvg = ctr/2; ctx->nElemAvg = totalElem; //printf("Nchunk(%d) = %d, Total Elem(%d) = %d\n",me,ctx->nChunkAvg,me,totalElem); GHOST_CALL_GOTO(ghost_malloc((void **)&ctx->avg_ptr, ctr*sizeof(ghost_lidx)),err,ret); //now have a compressed column pointer for averaging #pragma omp parallel for schedule(runtime) for(int i=0; i<ctr; ++i) { ctx->avg_ptr[i] = temp_avg_ptr[i]; //printf("pointers[%d] = %d\n",i,temp_avg_ptr[i]); } ghost_lidx *map; //map from original column to compressed column GHOST_CALL_GOTO(ghost_malloc((void **)&map, ctx_nrowspadded*sizeof(ghost_lidx)),err,ret); ghost_lidx col_ctr = 0; GHOST_CALL_GOTO(ghost_malloc((void **)&ctx->nrankspresent, totalElem*sizeof(ghost_lidx)),err,ret); for(ghost_lidx i=0; i<ctx->nChunkAvg; ++i) { for(ghost_lidx j=ctx->avg_ptr[2*i]; j<ctx->avg_ptr[2*i+1]; ++j) { ctx->nrankspresent[col_ctr] = temp_nrankspresent[j]; //printf("nrankspresent[%d] = %d\n",col_ctr,ctx->nrankspresent[col_ctr]); map[j] = col_ctr; ++col_ctr; } } ctx->mapAvg = map; //now get a mapped duelist GHOST_CALL_GOTO(ghost_malloc((void **)&ctx->mappedDuelist, ndues*sizeof(ghost_lidx)),err,ret); ctr = 0; for (int i=0; i<nprocs; i++) { if(ctx->row_map->loc_perm) { for (int d=0 ;d < ctx->dues[i]; d++) { ctx->mappedDuelist[ctr] = map[ ctx->col_map->loc_perm[ctx->duelist[i][d]] ]; ++ctr; } } else { for (int d=0 ;d < ctx->dues[i]; d++) { ctx->mappedDuelist[ctr] = map[ ctx->duelist[i][d] ]; ++ctr; } } } /*printf("Mapped Due List(%d) = \n",me); * for (int i=0; i<nprocs; i++) { * for (int d=0 ;d < ctx->dues[i]; d++) { * if(ctx->perm_local) * printf("%d -> %d\n",ctx->perm_local->colPerm[ctx->duelist[i][d]], map[ctx->perm_local->colPerm[ctx->duelist[i][d]]]); * else * printf("%d -> %d\n",ctx->duelist[i][d], map[ctx->duelist[i][d]]); } } */ free(temp_avg_ptr); free(temp_nrankspresent); goto out; err: GHOST_ERROR_LOG("ERROR in initHaloAvg"); return GHOST_ERR_MPI; out: return ret; } ghost_error ghost_sparsemat_init_rowfunc(ghost_sparsemat *mat, ghost_sparsemat_src_rowfunc *src, ghost_mpi_comm mpicomm, double weight) { GHOST_FUNC_ENTER(GHOST_FUNCTYPE_INITIALIZATION); ghost_error ret = GHOST_SUCCESS; int me,nprocs; GHOST_CALL_GOTO(ghost_nrank(&nprocs, mpicomm),err,ret); if (!(mat->context)) { ghost_context_create(&(mat->context),src->gnrows,src->gncols,GHOST_CONTEXT_DEFAULT,mpicomm,weight); } if (!mat->context->row_map->dim) { ghost_map_create_distribution(mat->context->row_map,src,mat->context->weight,GHOST_MAP_DIST_NROWS,NULL); } if (!mat->context->col_map->dim) { 
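// The column map has not been distributed yet; set it up the same way as
// the row map directly above.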
ghost_map_create_distribution(mat->context->col_map,src,mat->context->weight,GHOST_MAP_DIST_NROWS,NULL); } if (mat->traits.flags & GHOST_SPARSEMAT_PERM_NO_DISTINCTION) { mat->context->col_map->flags = (ghost_map_flags)(mat->context->col_map->flags&GHOST_PERM_NO_DISTINCTION); } if (mat->traits.C == GHOST_SELL_CHUNKHEIGHT_ELLPACK) { mat->traits.C = PAD(SPM_NROWS(mat),GHOST_PAD_MAX); } else if (mat->traits.C == GHOST_SELL_CHUNKHEIGHT_AUTO){ mat->traits.C = 32; // TODO } mat->nchunks = CEILDIV(SPM_NROWS(mat),mat->traits.C); //GHOST_ERROR_LOG("set no_distinction"); //mat->context->flags = mat->context->flags | GHOST_PERM_NO_DISTINCTION; if (mat->context->row_map->dimpad == mat->context->row_map->dim) { mat->context->row_map->dimpad = PAD(SPM_NROWS(mat),ghost_densemat_row_padding()); } if (mat->context->col_map->dimpad == mat->context->col_map->dim) { mat->context->col_map->dimpad = PAD(mat->context->col_map->dim,ghost_densemat_row_padding()); } ghost_lidx nChunks = CEILDIV(SPM_NROWS(mat),mat->traits.C); // Require valid datatpye here. GHOST_CALL_GOTO(ghost_datatype_size(&mat->elSize,mat->traits.datatype),err,ret); if (!mat->chunkMin) GHOST_CALL_GOTO(ghost_malloc((void **)&mat->chunkMin, (nChunks)*sizeof(ghost_lidx)),err,ret); if (!mat->chunkLen) GHOST_CALL_GOTO(ghost_malloc((void **)&mat->chunkLen, (nChunks)*sizeof(ghost_lidx)),err,ret); if (!mat->chunkLenPadded) GHOST_CALL_GOTO(ghost_malloc((void **)&mat->chunkLenPadded, (nChunks)*sizeof(ghost_lidx)),err,ret); if (!mat->rowLen) GHOST_CALL_GOTO(ghost_malloc((void **)&mat->rowLen, (SPM_NROWSPAD(mat))*sizeof(ghost_lidx)),err,ret); if (!mat->rowLenPadded) GHOST_CALL_GOTO(ghost_malloc((void **)&mat->rowLenPadded, (SPM_NROWSPAD(mat))*sizeof(ghost_lidx)),err,ret); GHOST_CALL_GOTO(ghost_rank(&me,mat->context->mpicomm),err,ret); GHOST_CALL_GOTO(ghost_nrank(&nprocs, mat->context->mpicomm),err,ret); //set NO_DISTINCTION when block multicolor and RCM is on and more than 2 processors, TODO pure MC and MPI //this has to be invoked even if no permutations are carried out and more than 2 processors, since we need to //know amount of remote entries before (used in sparsemat_blockcolor); if(nprocs>1 && mat->traits.flags & GHOST_SOLVER_KACZ) { GHOST_INFO_LOG("NO DISTINCTION is set"); mat->context->flags |= (ghost_context_flags_t) GHOST_PERM_NO_DISTINCTION; } if (mat->traits.flags & GHOST_SPARSEMAT_DIAG_FIRST) { mat->traits.flags |= (ghost_sparsemat_flags)GHOST_SPARSEMAT_NOT_SORT_COLS; } //mat->context->nrowspadded = PAD(mat->context->row_map->ldim[me],ghost_densemat_row_padding()); ghost_lidx *rl = mat->rowLen; ghost_lidx *rlp = mat->rowLenPadded; ghost_lidx *cl = mat->chunkLen; ghost_lidx *clp = mat->chunkLenPadded; ghost_lidx ** chunkptr = &(mat->chunkStart); char **val = &(mat->val); ghost_gidx **col = &(mat->col_orig); ghost_lidx C = mat->traits.C; ghost_lidx P = mat->traits.T; int funcerrs = 0; char *tmpval = NULL; ghost_gidx *tmpcol = NULL; ghost_lidx i,row,chunk,colidx; ghost_gidx gnents = 0, gnnz = 0; ghost_lidx maxRowLenInChunk = 0, maxRowLen = 0, privateMaxRowLen = 0; GHOST_CALL_GOTO(ghost_rank(&me, mat->context->mpicomm),err,ret); /*if(mat->context->flags & GHOST_PERM_NO_DISTINCTION) * SPM_NCOLS(mat) = mat->context->nrowspadded; * else */ #ifdef GHOST_SPARSEMAT_GLOBALSTATS GHOST_CALL_GOTO(ghost_malloc((void **)&(mat->nzDist),sizeof(ghost_gidx)*(2*mat->context->row_map->gdim-1)),err,ret); memset(mat->nzDist,0,sizeof(ghost_gidx)*(2*mat->context->row_map->gdim-1)); #endif mat->context->lowerBandwidth = 0; mat->context->upperBandwidth = 0; if 
(mat->traits.sortScope > 1) { mat->traits.flags = (ghost_sparsemat_flags)(mat->traits.flags|GHOST_SPARSEMAT_SORT_ROWS); } // _Only_ global permutation: // Create dummymat without any permutation and create global permutation // based on this dummymat if ((mat->traits.flags & GHOST_SPARSEMAT_PERM_ANY_GLOBAL) && (!(mat->traits.flags & GHOST_SPARSEMAT_PERM_ANY_LOCAL) && !(mat->traits.flags & GHOST_SOLVER_KACZ))) { ghost_sparsemat *dummymat = NULL; ghost_sparsemat_traits mtraits = mat->traits; mtraits.flags = (ghost_sparsemat_flags)(mtraits.flags & ~(GHOST_SPARSEMAT_PERM_ANY_GLOBAL)); mtraits.flags = (ghost_sparsemat_flags)(mtraits.flags | GHOST_SPARSEMAT_SAVE_ORIG_COLS); mtraits.C = 1; mtraits.sortScope = 1; ghost_sparsemat_create(&dummymat,NULL,&mtraits,1); ghost_sparsemat_init_rowfunc(dummymat,src,mat->context->mpicomm,mat->context->weight); if (mat->traits.flags & GHOST_SPARSEMAT_SCOTCHIFY) { ghost_sparsemat_perm_scotch(mat->context,dummymat); } if (mat->traits.flags & GHOST_SPARSEMAT_ZOLTAN) { ghost_sparsemat_perm_zoltan(mat->context,dummymat); } ghost_sparsemat_destroy(dummymat); } // Any combination of only local or global+local permutations: // Create dummymat with global permutations only and create local // permutations based on this dummymat else if (mat->traits.flags & GHOST_SPARSEMAT_PERM_ANY || mat->traits.flags & GHOST_SOLVER_KACZ) { ghost_sparsemat *dummymat = NULL; ghost_sparsemat_traits mtraits = mat->traits; mtraits.flags = (ghost_sparsemat_flags)(mtraits.flags & ~(GHOST_SPARSEMAT_PERM_ANY_LOCAL)); mtraits.flags = (ghost_sparsemat_flags)(mtraits.flags & ~(GHOST_SOLVER_KACZ)); mtraits.flags = (ghost_sparsemat_flags)(mtraits.flags | GHOST_SPARSEMAT_SAVE_ORIG_COLS); if ((mat->traits.flags & GHOST_SOLVER_KACZ) && (nprocs > 1)) { mtraits.flags = (ghost_sparsemat_flags)(mtraits.flags | GHOST_SPARSEMAT_PERM_NO_DISTINCTION); } mtraits.C = 1; mtraits.sortScope = 1; ghost_sparsemat_create(&dummymat,NULL,&mtraits,1); ghost_sparsemat_init_rowfunc(dummymat,src,mat->context->mpicomm,mat->context->weight); if (mat->traits.flags & GHOST_SPARSEMAT_RCM) { ghost_sparsemat_perm_spmp(mat->context,dummymat); } if (mat->traits.flags & GHOST_SPARSEMAT_COLOR) { ghost_sparsemat_perm_color(mat->context,dummymat); } //blockcoloring needs to know bandwidth //TODO avoid 2 times calculating bandwidth, if no RCM or no bandwidth disturbing permutations are done //take this branch only if the matrix cannot be bandwidth bound, //else normal splitting with just RCM permutation would do the work //check whether BLOCKCOLOR is necessary, it is avoided if user explicitly request Multicoloring method if(mat->traits.flags & GHOST_SOLVER_KACZ) { ghost_set_kacz_ratio(mat->context,dummymat); if(mat->context->kaczRatio < mat->context->kacz_setting.active_threads && !(mat->traits.flags & GHOST_SPARSEMAT_COLOR)) { if(!(mat->traits.flags & GHOST_SPARSEMAT_PERM_ANY_LOCAL) && mat->context->kacz_setting.active_threads!=1){ GHOST_WARNING_LOG("GHOST: BMC is ON and this might lead to local row permutations; if you want to avoid it please decrease the number of threads to %d",MAX(1,(int)mat->context->kaczRatio)); } mat->traits.flags |= (ghost_sparsemat_flags)GHOST_SPARSEMAT_BLOCKCOLOR; } } if (mat->traits.flags & GHOST_SPARSEMAT_BLOCKCOLOR) { ghost_sparsemat_blockColor(mat->context,dummymat); } if (mat->traits.sortScope > 1) { ghost_sparsemat_perm_sort(mat->context,dummymat,mat->traits.sortScope); } ghost_sparsemat_destroy(dummymat); if (mat->context->row_map->loc_perm && mat->context->col_map->loc_perm == NULL) { 
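// Only a row permutation was produced by the local reordering above; alias
// it into the column map so column indices get permuted consistently.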
mat->context->col_map->cu_loc_perm = mat->context->row_map->cu_loc_perm; mat->context->col_map->loc_perm = mat->context->row_map->loc_perm; mat->context->col_map->loc_perm_inv = mat->context->row_map->loc_perm_inv; } if (mat->context->row_map->glb_perm && mat->context->col_map->glb_perm == NULL) { mat->context->col_map->glb_perm = mat->context->row_map->glb_perm; mat->context->col_map->glb_perm_inv = mat->context->row_map->glb_perm_inv; } if (mat->traits.flags & GHOST_SPARSEMAT_NOT_SORT_COLS) { GHOST_PERFWARNING_LOG("Unsorted columns inside a row may yield to bad performance! However, matrix construnction will be faster."); } } else { if (mat->traits.sortScope > 1) { GHOST_WARNING_LOG("Ignoring sorting scope"); } mat->traits.flags |= (ghost_sparsemat_flags)GHOST_SPARSEMAT_NOT_PERMUTE_COLS; mat->traits.flags |= (ghost_sparsemat_flags)GHOST_SPARSEMAT_NOT_SORT_COLS; } if (src->func == ghost_sparsemat_rowfunc_bincrs || src->func == ghost_sparsemat_rowfunc_mm || src->func == ghost_sparsemat_rowfunc_mm_transpose) { if (src->func(GHOST_SPARSEMAT_ROWFUNC_INIT,NULL,NULL,NULL,src->arg)) { GHOST_ERROR_LOG("Error in matrix creation function"); ret = GHOST_ERR_UNKNOWN; goto err; } } ghost_lidx *tmpclp = NULL; if (!clp) { ghost_malloc((void **)&tmpclp,nChunks*sizeof(ghost_lidx)); clp = tmpclp; } ghost_lidx *tmprl = NULL; if (!rl) { ghost_malloc((void **)&tmprl,nChunks*sizeof(ghost_lidx)); rl = tmprl; } if (!(*chunkptr)) { GHOST_INSTR_START("rowlens"); GHOST_CALL_GOTO(ghost_malloc_align((void **)chunkptr,(nChunks+1)*sizeof(ghost_lidx),GHOST_DATA_ALIGNMENT),err,ret); } #pragma omp parallel private(i,colidx,tmpval,tmpcol,row,maxRowLenInChunk) reduction (+:gnents,gnnz,funcerrs) reduction (max:privateMaxRowLen) { ghost_lidx rowlen; maxRowLenInChunk = 0; GHOST_CALL(ghost_malloc((void **)&tmpval,src->maxrowlen*mat->elSize),ret); GHOST_CALL(ghost_malloc((void **)&tmpcol,src->maxrowlen*sizeof(ghost_gidx)),ret); /*if (!(mat->traits.flags & GHOST_SPARSEMAT_PERM_ANY) && src->func == ghost_sparsemat_rowfunc_crs) { * #pragma omp single * GHOST_INFO_LOG("Fast matrix construction for CRS source and no permutation") * #pragma omp for schedule(runtime) * for( chunk = 0; chunk < nChunks; chunk++ ) { * chunkptr[chunk] = 0; // NUMA init * for (i=0, row = chunk*C; i < C && row < SPM_NROWS(mat); i++, row++) { * * rowlen=((ghost_sparsemat_rowfunc_crs_arg *)src->arg)->rpt[mat->context->row_map->goffs[me]+row+1]-((ghost_sparsemat_rowfunc_crs_arg *)src->arg)->rpt[mat->context->row_map->goffs[me]+row]; * * // rl _must_ not be NULL because we need it for the statistics * rl[row] = rowlen; * * if (rlp) { * rlp[row] = PAD(rowlen,P); } gnnz += rowlen; maxRowLenInChunk = MAX(maxRowLenInChunk,rowlen); } if (cl) { cl[chunk] = maxRowLenInChunk; } // clp _must_ not be NULL because we need it for the chunkptr computation clp[chunk] = PAD(maxRowLenInChunk,P); gnents += clp[chunk]*C; privateMaxRowLen = MAX(privateMaxRowLen,maxRowLenInChunk); maxRowLenInChunk = 0; } } else {*/ // allocate local workspace void *work = NULL; if (src->funcinit) { src->funcinit(src->arg, &work); } #pragma omp for schedule(runtime) for( chunk = 0; chunk < nChunks; chunk++ ) { (*chunkptr)[chunk] = 0; // NUMA init for (i=0, row = chunk*C; (i < C) && (row < SPM_NROWS(mat)); i++, row++) { ghost_gidx callrow = 0; // row with which to call the rowfunc if (mat->context->row_map->glb_perm || mat->context->row_map->loc_perm) { if (mat->context->row_map->glb_perm && mat->context->row_map->loc_perm) { GHOST_INFO_LOG("Global _and_ local permutation"); callrow = 
mat->context->row_map->glb_perm_inv[mat->context->row_map->loc_perm_inv[row]]; } else if (mat->context->row_map->glb_perm) { callrow = mat->context->row_map->glb_perm_inv[row]; } else if (mat->context->row_map->loc_perm) { callrow = mat->context->row_map->goffs[me]+mat->context->row_map->loc_perm_inv[row]; } } else { callrow = mat->context->row_map->goffs[me]+row; } if (work) { funcerrs += src->func(callrow,&rowlen,tmpcol,tmpval,work); } else { funcerrs += src->func(callrow,&rowlen,tmpcol,tmpval,src->arg); } gnnz += rowlen; bool diagexists = 0; // check whether the diagonal exists, if not: create an artificial zero diagonal if (mat->traits.flags & GHOST_SPARSEMAT_DIAG_FIRST) { for (colidx=0; colidx<rowlen; colidx++) { if (tmpcol[colidx] == callrow) { diagexists = 1; } } if (!diagexists) { // need to add an artifial zero diagonal value rowlen++; } } // rl _must_ not be NULL because we need it for the statistics rl[row] = rowlen; if (rlp) { rlp[row] = PAD(rowlen,P); } maxRowLenInChunk = MAX(maxRowLenInChunk,rowlen); } if (cl) { cl[chunk] = maxRowLenInChunk; } // clp _must_ not be NULL because we need it for the chunkptr computation clp[chunk] = PAD(maxRowLenInChunk,P); gnents += clp[chunk]*C; privateMaxRowLen = MAX(privateMaxRowLen,maxRowLenInChunk); maxRowLenInChunk = 0; } // free local workspace if (src->funcinit) { src->funcinit(src->arg, &work); } free(tmpval); tmpval = NULL; free(tmpcol); tmpcol = NULL; } GHOST_INSTR_STOP("rowlens"); maxRowLen = privateMaxRowLen; mat->maxRowLen = maxRowLen; if (funcerrs) { GHOST_ERROR_LOG("Matrix construction function returned error"); ret = GHOST_ERR_UNKNOWN; goto err; } if (gnents > (ghost_gidx)GHOST_LIDX_MAX) { GHOST_ERROR_LOG("The local number of entries is too large: %"PRGIDX,gnents); return GHOST_ERR_DATATYPE; } if (gnnz > (ghost_gidx)GHOST_LIDX_MAX) { GHOST_ERROR_LOG("The local number of entries is too large: %"PRGIDX,gnents); return GHOST_ERR_DATATYPE; } SPM_NNZ(mat) = (ghost_lidx)gnnz; mat->nEnts = (ghost_lidx)gnents; GHOST_INSTR_START("chunkptr_init"); for(chunk = 0; chunk < nChunks; chunk++ ) { (*chunkptr)[chunk+1] = (*chunkptr)[chunk] + clp[chunk]*C; } GHOST_INSTR_STOP("chunkptr_init"); #ifdef GHOST_HAVE_MPI ghost_gidx fent = 0; for (i=0; i<nprocs; i++) { if (i>0 && me==i) { MPI_CALL_GOTO(MPI_Recv(&fent,1,ghost_mpi_dt_gidx,me-1,me-1,mat->context->mpicomm,MPI_STATUS_IGNORE),err,ret); } if (me==i && i<nprocs-1) { ghost_gidx send = fent+mat->nEnts; MPI_CALL_GOTO(MPI_Send(&send,1,ghost_mpi_dt_gidx,me+1,me,mat->context->mpicomm),err,ret); } } //MPI_CALL_GOTO(MPI_Allgather(&mat->nEnts,1,ghost_mpi_dt_lidx,mat->context->lnEnts,1,ghost_mpi_dt_lidx,mat->context->mpicomm),err,ret); //MPI_CALL_GOTO(MPI_Allgather(&fent,1,ghost_mpi_dt_gidx,mat->context->lfEnt,1,ghost_mpi_dt_gidx,mat->context->mpicomm),err,ret); MPI_CALL_GOTO(MPI_Allreduce(&gnnz,&(mat->context->gnnz),1,ghost_mpi_dt_gidx,MPI_SUM,mat->context->mpicomm),err,ret); #else mat->context->gnnz = gnnz; #endif /* if (src->maxrowlen != mat->maxRowLen) { GHOST_DEBUG_LOG(1,"The maximum row length was not correct. 
Setting it from %"PRLIDX" to %"PRGIDX,src->maxrowlen,mat->maxRowLen); src->maxrowlen = mat->maxRowLen; } */ bool readcols = 0; // we only need to read the columns the first time the matrix is created if (!(*val)) { GHOST_CALL_GOTO(ghost_malloc_align((void **)val,mat->elSize*(size_t)mat->nEnts,GHOST_DATA_ALIGNMENT),err,ret); } if (!(*col)) { GHOST_CALL_GOTO(ghost_malloc_align((void **)col,sizeof(ghost_gidx)*(size_t)mat->nEnts,GHOST_DATA_ALIGNMENT),err,ret); readcols = 1; } if (src->func == ghost_sparsemat_rowfunc_crs && mat->context->row_map->glb_perm) { GHOST_ERROR_LOG("Global permutation does not work with local CRS source"); } GHOST_INSTR_START("cols_and_vals"); #pragma omp parallel private(i,colidx,row,tmpval,tmpcol) { ghost_lidx rowlen; int funcret = 0; GHOST_CALL(ghost_malloc((void **)&tmpval,C*mat->maxRowLen*mat->elSize),ret); GHOST_CALL(ghost_malloc((void **)&tmpcol,C*mat->maxRowLen*sizeof(ghost_gidx)),ret); // allocate local workspace void *work = NULL; if (src->funcinit) { src->funcinit(src->arg, &work); } if (src->func == ghost_sparsemat_rowfunc_crs) { ghost_gidx *crscol; char *crsval = (char *)(((ghost_sparsemat_rowfunc_crs_arg *)src->arg)->val); ghost_lidx *crsrpt = ((ghost_sparsemat_rowfunc_crs_arg *)src->arg)->rpt; #pragma omp single GHOST_INFO_LOG("Fast matrix construction for CRS source and no permutation"); #pragma omp for schedule(runtime) for( chunk = 0; chunk < nChunks; chunk++ ) { //memset(tmpval,0,mat->elSize*src->maxrowlen*C); for (i=0, row = chunk*C; (i<C) && (chunk*C+i < SPM_NROWS(mat)); i++, row++) { ghost_gidx actualrow; if (mat->context->row_map->loc_perm || mat->context->row_map->glb_perm) { actualrow = mat->context->row_map->loc_perm_inv[row]; } else { actualrow = row; } crsval = &((char *)(((ghost_sparsemat_rowfunc_crs_arg *)src->arg)->val))[crsrpt[actualrow]*mat->elSize]; #pragma vector nontemporal for(colidx = 0; colidx<rl[row]; colidx++) { // assignment is much faster than memcpy with non-constant size, so we need those branches... 
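// SELL-C layout: entry colidx of row (chunk*C + i) is stored at
// chunkStart[chunk] + colidx*C + i, i.e. the C rows of a chunk are interleaved.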
if (mat->traits.datatype & GHOST_DT_REAL) { if (mat->traits.datatype & GHOST_DT_DOUBLE) { ((double *)(*val))[(*chunkptr)[chunk]+colidx*C+i] = ((double *)(crsval))[colidx]; } else { ((float *)(*val))[(*chunkptr)[chunk]+colidx*C+i] = ((float *)(crsval))[colidx]; } } else { if (mat->traits.datatype & GHOST_DT_DOUBLE) { ((complex double *)(*val))[(*chunkptr)[chunk]+colidx*C+i] = ((complex double *)(crsval))[colidx]; } else { ((complex float *)(*val))[(*chunkptr)[chunk]+colidx*C+i] = ((complex float *)(crsval))[colidx]; } } if (readcols) { crscol = &((ghost_sparsemat_rowfunc_crs_arg *)src->arg)->col[crsrpt[actualrow]]; if (mat->context->row_map->loc_perm || mat->context->row_map->glb_perm) { // local permutation: distinction between global and local entriess, if GHOST_PERM_NO_DISTINCTION is not set if ((mat->context->flags & GHOST_PERM_NO_DISTINCTION) || ( (crscol[colidx] >= mat->context->row_map->goffs[me]) && (crscol[colidx] < (mat->context->row_map->goffs[me]+SPM_NROWS(mat))) )) { // local entry: copy with permutation if (mat->traits.flags & GHOST_SPARSEMAT_NOT_PERMUTE_COLS) { (*col)[(*chunkptr)[chunk]+colidx*C+i] = crscol[colidx]; } else if(mat->context->flags & GHOST_PERM_NO_DISTINCTION) { (*col)[(*chunkptr)[chunk]+colidx*C+i] = mat->context->col_map->loc_perm[crscol[colidx]]; } else { (*col)[(*chunkptr)[chunk]+colidx*C+i] = mat->context->col_map->loc_perm[crscol[colidx]-mat->context->row_map->goffs[me]]+mat->context->row_map->goffs[me]; } } else { // remote entry: copy without permutation (*col)[(*chunkptr)[chunk]+colidx*C+i] = crscol[colidx]; } } else { (*col)[(*chunkptr)[chunk]+colidx*C+i] = crscol[colidx]; } } } for (; colidx < clp[chunk]; colidx++) { memset(&(*val)[((*chunkptr)[chunk]+colidx*C+i)*mat->elSize],0,mat->elSize); (*col)[(*chunkptr)[chunk]+colidx*C+i] = mat->context->row_map->goffs[me]; } } } } else { ghost_gidx callrow; #pragma omp for schedule(runtime) for (chunk = 0; chunk < nChunks; chunk++) { if(mat->context->flags & GHOST_PERM_NO_DISTINCTION) { memset(&(*col)[(*chunkptr)[chunk]],0,C*clp[chunk]*sizeof(ghost_gidx)); } else { for (i=0; i<C*clp[chunk]; i++) { (*col)[(*chunkptr)[chunk]] = mat->context->row_map->offs; } } memset(&(*val)[(*chunkptr)[chunk]*mat->elSize],0,C*clp[chunk]*mat->elSize); } #pragma omp for schedule(runtime) for (chunk = 0; chunk < nChunks; chunk++) { if(mat->context->flags & GHOST_PERM_NO_DISTINCTION) { memset(tmpcol,0,C*mat->maxRowLen*sizeof(ghost_gidx)); } else { for (i=0; i<C*mat->maxRowLen; i++) { tmpcol[i] = mat->context->row_map->offs; } } memset(tmpval,0,C*mat->maxRowLen*mat->elSize); for (i=0, row = chunk*C; (i<C) && (chunk*C+i < SPM_NROWS(mat)); i++, row++) { if (mat->context->row_map->glb_perm || mat->context->row_map->loc_perm) { if (mat->context->row_map->glb_perm && mat->context->row_map->loc_perm) { GHOST_INFO_LOG("Global _and_ local permutation"); callrow = mat->context->row_map->glb_perm_inv[mat->context->row_map->loc_perm_inv[row]]; } else if (mat->context->row_map->glb_perm) { callrow = mat->context->row_map->glb_perm_inv[row]; } else if (mat->context->row_map->loc_perm) { callrow = mat->context->row_map->goffs[me]+mat->context->row_map->loc_perm_inv[row]; } } else { callrow = mat->context->row_map->goffs[me]+row; } if (work) { funcret = src->func(callrow,&rowlen,&tmpcol[mat->maxRowLen*i],&tmpval[mat->maxRowLen*i*mat->elSize],work); } else { funcret = src->func(callrow,&rowlen,&tmpcol[mat->maxRowLen*i],&tmpval[mat->maxRowLen*i*mat->elSize],src->arg); } if (funcret) { GHOST_ERROR_LOG("Matrix construction function returned error"); 
ret = GHOST_ERR_UNKNOWN; } if (rowlen != rl[row]) { if (mat->traits.flags & GHOST_SPARSEMAT_DIAG_FIRST) { if (rowlen != rl[row]-1) { GHOST_ERROR_LOG("Row length mismatch in matrix construction for a DIAG_FIRST matrix!"); ret = GHOST_ERR_UNKNOWN; } else { tmpcol[i*mat->maxRowLen+rowlen] = callrow; // tmpval already set to zero } } else { GHOST_ERROR_LOG("Row length mismatch in matrix construction!"); ret = GHOST_ERR_UNKNOWN; } } for (colidx = 0; colidx<clp[chunk]; colidx++) { memcpy(*val+mat->elSize*((*chunkptr)[chunk]+colidx*C+i),&tmpval[mat->elSize*(i*mat->maxRowLen+colidx)],mat->elSize); if (mat->context->row_map->loc_perm || mat->context->row_map->glb_perm) { if (mat->context->row_map->glb_perm) { // no distinction between global and local entries // global permutation will be done after all rows are read (*col)[(*chunkptr)[chunk]+colidx*C+i] = tmpcol[i*mat->maxRowLen+colidx]; } else { // local permutation: distinction between global and local entries, if GHOST_PERM_NO_DISTINCTION is not set if ((mat->context->flags & GHOST_PERM_NO_DISTINCTION) || ((tmpcol[i*mat->maxRowLen+colidx] >= mat->context->row_map->goffs[me]) && (tmpcol[i*mat->maxRowLen+colidx] < (mat->context->row_map->goffs[me]+SPM_NROWS(mat))))) { // local entry: copy with permutation if (mat->traits.flags & GHOST_SPARSEMAT_NOT_PERMUTE_COLS) { (*col)[(*chunkptr)[chunk]+colidx*C+i] = tmpcol[i*mat->maxRowLen+colidx]; } else if(mat->context->flags & GHOST_PERM_NO_DISTINCTION) { // (*col)[(*chunkptr)[chunk]+colidx*C+i] = mat->context->row_map->loc_perm->colPerm[tmpcol[i*mat->maxRowLen+colidx]] // do not permute remote and do not allow local to go to remote if(tmpcol[i*mat->maxRowLen+colidx] < mat->context->col_map->dimpad) { if( mat->context->col_map->loc_perm[tmpcol[i*mat->maxRowLen+colidx]]>=mat->context->col_map->dimpad ) { GHOST_ERROR_LOG("Ensure you have halo number of paddings, since GHOST_PERM_NO_DISTINCTION is switched on"); } (*col)[(*chunkptr)[chunk]+colidx*C+i] = mat->context->col_map->loc_perm[tmpcol[i*mat->maxRowLen+colidx]]; } else { (*col)[(*chunkptr)[chunk]+colidx*C+i] = tmpcol[i*mat->maxRowLen+colidx]; } } else { (*col)[(*chunkptr)[chunk]+colidx*C+i] = mat->context->col_map->loc_perm[tmpcol[i*mat->maxRowLen+colidx]-mat->context->row_map->goffs[me]]+mat->context->row_map->goffs[me]; // (*col)[(*chunkptr)[chunk]+colidx*C+i] = mat->context->row_map->loc_perm->colPerm[tmpcol[i*mat->maxRowLen+colidx]-mat->context->row_map->goffs[me]]+mat->context->row_map->goffs[me]; } } else { // remote entry: copy without permutation (*col)[(*chunkptr)[chunk]+colidx*C+i] = tmpcol[i*mat->maxRowLen+colidx]; } } } else { (*col)[(*chunkptr)[chunk]+colidx*C+i] = tmpcol[i*mat->maxRowLen+colidx]; } } } } } // free local workspace if (src->funcinit) { src->funcinit(src->arg, &work); } free(tmpval); tmpval = NULL; free(tmpcol); tmpcol = NULL; } if (SPM_NROWS(mat) % C) { for (i=SPM_NROWS(mat)%C; i < C; i++) { for (colidx = 0; colidx<clp[nChunks-1]; colidx++) { (*col)[(*chunkptr)[nChunks-1]+colidx*C+i] = mat->context->row_map->goffs[me]; memset(*val+mat->elSize*((*chunkptr)[nChunks-1]+colidx*C+i),0,mat->elSize); } } } GHOST_INSTR_STOP("cols_and_vals"); if (mat->context->row_map->glb_perm) { ghost_sparsemat_perm_global_cols(*col,mat->nEnts,mat->context); } GHOST_INSTR_START("sort_and_register"); if (!(mat->traits.flags & GHOST_SPARSEMAT_NOT_SORT_COLS) || mat->traits.flags & GHOST_SPARSEMAT_DIAG_FIRST) { for( chunk = 0; chunk < nChunks; chunk++ ) { for (i=0; (i<C) && (chunk*C+i < SPM_NROWS(mat)); i++) { row = chunk*C+i; 
ghost_sparsemat_sortrow(&((*col)[(*chunkptr)[chunk]+i]),&(*val)[((*chunkptr)[chunk]+i)*mat->elSize],mat->elSize,rl[row],C,row,mat->traits.flags & GHOST_SPARSEMAT_DIAG_FIRST); #ifdef GHOST_SPARSEMAT_STATS ghost_sparsemat_registerrow(mat,mat->context->row_map->goffs[me]+row,&(*col)[(*chunkptr)[chunk]+i],rl[row],C); #endif } } } else { #ifdef GHOST_SPARSEMAT_STATS for( chunk = 0; chunk < nChunks; chunk++ ) { for (i=0; (i<C) && (chunk*C+i < SPM_NROWS(mat)); i++) { row = chunk*C+i; ghost_sparsemat_registerrow(mat,mat->context->row_map->goffs[me]+row,&(*col)[(*chunkptr)[chunk]+i],rl[row],C); } } #endif } #ifdef GHOST_SPARSEMAT_STATS ghost_sparsemat_registerrow_finalize(mat); #endif GHOST_INSTR_STOP("sort_and_register"); /* mat->context->lnEnts[me] = mat->nEnts; for (i=0; i<nprocs; i++) { mat->context->lfEnt[i] = 0; } for (i=1; i<nprocs; i++) { mat->context->lfEnt[i] = mat->context->lfEnt[i-1]+mat->context->lnEnts[i-1]; } */ free(tmpclp); free(tmprl); if (ret != GHOST_SUCCESS) { goto err; } GHOST_CALL_GOTO(ghost_sparsemat_split(mat),err,ret); if(mat->traits.flags & GHOST_SOLVER_KACZ) { //split transition zones if(mat->traits.flags & (ghost_sparsemat_flags)GHOST_SPARSEMAT_BLOCKCOLOR) { split_transition(mat); } //split if no splitting was done before and MC is off else if(!(mat->traits.flags & GHOST_SPARSEMAT_COLOR)) { if( (mat->context->kaczRatio >= 2*mat->context->kacz_setting.active_threads) ) { ghost_rcm_dissect(mat); } else { split_analytical(mat); } } } #ifdef GHOST_HAVE_CUDA if (!(mat->traits.flags & GHOST_SPARSEMAT_HOST)) ghost_sparsemat_upload(mat); #endif GHOST_CALL_GOTO(ghost_malloc((void **)&mat->rowLen2,SPM_NROWSPAD(mat)/2*sizeof(ghost_lidx)),err,ret); GHOST_CALL_GOTO(ghost_malloc((void **)&mat->rowLen4,SPM_NROWSPAD(mat)/4*sizeof(ghost_lidx)),err,ret); ghost_lidx max4 = 0 , max2 = 0; for (i=0; i<SPM_NROWSPAD(mat); i++) { if (!(i%2)) { max2 = 0; } if (!(i%4)) { max4 = 0; } if (mat->rowLen[i] > max2) { max2 = mat->rowLen[i]; } if (mat->rowLen[i] > max4) { max4 = mat->rowLen[i]; } if (!((i+1)%2)) { mat->rowLen2[i/2] = max2; } if (!((i+1)%4)) { mat->rowLen4[i/4] = max4; } } if (src->func == ghost_sparsemat_rowfunc_bincrs || src->func == ghost_sparsemat_rowfunc_mm) { if (src->func(GHOST_SPARSEMAT_ROWFUNC_FINALIZE,NULL,NULL,NULL,src->arg)) { GHOST_ERROR_LOG("Error in matrix creation function"); ret = GHOST_ERR_UNKNOWN; goto err; } } goto out; err: free(mat->val); mat->val = NULL; free(mat->col_orig); mat->col_orig = NULL; free(mat->chunkMin); mat->chunkMin = NULL; free(mat->chunkLen); mat->chunkLen = NULL; free(mat->chunkLenPadded); mat->chunkLenPadded = NULL; free(mat->rowLen); mat->rowLen = NULL; free(mat->rowLen2); mat->rowLen2 = NULL; free(mat->rowLen4); mat->rowLen4 = NULL; free(mat->rowLenPadded); mat->rowLenPadded = NULL; free(mat->chunkStart); mat->chunkStart = NULL; mat->nEnts = 0; out: GHOST_FUNC_EXIT(GHOST_FUNCTYPE_INITIALIZATION); return ret; } static ghost_error ghost_sparsemat_split(ghost_sparsemat *mat) { if (!mat) { GHOST_ERROR_LOG("Matrix is NULL"); return GHOST_ERR_INVALID_ARG; } ghost_error ret = GHOST_SUCCESS; GHOST_FUNC_ENTER(GHOST_FUNCTYPE_INITIALIZATION); GHOST_DEBUG_LOG(1,"Splitting the SELL matrix into a local and remote part"); ghost_gidx i,j; int me,nproc; GHOST_CALL_RETURN(ghost_rank(&me, mat->context->mpicomm)); GHOST_CALL_RETURN(ghost_nrank(&nproc, mat->context->mpicomm)); ghost_lidx lnEnts_l, lnEnts_r; ghost_lidx current_l, current_r; ghost_lidx chunk; ghost_lidx idx, row; GHOST_INSTR_START("init_compressed_cols"); #ifdef GHOST_IDX_UNIFORM if 
(!(mat->traits.flags & GHOST_SPARSEMAT_SAVE_ORIG_COLS)) { GHOST_DEBUG_LOG(1,"In-place column compression!"); mat->col = mat->col_orig; } else #endif { if (!mat->col) { GHOST_DEBUG_LOG(1,"Duplicate col array!"); GHOST_CALL_GOTO(ghost_malloc_align((void **)&mat->col,sizeof(ghost_lidx)*mat->nEnts,GHOST_DATA_ALIGNMENT),err,ret); #pragma omp parallel for private(j) schedule(runtime) for (i=0; i<SPM_NCHUNKS(mat); i++) { for (j=mat->chunkStart[i]; j<mat->chunkStart[i+1]; j++) { mat->col[j] = 0; } } } } GHOST_INSTR_STOP("init_compressed_cols"); ghost_lidx nhalo; GHOST_CALL_GOTO(ghost_context_comm_init(mat->context,mat->col_orig,mat,mat->col,&nhalo),err,ret); if (nproc > 1) { if (mat->context->col_map->nhalo) { if (nhalo > mat->context->col_map->nhalo) { GHOST_ERROR_LOG("The maps are not compatible!"); ret = GHOST_ERR_INVALID_ARG; goto err; } } else { mat->context->col_map->nhalo = nhalo; if(mat->context->flags & GHOST_PERM_NO_DISTINCTION) { mat->context->col_map->dim = mat->context->col_map->dimpad+mat->context->col_map->nhalo; mat->context->col_map->dimhalo = mat->context->col_map->dimpad+2*mat->context->col_map->nhalo; mat->context->col_map->dimpad = PAD(mat->context->col_map->dimpad+2*mat->context->col_map->nhalo,ghost_densemat_row_padding()); initHaloAvg(mat); } else { mat->context->col_map->dimhalo = mat->context->col_map->dimpad+mat->context->col_map->nhalo; mat->context->col_map->dimpad = PAD(mat->context->col_map->dimpad+mat->context->col_map->nhalo,ghost_densemat_row_padding()); } } } #ifndef GHOST_IDX_UNIFORM if (!(mat->traits.flags & GHOST_SPARSEMAT_SAVE_ORIG_COLS)) { GHOST_DEBUG_LOG(1,"Free orig cols"); free(mat->col_orig); mat->col_orig = NULL; } #endif if (!(mat->traits.flags & GHOST_SPARSEMAT_NOT_STORE_SPLIT)) { // split computation GHOST_INSTR_START("split"); ghost_sparsemat_create(&(mat->localPart),mat->context,&mat->splittraits[0],1); ghost_sparsemat *localMat = mat->localPart; mat->localPart->traits.symmetry = mat->traits.symmetry; ghost_sparsemat_create(&(mat->remotePart),mat->context,&mat->splittraits[1],1); ghost_sparsemat *remoteMat = mat->remotePart; mat->localPart->traits.T = mat->traits.T; mat->remotePart->traits.T = mat->traits.T; mat->localPart->elSize = mat->elSize; mat->remotePart->elSize = mat->elSize; mat->localPart->nchunks = CEILDIV(SPM_NROWS(mat->localPart),mat->localPart->traits.C); mat->remotePart->nchunks = CEILDIV(SPM_NROWS(mat->remotePart),mat->remotePart->traits.C); ghost_lidx nChunks = SPM_NCHUNKS(mat); GHOST_CALL_GOTO(ghost_malloc((void **)&localMat->chunkStart, (nChunks+1)*sizeof(ghost_lidx)),err,ret); GHOST_CALL_GOTO(ghost_malloc((void **)&localMat->chunkMin, (nChunks)*sizeof(ghost_lidx)),err,ret); GHOST_CALL_GOTO(ghost_malloc((void **)&localMat->chunkLen, (nChunks)*sizeof(ghost_lidx)),err,ret); GHOST_CALL_GOTO(ghost_malloc((void **)&localMat->chunkLenPadded, (nChunks)*sizeof(ghost_lidx)),err,ret); GHOST_CALL_GOTO(ghost_malloc((void **)&localMat->rowLen, (SPM_NROWSPAD(mat))*sizeof(ghost_lidx)),err,ret); GHOST_CALL_GOTO(ghost_malloc((void **)&localMat->rowLenPadded, (SPM_NROWSPAD(mat))*sizeof(ghost_lidx)),err,ret); GHOST_CALL_GOTO(ghost_malloc((void **)&remoteMat->chunkStart, (nChunks+1)*sizeof(ghost_lidx)),err,ret); GHOST_CALL_GOTO(ghost_malloc((void **)&remoteMat->chunkMin, (nChunks)*sizeof(ghost_lidx)),err,ret); GHOST_CALL_GOTO(ghost_malloc((void **)&remoteMat->chunkLen, (nChunks)*sizeof(ghost_lidx)),err,ret); GHOST_CALL_GOTO(ghost_malloc((void **)&remoteMat->chunkLenPadded, (nChunks)*sizeof(ghost_lidx)),err,ret); GHOST_CALL_GOTO(ghost_malloc((void 
**)&remoteMat->rowLen, (SPM_NROWSPAD(mat))*sizeof(ghost_lidx)),err,ret); GHOST_CALL_GOTO(ghost_malloc((void **)&remoteMat->rowLenPadded, (SPM_NROWSPAD(mat))*sizeof(ghost_lidx)),err,ret); #pragma omp parallel for schedule(runtime) for (i=0; i<SPM_NROWSPAD(mat); i++) { localMat->rowLen[i] = 0; remoteMat->rowLen[i] = 0; localMat->rowLenPadded[i] = 0; remoteMat->rowLenPadded[i] = 0; } #pragma omp parallel for schedule(runtime) for(chunk = 0; chunk < SPM_NCHUNKS(mat); chunk++) { localMat->chunkLen[chunk] = 0; remoteMat->chunkLen[chunk] = 0; localMat->chunkLenPadded[chunk] = 0; remoteMat->chunkLenPadded[chunk] = 0; localMat->chunkMin[chunk] = 0; remoteMat->chunkMin[chunk] = 0; } localMat->chunkStart[0] = 0; remoteMat->chunkStart[0] = 0; lnEnts_l = 0; lnEnts_r = 0; for(chunk = 0; chunk < SPM_NCHUNKS(mat); chunk++) { for (i=0; i<mat->chunkLen[chunk]; i++) { for (j=0; j<mat->traits.C; j++) { row = chunk*mat->traits.C+j; idx = mat->chunkStart[chunk]+i*mat->traits.C+j; if (i < mat->rowLen[row]) { if (mat->col[idx] < mat->context->row_map->ldim[me]) { localMat->rowLen[row]++; } else { remoteMat->rowLen[row]++; } localMat->rowLenPadded[row] = PAD(localMat->rowLen[row],mat->localPart->traits.T); remoteMat->rowLenPadded[row] = PAD(remoteMat->rowLen[row],mat->remotePart->traits.T); } } } for (j=0; j<mat->traits.C; j++) { row = chunk*mat->traits.C+j; localMat->chunkLen[chunk] = MAX(localMat->chunkLen[chunk],localMat->rowLen[row]); remoteMat->chunkLen[chunk] = MAX(remoteMat->chunkLen[chunk],remoteMat->rowLen[row]); } lnEnts_l += localMat->chunkLen[chunk]*mat->traits.C; lnEnts_r += remoteMat->chunkLen[chunk]*mat->traits.C; localMat->chunkStart[chunk+1] = lnEnts_l; remoteMat->chunkStart[chunk+1] = lnEnts_r; localMat->chunkLenPadded[chunk] = PAD(localMat->chunkLen[chunk],mat->localPart->traits.T); remoteMat->chunkLenPadded[chunk] = PAD(remoteMat->chunkLen[chunk],mat->remotePart->traits.T); } /* * for (i=0; i<mat->nEnts;i++) { * if (mat->col[i]<mat->context->row_map->ldim[me]) lnEnts_l++; } lnEnts_r = mat->context->lnEnts[me]-lnEnts_l;*/ GHOST_CALL_GOTO(ghost_malloc_align((void **)&localMat->val,lnEnts_l*mat->elSize,GHOST_DATA_ALIGNMENT),err,ret); GHOST_CALL_GOTO(ghost_malloc_align((void **)&localMat->col,lnEnts_l*sizeof(ghost_lidx),GHOST_DATA_ALIGNMENT),err,ret); GHOST_CALL_GOTO(ghost_malloc_align((void **)&remoteMat->val,lnEnts_r*mat->elSize,GHOST_DATA_ALIGNMENT),err,ret); GHOST_CALL_GOTO(ghost_malloc_align((void **)&remoteMat->col,lnEnts_r*sizeof(ghost_lidx),GHOST_DATA_ALIGNMENT),err,ret); mat->localPart->nEnts = lnEnts_l; mat->localPart->traits.C = mat->traits.C; mat->remotePart->nEnts = lnEnts_r; mat->remotePart->traits.C = mat->traits.C; #pragma omp parallel for schedule(runtime) private (i,j,idx) for(chunk = 0; chunk < SPM_NCHUNKS(mat->localPart); chunk++) { for (i=0; i<localMat->chunkLenPadded[chunk]; i++) { for (j=0; j<mat->localPart->traits.C; j++) { idx = localMat->chunkStart[chunk]+i*mat->localPart->traits.C+j; memset(&((char *)(localMat->val))[idx*mat->elSize],0,mat->elSize); localMat->col[idx] = 0; } } } #pragma omp parallel for schedule(runtime) private (i,j,idx) for(chunk = 0; chunk < SPM_NCHUNKS(mat->remotePart); chunk++) { for (i=0; i<remoteMat->chunkLenPadded[chunk]; i++) { for (j=0; j<mat->remotePart->traits.C; j++) { idx = remoteMat->chunkStart[chunk]+i*mat->remotePart->traits.C+j; memset(&((char *)(remoteMat->val))[idx*mat->elSize],0,mat->elSize); remoteMat->col[idx] = 0; } } } current_l = 0; current_r = 0; ghost_lidx *col_l, *col_r; ghost_malloc((void 
**)&col_l,sizeof(ghost_lidx)*mat->traits.C); ghost_malloc((void **)&col_r,sizeof(ghost_lidx)*mat->traits.C); for(chunk = 0; chunk < SPM_NCHUNKS(mat); chunk++) { for (j=0; j<mat->traits.C; j++) { col_l[j] = 0; col_r[j] = 0; } for (i=0; i<mat->chunkLen[chunk]; i++) { for (j=0; j<mat->traits.C; j++) { row = chunk*mat->traits.C+j; idx = mat->chunkStart[chunk]+i*mat->traits.C+j; if (i<mat->rowLen[row]) { if (mat->col[idx] < mat->context->row_map->ldim[me]) { if (col_l[j] < localMat->rowLen[row]) { ghost_lidx lidx = localMat->chunkStart[chunk]+col_l[j]*mat->localPart->traits.C+j; localMat->col[lidx] = mat->col[idx]; memcpy(&localMat->val[lidx*mat->elSize],&mat->val[idx*mat->elSize],mat->elSize); current_l++; } col_l[j]++; } else{ if (col_r[j] < remoteMat->rowLen[row]) { ghost_lidx ridx = remoteMat->chunkStart[chunk]+col_r[j]*mat->remotePart->traits.C+j; remoteMat->col[ridx] = mat->col[idx]; memcpy(&remoteMat->val[ridx*mat->elSize],&mat->val[idx*mat->elSize],mat->elSize); current_r++; } col_r[j]++; } } } } } free(col_l); free(col_r); #ifdef GHOST_HAVE_CUDA if (!(mat->traits.flags & GHOST_SPARSEMAT_HOST)) { ghost_sparsemat_upload(mat->localPart); ghost_sparsemat_upload(mat->remotePart); } #endif GHOST_INSTR_STOP("split"); } goto out; err: ghost_sparsemat_destroy(mat->localPart); mat->localPart = NULL; ghost_sparsemat_destroy(mat->remotePart); mat->remotePart = NULL; out: GHOST_FUNC_EXIT(GHOST_FUNCTYPE_INITIALIZATION); return ret; } ghost_error ghost_sparsemat_to_bin(ghost_sparsemat *mat, char *matrixPath) { GHOST_FUNC_ENTER(GHOST_FUNCTYPE_IO); UNUSED(mat); UNUSED(matrixPath); GHOST_ERROR_LOG("SELL matrix to binary CRS file not implemented"); GHOST_FUNC_EXIT(GHOST_FUNCTYPE_IO); return GHOST_ERR_NOT_IMPLEMENTED; } #ifdef GHOST_HAVE_CUDA static ghost_error ghost_sparsemat_upload(ghost_sparsemat* mat) { GHOST_FUNC_ENTER(GHOST_FUNCTYPE_COMMUNICATION); if (!(mat->traits.flags & GHOST_SPARSEMAT_HOST)) { GHOST_DEBUG_LOG(1,"Creating matrix on CUDA device"); GHOST_CALL_RETURN(ghost_cu_malloc((void **)&mat->cu_rowLen,(SPM_NROWS(mat))*sizeof(ghost_lidx))); GHOST_CALL_RETURN(ghost_cu_malloc((void **)&mat->cu_rowLenPadded,(SPM_NROWS(mat))*sizeof(ghost_lidx))); GHOST_CALL_RETURN(ghost_cu_malloc((void **)&mat->cu_col,(mat->nEnts)*sizeof(ghost_lidx))); GHOST_CALL_RETURN(ghost_cu_malloc((void **)&mat->cu_val,(mat->nEnts)*mat->elSize)); GHOST_CALL_RETURN(ghost_cu_malloc((void **)&mat->cu_chunkStart,(SPM_NROWSPAD(mat)/mat->traits.C+1)*sizeof(ghost_lidx))); GHOST_CALL_RETURN(ghost_cu_malloc((void **)&mat->cu_chunkLen,(SPM_NROWSPAD(mat)/mat->traits.C)*sizeof(ghost_lidx))); GHOST_CALL_RETURN(ghost_cu_upload(mat->cu_rowLen, mat->rowLen, SPM_NROWS(mat)*sizeof(ghost_lidx))); GHOST_CALL_RETURN(ghost_cu_upload(mat->cu_rowLenPadded, mat->rowLenPadded, SPM_NROWS(mat)*sizeof(ghost_lidx))); GHOST_CALL_RETURN(ghost_cu_upload(mat->cu_col, mat->col, mat->nEnts*sizeof(ghost_lidx))); GHOST_CALL_RETURN(ghost_cu_upload(mat->cu_val, mat->val, mat->nEnts*mat->elSize)); GHOST_CALL_RETURN(ghost_cu_upload(mat->cu_chunkStart, mat->chunkStart, (SPM_NROWSPAD(mat)/mat->traits.C+1)*sizeof(ghost_lidx))); GHOST_CALL_RETURN(ghost_cu_upload(mat->cu_chunkLen, mat->chunkLen, (SPM_NROWSPAD(mat)/mat->traits.C)*sizeof(ghost_lidx))); } GHOST_FUNC_EXIT(GHOST_FUNCTYPE_COMMUNICATION); return GHOST_SUCCESS; } #endif
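/*
 * Illustrative sketch only (not part of GHOST): the construction and split
 * loops above address the value/column arrays as
 * (*chunkptr)[chunk] + colidx*C + i, i.e. each chunk of C consecutive rows is
 * stored column-major and padded to the chunk length, with padding values set
 * to zero and padding columns pointing at a valid local index.  The
 * hypothetical sell_spmv() below walks exactly that layout for a plain
 * double-precision matrix; every struct and function name here is an
 * assumption for illustration, not the GHOST API.
 */
#include <stdio.h>

typedef struct {
    int C;               /* chunk height (rows per chunk)              */
    int nrows;           /* number of rows                             */
    int nchunks;         /* number of chunks, ceil(nrows/C)            */
    const int *chunkptr; /* offset of each chunk in val[]/col[]        */
    const int *chunklen; /* padded number of columns of each chunk     */
    const int *col;      /* column indices, chunk-wise column-major    */
    const double *val;   /* values, same layout; padding entries are 0 */
} sell_sketch;

/* y = A*x over the sketched SELL-C layout */
static void sell_spmv(const sell_sketch *A, const double *x, double *y)
{
    for (int chunk = 0; chunk < A->nchunks; chunk++) {
        for (int i = 0; i < A->C && chunk * A->C + i < A->nrows; i++) {
            double sum = 0.0;
            for (int colidx = 0; colidx < A->chunklen[chunk]; colidx++) {
                /* same indexing as in the construction code above */
                int idx = A->chunkptr[chunk] + colidx * A->C + i;
                sum += A->val[idx] * x[A->col[idx]];
            }
            y[chunk * A->C + i] = sum;
        }
    }
}

int main(void)
{
    /* 3x3 example with C=2: row 0 = {(0,2)}, row 1 = {(1,3),(2,1)}, row 2 = {(2,4)} */
    const int chunkptr[] = { 0, 4, 6 };
    const int chunklen[] = { 2, 1 };
    const int col[]      = { 0, 1, 0, 2,   2, 0 };
    const double val[]   = { 2, 3, 0, 1,   4, 0 };
    const double x[]     = { 1, 1, 1 };
    double y[3];
    sell_sketch A = { 2, 3, 2, chunkptr, chunklen, col, val };
    sell_spmv(&A, x, y);
    printf("%g %g %g\n", y[0], y[1], y[2]); /* expected: 2 4 4 */
    return 0;
}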
single.c
#include <stdio.h>
#include <omp.h>

void work1() { printf("%s", "Work1.\n"); }
void work2() { printf("%s", "Work2.\n"); }

int main(int argc, char const *argv[])
{
    #pragma omp parallel
    {
        /* A single region is executed by exactly one thread of the team; the
           others wait at the implicit barrier at the end of the region, so
           nobody calls work1() before the message is printed. */
        #pragma omp single
        printf("%s", "Beginning work1.\n");

        work1(); /* executed by every thread in the team */

        #pragma omp single
        printf("%s", "Finishing work1.\n");

        /* nowait removes the implicit barrier, so the remaining threads can
           proceed to work2() without waiting for the printing thread. */
        #pragma omp single nowait
        printf("%s", "Finished work1 and beginning work2.\n");

        work2(); /* executed by every thread in the team */
    }
    return 0;
}
kthread_test.c
#include <stdio.h>
#include <assert.h>
#include <stdlib.h>
#include <pthread.h>

#if HAVE_CILK
#include <cilk/cilk.h>
#include <cilk/cilk_api.h>
#endif

typedef struct {
    int max_iter, w, h;
    double xmin, xmax, ymin, ymax;
    int *k;
} global_t;

/* Escape-time iteration (Mandelbrot-style) for pixel i; tid is unused here. */
static void compute(void *_g, int i, int tid)
{
    global_t *g = (global_t*)_g;
    double x, x0 = g->xmin + (g->xmax - g->xmin) * (i % g->w) / g->w;
    double y, y0 = g->ymin + (g->ymax - g->ymin) * (i / g->w) / g->h;
    int k;
    assert(g->k[i] < 0);
    x = x0, y = y0;
    for (k = 0; k < g->max_iter; ++k) {
        double z = x * y;
        x *= x; y *= y;
        if (x + y >= 4) break;
        x = x - y + x0;
        y = z + z + y0;
    }
    g->k[i] = k;
}

void kt_for(int n_threads, int n_items, void (*func)(void*,int,int), void *data);

int main(int argc, char *argv[])
{
    int i, tmp, tot, type = 0, n_threads = 2;
    global_t global = { 10240*100, 800, 600, -2., -1.2, -1.2, 1.2, 0 };
    // global_t global = { 10240*1, 8, 6, -2., -1.2, -1.2, 1.2, 0 };

    if (argc > 1) {
        type = argv[1][0] == 'o'? 2 : argv[1][0] == 'c'? 3 : argv[1][0] == 'n'? 1 : 0;
        if (argv[1][0] >= '0' && argv[1][0] <= '9') n_threads = atoi(argv[1]);
    } else {
        fprintf(stderr, "Usage: ./a.out [openmp | cilk | #threads]\n");
    }

    tot = global.w * global.h;
    global.k = calloc(tot, sizeof(int));
    for (i = 0; i < tot; ++i) global.k[i] = -1;

    if (type == 0) {
        kt_for(n_threads, tot, compute, &global);
    } else if (type == 2) {
#pragma omp parallel for
        for (i = 0; i < tot; ++i) compute(&global, i, 0);
    } else if (type == 3) {
#if HAVE_CILK
        cilk_for (i = 0; i < tot; ++i) compute(&global, i, 0);
#endif
    }

    /* every pixel must have been computed exactly once */
    for (i = tmp = 0; i < tot; ++i) tmp += (global.k[i] < 0);
    free(global.k);
    assert(tmp == 0);
    return 0;
}
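/*
 * kt_for() is only declared above, not defined.  The sketch below is one
 * possible minimal implementation so the test links and runs: it spawns
 * n_threads POSIX threads and gives thread t the items t, t+n_threads,
 * t+2*n_threads, ...  This static round-robin partition is an assumption for
 * illustration; the actual kthread library may schedule work differently.
 */
#include <pthread.h>
#include <stdlib.h>

typedef struct {
    int tid, n_threads, n_items;
    void (*func)(void*, int, int);
    void *data;
} ktf_worker_t;

static void *ktf_worker(void *arg)
{
    ktf_worker_t *w = (ktf_worker_t*)arg;
    /* thread tid processes items tid, tid+n_threads, tid+2*n_threads, ... */
    for (int i = w->tid; i < w->n_items; i += w->n_threads)
        w->func(w->data, i, w->tid);
    return 0;
}

void kt_for(int n_threads, int n_items, void (*func)(void*,int,int), void *data)
{
    pthread_t *tid = malloc(n_threads * sizeof(pthread_t));
    ktf_worker_t *w = malloc(n_threads * sizeof(ktf_worker_t));
    for (int t = 0; t < n_threads; ++t) {
        w[t].tid = t; w[t].n_threads = n_threads; w[t].n_items = n_items;
        w[t].func = func; w[t].data = data;
        pthread_create(&tid[t], 0, ktf_worker, &w[t]);
    }
    for (int t = 0; t < n_threads; ++t) pthread_join(tid[t], 0);
    free(tid); free(w);
}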
distort.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % DDDD IIIII SSSSS TTTTT OOO RRRR TTTTT % % D D I SS T O O R R T % % D D I SSS T O O RRRR T % % D D I SS T O O R R T % % DDDD IIIII SSSSS T OOO R R T % % % % % % MagickCore Image Distortion Methods % % % % Software Design % % Cristy % % Anthony Thyssen % % June 2007 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/artifact.h" #include "magick/cache.h" #include "magick/cache-view.h" #include "magick/channel.h" #include "magick/color-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/composite-private.h" #include "magick/distort.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/gem.h" #include "magick/hashmap.h" #include "magick/image.h" #include "magick/list.h" #include "magick/matrix.h" #include "magick/memory_.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/pixel.h" #include "magick/pixel-accessor.h" #include "magick/pixel-private.h" #include "magick/resample.h" #include "magick/resample-private.h" #include "magick/registry.h" #include "magick/resource_.h" #include "magick/semaphore.h" #include "magick/shear.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #include "magick/token.h" #include "magick/transform.h" /* Numerous internal routines for image distortions. 
*/ static inline void AffineArgsToCoefficients(double *affine) { /* map external sx,ry,rx,sy,tx,ty to internal c0,c2,c4,c1,c3,c5 */ double tmp[4]; /* note indexes 0 and 5 remain unchanged */ tmp[0]=affine[1]; tmp[1]=affine[2]; tmp[2]=affine[3]; tmp[3]=affine[4]; affine[3]=tmp[0]; affine[1]=tmp[1]; affine[4]=tmp[2]; affine[2]=tmp[3]; } static inline void CoefficientsToAffineArgs(double *coeff) { /* map internal c0,c1,c2,c3,c4,c5 to external sx,ry,rx,sy,tx,ty */ double tmp[4]; /* note indexes 0 and 5 remain unchanged */ tmp[0]=coeff[3]; tmp[1]=coeff[1]; tmp[2]=coeff[4]; tmp[3]=coeff[2]; coeff[1]=tmp[0]; coeff[2]=tmp[1]; coeff[3]=tmp[2]; coeff[4]=tmp[3]; } static void InvertAffineCoefficients(const double *coeff,double *inverse) { /* From "Digital Image Warping" by George Wolberg, page 50 */ double determinant; determinant=PerceptibleReciprocal(coeff[0]*coeff[4]-coeff[1]*coeff[3]); inverse[0]=determinant*coeff[4]; inverse[1]=determinant*(-coeff[1]); inverse[2]=determinant*(coeff[1]*coeff[5]-coeff[2]*coeff[4]); inverse[3]=determinant*(-coeff[3]); inverse[4]=determinant*coeff[0]; inverse[5]=determinant*(coeff[2]*coeff[3]-coeff[0]*coeff[5]); } static void InvertPerspectiveCoefficients(const double *coeff, double *inverse) { /* From "Digital Image Warping" by George Wolberg, page 53 */ double determinant; determinant=PerceptibleReciprocal(coeff[0]*coeff[4]-coeff[3]*coeff[1]); inverse[0]=determinant*(coeff[4]-coeff[7]*coeff[5]); inverse[1]=determinant*(coeff[7]*coeff[2]-coeff[1]); inverse[2]=determinant*(coeff[1]*coeff[5]-coeff[4]*coeff[2]); inverse[3]=determinant*(coeff[6]*coeff[5]-coeff[3]); inverse[4]=determinant*(coeff[0]-coeff[6]*coeff[2]); inverse[5]=determinant*(coeff[3]*coeff[2]-coeff[0]*coeff[5]); inverse[6]=determinant*(coeff[3]*coeff[7]-coeff[6]*coeff[4]); inverse[7]=determinant*(coeff[6]*coeff[1]-coeff[0]*coeff[7]); } /* * Polynomial Term Defining Functions * * Order must either be an integer, or 1.5 to produce * the 2 number_valuesal polynomial function... * affine 1 (3) u = c0 + c1*x + c2*y * bilinear 1.5 (4) u = '' + c3*x*y * quadratic 2 (6) u = '' + c4*x*x + c5*y*y * cubic 3 (10) u = '' + c6*x^3 + c7*x*x*y + c8*x*y*y + c9*y^3 * quartic 4 (15) u = '' + c10*x^4 + ... + c14*y^4 * quintic 5 (21) u = '' + c15*x^5 + ... + c20*y^5 * number in parenthesis minimum number of points needed. * Anything beyond quintic, has not been implemented until * a more automated way of determining terms is found. * Note the slight re-ordering of the terms for a quadratic polynomial * which is to allow the use of a bi-linear (order=1.5) polynomial. 
* All the later polynomials are ordered simply from x^N to y^N */ static size_t poly_number_terms(double order) { /* Return the number of terms for a 2d polynomial */ if ( order < 1 || order > 5 || ( order != floor(order) && (order-1.5) > MagickEpsilon) ) return 0; /* invalid polynomial order */ return((size_t) floor((order+1)*(order+2)/2)); } static double poly_basis_fn(ssize_t n, double x, double y) { /* Return the result for this polynomial term */ switch(n) { case 0: return( 1.0 ); /* constant */ case 1: return( x ); case 2: return( y ); /* affine order = 1 terms = 3 */ case 3: return( x*y ); /* bilinear order = 1.5 terms = 4 */ case 4: return( x*x ); case 5: return( y*y ); /* quadratic order = 2 terms = 6 */ case 6: return( x*x*x ); case 7: return( x*x*y ); case 8: return( x*y*y ); case 9: return( y*y*y ); /* cubic order = 3 terms = 10 */ case 10: return( x*x*x*x ); case 11: return( x*x*x*y ); case 12: return( x*x*y*y ); case 13: return( x*y*y*y ); case 14: return( y*y*y*y ); /* quartic order = 4 terms = 15 */ case 15: return( x*x*x*x*x ); case 16: return( x*x*x*x*y ); case 17: return( x*x*x*y*y ); case 18: return( x*x*y*y*y ); case 19: return( x*y*y*y*y ); case 20: return( y*y*y*y*y ); /* quintic order = 5 terms = 21 */ } return( 0 ); /* should never happen */ } static const char *poly_basis_str(ssize_t n) { /* return the result for this polynomial term */ switch(n) { case 0: return(""); /* constant */ case 1: return("*ii"); case 2: return("*jj"); /* affine order = 1 terms = 3 */ case 3: return("*ii*jj"); /* bilinear order = 1.5 terms = 4 */ case 4: return("*ii*ii"); case 5: return("*jj*jj"); /* quadratic order = 2 terms = 6 */ case 6: return("*ii*ii*ii"); case 7: return("*ii*ii*jj"); case 8: return("*ii*jj*jj"); case 9: return("*jj*jj*jj"); /* cubic order = 3 terms = 10 */ case 10: return("*ii*ii*ii*ii"); case 11: return("*ii*ii*ii*jj"); case 12: return("*ii*ii*jj*jj"); case 13: return("*ii*jj*jj*jj"); case 14: return("*jj*jj*jj*jj"); /* quartic order = 4 terms = 15 */ case 15: return("*ii*ii*ii*ii*ii"); case 16: return("*ii*ii*ii*ii*jj"); case 17: return("*ii*ii*ii*jj*jj"); case 18: return("*ii*ii*jj*jj*jj"); case 19: return("*ii*jj*jj*jj*jj"); case 20: return("*jj*jj*jj*jj*jj"); /* quintic order = 5 terms = 21 */ } return( "UNKNOWN" ); /* should never happen */ } static double poly_basis_dx(ssize_t n, double x, double y) { /* polynomial term for x derivative */ switch(n) { case 0: return( 0.0 ); /* constant */ case 1: return( 1.0 ); case 2: return( 0.0 ); /* affine order = 1 terms = 3 */ case 3: return( y ); /* bilinear order = 1.5 terms = 4 */ case 4: return( x ); case 5: return( 0.0 ); /* quadratic order = 2 terms = 6 */ case 6: return( x*x ); case 7: return( x*y ); case 8: return( y*y ); case 9: return( 0.0 ); /* cubic order = 3 terms = 10 */ case 10: return( x*x*x ); case 11: return( x*x*y ); case 12: return( x*y*y ); case 13: return( y*y*y ); case 14: return( 0.0 ); /* quartic order = 4 terms = 15 */ case 15: return( x*x*x*x ); case 16: return( x*x*x*y ); case 17: return( x*x*y*y ); case 18: return( x*y*y*y ); case 19: return( y*y*y*y ); case 20: return( 0.0 ); /* quintic order = 5 terms = 21 */ } return( 0.0 ); /* should never happen */ } static double poly_basis_dy(ssize_t n, double x, double y) { /* polynomial term for y derivative */ switch(n) { case 0: return( 0.0 ); /* constant */ case 1: return( 0.0 ); case 2: return( 1.0 ); /* affine order = 1 terms = 3 */ case 3: return( x ); /* bilinear order = 1.5 terms = 4 */ case 4: return( 0.0 ); case 5: return( y ); /* 
quadratic order = 2 terms = 6 */ default: return( poly_basis_dx(n-1,x,y) ); /* weird but true */ } /* NOTE: the only reason that last is not true for 'quadratic' is due to the re-arrangement of terms to allow for 'bilinear' */ } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A f f i n e T r a n s f o r m I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AffineTransformImage() transforms an image as dictated by the affine matrix. % It allocates the memory necessary for the new Image structure and returns % a pointer to the new image. % % The format of the AffineTransformImage method is: % % Image *AffineTransformImage(const Image *image, % AffineMatrix *affine_matrix,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o affine_matrix: the affine matrix. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *AffineTransformImage(const Image *image, const AffineMatrix *affine_matrix,ExceptionInfo *exception) { double distort[6]; Image *deskew_image; /* Affine transform image. */ assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(affine_matrix != (AffineMatrix *) NULL); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); distort[0]=affine_matrix->sx; distort[1]=affine_matrix->rx; distort[2]=affine_matrix->ry; distort[3]=affine_matrix->sy; distort[4]=affine_matrix->tx; distort[5]=affine_matrix->ty; deskew_image=DistortImage(image,AffineProjectionDistortion,6,distort, MagickTrue,exception); return(deskew_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e n e r a t e C o e f f i c i e n t s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GenerateCoefficients() takes user provided input arguments and generates % the coefficients, needed to apply the specific distortion for either % distorting images (generally using control points) or generating a color % gradient from sparsely separated color points. % % The format of the GenerateCoefficients() method is: % % Image *GenerateCoefficients(const Image *image,DistortImageMethod method, % const size_t number_arguments,const double *arguments, % size_t number_values, ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image to be distorted. % % o method: the method of image distortion/ sparse gradient % % o number_arguments: the number of arguments given. % % o arguments: the arguments for this distortion method. % % o number_values: the style and format of given control points, (caller type) % 0: 2 dimensional mapping of control points (Distort) % Format: u,v,x,y where u,v is the 'source' of the % the color to be plotted, for DistortImage() % N: Interpolation of control points with N values (usally r,g,b) % Format: x,y,r,g,b mapping x,y to color values r,g,b % IN future, variable number of values may be given (1 to N) % % o exception: return any errors or warnings in this structure % % Note that the returned array of double values must be freed by the % calling method using RelinquishMagickMemory(). This however may change in % the future to require a more 'method' specific method. 
% % Because of this this method should not be classed as stable or used % outside other MagickCore library methods. */ static inline double MagickRound(double x) { /* Round the fraction to nearest integer. */ if ((x-floor(x)) < (ceil(x)-x)) return(floor(x)); return(ceil(x)); } static double *GenerateCoefficients(const Image *image, DistortImageMethod *method,const size_t number_arguments, const double *arguments,size_t number_values,ExceptionInfo *exception) { double *coeff; register size_t i; size_t number_coeff, /* number of coefficients to return (array size) */ cp_size, /* number floating point numbers per control point */ cp_x,cp_y, /* the x,y indexes for control point */ cp_values; /* index of values for this control point */ /* number_values Number of values given per control point */ if ( number_values == 0 ) { /* Image distortion using control points (or other distortion) That is generate a mapping so that x,y->u,v given u,v,x,y */ number_values = 2; /* special case: two values of u,v */ cp_values = 0; /* the values i,j are BEFORE the destination CP x,y */ cp_x = 2; /* location of x,y in input control values */ cp_y = 3; /* NOTE: cp_values, also used for later 'reverse map distort' tests */ } else { cp_x = 0; /* location of x,y in input control values */ cp_y = 1; cp_values = 2; /* and the other values are after x,y */ /* Typically in this case the values are R,G,B color values */ } cp_size = number_values+2; /* each CP defintion involves this many numbers */ /* If not enough control point pairs are found for specific distortions fall back to Affine distortion (allowing 0 to 3 point pairs) */ if ( number_arguments < 4*cp_size && ( *method == BilinearForwardDistortion || *method == BilinearReverseDistortion || *method == PerspectiveDistortion ) ) *method = AffineDistortion; number_coeff=0; switch (*method) { case AffineDistortion: /* also BarycentricColorInterpolate: */ number_coeff=3*number_values; break; case PolynomialDistortion: /* number of coefficents depend on the given polynomal 'order' */ i = poly_number_terms(arguments[0]); number_coeff = 2 + i*number_values; if ( i == 0 ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : '%s'","Polynomial", "Invalid order, should be interger 1 to 5, or 1.5"); return((double *) NULL); } if ( number_arguments < 1+i*cp_size ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument", "%s : 'require at least %.20g CPs'", "Polynomial", (double) i); return((double *) NULL); } break; case BilinearReverseDistortion: number_coeff=4*number_values; break; /* The rest are constants as they are only used for image distorts */ case BilinearForwardDistortion: number_coeff=10; /* 2*4 coeff plus 2 constants */ cp_x = 0; /* Reverse src/dest coords for forward mapping */ cp_y = 1; cp_values = 2; break; #if 0 case QuadraterialDistortion: number_coeff=19; /* BilinearForward + BilinearReverse */ #endif break; case ShepardsDistortion: number_coeff=1; /* The power factor to use */ break; case ArcDistortion: number_coeff=5; break; case ScaleRotateTranslateDistortion: case AffineProjectionDistortion: case Plane2CylinderDistortion: case Cylinder2PlaneDistortion: number_coeff=6; break; case PolarDistortion: case DePolarDistortion: number_coeff=8; break; case PerspectiveDistortion: case PerspectiveProjectionDistortion: number_coeff=9; break; case BarrelDistortion: case BarrelInverseDistortion: number_coeff=10; break; default: perror("unknown method given"); /* just fail assertion */ } /* 
allocate the array of coefficients needed */ coeff = (double *) AcquireQuantumMemory(number_coeff,sizeof(*coeff)); if (coeff == (double *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed", "%s", "GenerateCoefficients"); return((double *) NULL); } /* zero out coefficients array */ for (i=0; i < number_coeff; i++) coeff[i] = 0.0; switch (*method) { case AffineDistortion: { /* Affine Distortion v = c0*x + c1*y + c2 for each 'value' given Input Arguments are sets of control points... For Distort Images u,v, x,y ... For Sparse Gradients x,y, r,g,b ... */ if ( number_arguments%cp_size != 0 || number_arguments < cp_size ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument", "%s : 'require at least %.20g CPs'", "Affine", 1.0); coeff=(double *) RelinquishMagickMemory(coeff); return((double *) NULL); } /* handle special cases of not enough arguments */ if ( number_arguments == cp_size ) { /* Only 1 CP Set Given */ if ( cp_values == 0 ) { /* image distortion - translate the image */ coeff[0] = 1.0; coeff[2] = arguments[0] - arguments[2]; coeff[4] = 1.0; coeff[5] = arguments[1] - arguments[3]; } else { /* sparse gradient - use the values directly */ for (i=0; i<number_values; i++) coeff[i*3+2] = arguments[cp_values+i]; } } else { /* 2 or more points (usally 3) given. Solve a least squares simultaneous equation for coefficients. */ double **matrix, **vectors, terms[3]; MagickBooleanType status; /* create matrix, and a fake vectors matrix */ matrix = AcquireMagickMatrix(3UL,3UL); vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors)); if (matrix == (double **) NULL || vectors == (double **) NULL) { matrix = RelinquishMagickMatrix(matrix, 3UL); vectors = (double **) RelinquishMagickMemory(vectors); coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed", "%s", "DistortCoefficients"); return((double *) NULL); } /* fake a number_values x3 vectors matrix from coefficients array */ for (i=0; i < number_values; i++) vectors[i] = &(coeff[i*3]); /* Add given control point pairs for least squares solving */ for (i=0; i < number_arguments; i+=cp_size) { terms[0] = arguments[i+cp_x]; /* x */ terms[1] = arguments[i+cp_y]; /* y */ terms[2] = 1; /* 1 */ LeastSquaresAddTerms(matrix,vectors,terms, &(arguments[i+cp_values]),3UL,number_values); } if ( number_arguments == 2*cp_size ) { /* Only two pairs were given, but we need 3 to solve the affine. Fake extra coordinates by rotating p1 around p0 by 90 degrees. 
x2 = x0 - (y1-y0) y2 = y0 + (x1-x0) */ terms[0] = arguments[cp_x] - ( arguments[cp_size+cp_y] - arguments[cp_y] ); /* x2 */ terms[1] = arguments[cp_y] + + ( arguments[cp_size+cp_x] - arguments[cp_x] ); /* y2 */ terms[2] = 1; /* 1 */ if ( cp_values == 0 ) { /* Image Distortion - rotate the u,v coordients too */ double uv2[2]; uv2[0] = arguments[0] - arguments[5] + arguments[1]; /* u2 */ uv2[1] = arguments[1] + arguments[4] - arguments[0]; /* v2 */ LeastSquaresAddTerms(matrix,vectors,terms,uv2,3UL,2UL); } else { /* Sparse Gradient - use values of p0 for linear gradient */ LeastSquaresAddTerms(matrix,vectors,terms, &(arguments[cp_values]),3UL,number_values); } } /* Solve for LeastSquares Coefficients */ status=GaussJordanElimination(matrix,vectors,3UL,number_values); matrix = RelinquishMagickMatrix(matrix, 3UL); vectors = (double **) RelinquishMagickMemory(vectors); if ( status == MagickFalse ) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Unsolvable Matrix'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } } return(coeff); } case AffineProjectionDistortion: { /* Arguments: Affine Matrix (forward mapping) Arguments sx, rx, ry, sy, tx, ty Where u = sx*x + ry*y + tx v = rx*x + sy*y + ty Returns coefficients (in there inverse form) ordered as... sx ry tx rx sy ty AffineProjection Distortion Notes... + Will only work with a 2 number_values for Image Distortion + Can not be used for generating a sparse gradient (interpolation) */ double inverse[8]; if (number_arguments != 6) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Needs 6 coeff values'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } /* FUTURE: trap test for sx*sy-rx*ry == 0 (determinant = 0, no inverse) */ for(i=0; i<6UL; i++ ) inverse[i] = arguments[i]; AffineArgsToCoefficients(inverse); /* map into coefficents */ InvertAffineCoefficients(inverse, coeff); /* invert */ *method = AffineDistortion; return(coeff); } case ScaleRotateTranslateDistortion: { /* Scale, Rotate and Translate Distortion An alternative Affine Distortion Argument options, by number of arguments given: 7: x,y, sx,sy, a, nx,ny 6: x,y, s, a, nx,ny 5: x,y, sx,sy, a 4: x,y, s, a 3: x,y, a 2: s, a 1: a Where actions are (in order of application) x,y 'center' of transforms (default = image center) sx,sy scale image by this amount (default = 1) a angle of rotation (argument required) nx,ny move 'center' here (default = x,y or no movement) And convert to affine mapping coefficients ScaleRotateTranslate Distortion Notes... 
+ Does not use a set of CPs in any normal way + Will only work with a 2 number_valuesal Image Distortion + Cannot be used for generating a sparse gradient (interpolation) */ double cosine, sine, x,y,sx,sy,a,nx,ny; /* set default center, and default scale */ x = nx = (double)(image->columns)/2.0 + (double)image->page.x; y = ny = (double)(image->rows)/2.0 + (double)image->page.y; sx = sy = 1.0; switch ( number_arguments ) { case 0: coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Needs at least 1 argument'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); case 1: a = arguments[0]; break; case 2: sx = sy = arguments[0]; a = arguments[1]; break; default: x = nx = arguments[0]; y = ny = arguments[1]; switch ( number_arguments ) { case 3: a = arguments[2]; break; case 4: sx = sy = arguments[2]; a = arguments[3]; break; case 5: sx = arguments[2]; sy = arguments[3]; a = arguments[4]; break; case 6: sx = sy = arguments[2]; a = arguments[3]; nx = arguments[4]; ny = arguments[5]; break; case 7: sx = arguments[2]; sy = arguments[3]; a = arguments[4]; nx = arguments[5]; ny = arguments[6]; break; default: coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Too Many Arguments (7 or less)'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } break; } /* Trap if sx or sy == 0 -- image is scaled out of existance! */ if ( fabs(sx) < MagickEpsilon || fabs(sy) < MagickEpsilon ) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Zero Scale Given'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } /* Save the given arguments as an affine distortion */ a=DegreesToRadians(a); cosine=cos(a); sine=sin(a); *method = AffineDistortion; coeff[0]=cosine/sx; coeff[1]=sine/sx; coeff[2]=x-nx*coeff[0]-ny*coeff[1]; coeff[3]=(-sine)/sy; coeff[4]=cosine/sy; coeff[5]=y-nx*coeff[3]-ny*coeff[4]; return(coeff); } case PerspectiveDistortion: { /* Perspective Distortion (a ratio of affine distortions) p(x,y) c0*x + c1*y + c2 u = ------ = ------------------ r(x,y) c6*x + c7*y + 1 q(x,y) c3*x + c4*y + c5 v = ------ = ------------------ r(x,y) c6*x + c7*y + 1 c8 = Sign of 'r', or the denominator affine, for the actual image. This determines what part of the distorted image is 'ground' side of the horizon, the other part is 'sky' or invalid. Valid values are +1.0 or -1.0 only. Input Arguments are sets of control points... For Distort Images u,v, x,y ... For Sparse Gradients x,y, r,g,b ... Perspective Distortion Notes... + Can be thought of as ratio of 3 affine transformations + Not separatable: r() or c6 and c7 are used by both equations + All 8 coefficients must be determined simultaniously + Will only work with a 2 number_valuesal Image Distortion + Can not be used for generating a sparse gradient (interpolation) + It is not linear, but is simple to generate an inverse + All lines within an image remain lines. + but distances between points may vary. 
*/ double **matrix, *vectors[1], terms[8]; size_t cp_u = cp_values, cp_v = cp_values+1; MagickBooleanType status; if ( number_arguments%cp_size != 0 || number_arguments < cp_size*4 ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument", "%s : 'require at least %.20g CPs'", CommandOptionToMnemonic(MagickDistortOptions, *method), 4.0); coeff=(double *) RelinquishMagickMemory(coeff); return((double *) NULL); } /* fake 1x8 vectors matrix directly using the coefficients array */ vectors[0] = &(coeff[0]); /* 8x8 least-squares matrix (zeroed) */ matrix = AcquireMagickMatrix(8UL,8UL); if (matrix == (double **) NULL) { coeff=(double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed", "%s", "DistortCoefficients"); return((double *) NULL); } /* Add control points for least squares solving */ for (i=0; i < number_arguments; i+=4) { terms[0]=arguments[i+cp_x]; /* c0*x */ terms[1]=arguments[i+cp_y]; /* c1*y */ terms[2]=1.0; /* c2*1 */ terms[3]=0.0; terms[4]=0.0; terms[5]=0.0; terms[6]=-terms[0]*arguments[i+cp_u]; /* 1/(c6*x) */ terms[7]=-terms[1]*arguments[i+cp_u]; /* 1/(c7*y) */ LeastSquaresAddTerms(matrix,vectors,terms,&(arguments[i+cp_u]), 8UL,1UL); terms[0]=0.0; terms[1]=0.0; terms[2]=0.0; terms[3]=arguments[i+cp_x]; /* c3*x */ terms[4]=arguments[i+cp_y]; /* c4*y */ terms[5]=1.0; /* c5*1 */ terms[6]=-terms[3]*arguments[i+cp_v]; /* 1/(c6*x) */ terms[7]=-terms[4]*arguments[i+cp_v]; /* 1/(c7*y) */ LeastSquaresAddTerms(matrix,vectors,terms,&(arguments[i+cp_v]), 8UL,1UL); } /* Solve for LeastSquares Coefficients */ status=GaussJordanElimination(matrix,vectors,8UL,1UL); matrix = RelinquishMagickMatrix(matrix, 8UL); if ( status == MagickFalse ) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Unsolvable Matrix'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } /* Calculate 9'th coefficient! The ground-sky determination. What is sign of the 'ground' in r() denominator affine function? Just use any valid image coordinate (first control point) in destination for determination of what part of view is 'ground'. */ coeff[8] = coeff[6]*arguments[cp_x] + coeff[7]*arguments[cp_y] + 1.0; coeff[8] = (coeff[8] < 0.0) ? -1.0 : +1.0; return(coeff); } case PerspectiveProjectionDistortion: { /* Arguments: Perspective Coefficents (forward mapping) */ if (number_arguments != 8) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument", "%s : 'Needs 8 coefficient values'", CommandOptionToMnemonic(MagickDistortOptions, *method)); return((double *) NULL); } /* FUTURE: trap test c0*c4-c3*c1 == 0 (determinate = 0, no inverse) */ InvertPerspectiveCoefficients(arguments, coeff); /* Calculate 9'th coefficient! The ground-sky determination. What is sign of the 'ground' in r() denominator affine function? Just use any valid image cocodinate in destination for determination. For a forward mapped perspective the images 0,0 coord will map to c2,c5 in the distorted image, so set the sign of denominator of that. */ coeff[8] = coeff[6]*arguments[2] + coeff[7]*arguments[5] + 1.0; coeff[8] = (coeff[8] < 0.0) ? 
-1.0 : +1.0; *method = PerspectiveDistortion; return(coeff); } case BilinearForwardDistortion: case BilinearReverseDistortion: { /* Bilinear Distortion (Forward mapping) v = c0*x + c1*y + c2*x*y + c3; for each 'value' given This is actually a simple polynomial Distortion! The difference however is when we need to reverse the above equation to generate a BilinearForwardDistortion (see below). Input Arguments are sets of control points... For Distort Images u,v, x,y ... For Sparse Gradients x,y, r,g,b ... */ double **matrix, **vectors, terms[4]; MagickBooleanType status; /* check the number of arguments */ if ( number_arguments%cp_size != 0 || number_arguments < cp_size*4 ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument", "%s : 'require at least %.20g CPs'", CommandOptionToMnemonic(MagickDistortOptions, *method), 4.0); coeff=(double *) RelinquishMagickMemory(coeff); return((double *) NULL); } /* create matrix, and a fake vectors matrix */ matrix = AcquireMagickMatrix(4UL,4UL); vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors)); if (matrix == (double **) NULL || vectors == (double **) NULL) { matrix = RelinquishMagickMatrix(matrix, 4UL); vectors = (double **) RelinquishMagickMemory(vectors); coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed", "%s", "DistortCoefficients"); return((double *) NULL); } /* fake a number_values x4 vectors matrix from coefficients array */ for (i=0; i < number_values; i++) vectors[i] = &(coeff[i*4]); /* Add given control point pairs for least squares solving */ for (i=0; i < number_arguments; i+=cp_size) { terms[0] = arguments[i+cp_x]; /* x */ terms[1] = arguments[i+cp_y]; /* y */ terms[2] = terms[0]*terms[1]; /* x*y */ terms[3] = 1; /* 1 */ LeastSquaresAddTerms(matrix,vectors,terms, &(arguments[i+cp_values]),4UL,number_values); } /* Solve for LeastSquares Coefficients */ status=GaussJordanElimination(matrix,vectors,4UL,number_values); matrix = RelinquishMagickMatrix(matrix, 4UL); vectors = (double **) RelinquishMagickMemory(vectors); if ( status == MagickFalse ) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Unsolvable Matrix'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } if ( *method == BilinearForwardDistortion ) { /* Bilinear Forward Mapped Distortion The above least-squares solved for coefficents but in the forward direction, due to changes to indexing constants. i = c0*x + c1*y + c2*x*y + c3; j = c4*x + c5*y + c6*x*y + c7; where i,j are in the destination image, NOT the source. Reverse Pixel mapping however needs to use reverse of these functions. It required a full page of algbra to work out the reversed mapping formula, but resolves down to the following... c8 = c0*c5-c1*c4; c9 = 2*(c2*c5-c1*c6); // '2*a' in the quadratic formula i = i - c3; j = j - c7; b = c6*i - c2*j + c8; // So that a*y^2 + b*y + c == 0 c = c4*i - c0*j; // y = ( -b +- sqrt(bb - 4ac) ) / (2*a) r = b*b - c9*(c+c); if ( c9 != 0 ) y = ( -b + sqrt(r) ) / c9; else y = -c/b; x = ( i - c1*y) / ( c1 - c2*y ); NB: if 'r' is negative there is no solution! NB: the sign of the sqrt() should be negative if image becomes flipped or flopped, or crosses over itself. NB: techniqually coefficient c5 is not needed, anymore, but kept for completness. 
See Anthony Thyssen <A.Thyssen@griffith.edu.au> or Fred Weinhaus <fmw@alink.net> for more details. */ coeff[8] = coeff[0]*coeff[5] - coeff[1]*coeff[4]; coeff[9] = 2*(coeff[2]*coeff[5] - coeff[1]*coeff[6]); } return(coeff); } #if 0 case QuadrilateralDistortion: { /* Map a Quadrilateral to a unit square using BilinearReverse Then map that unit square back to the final Quadrilateral using BilinearForward. Input Arguments are sets of control points... For Distort Images u,v, x,y ... For Sparse Gradients x,y, r,g,b ... */ /* UNDER CONSTRUCTION */ return(coeff); } #endif case PolynomialDistortion: { /* Polynomial Distortion First two coefficents are used to hole global polynomal information c0 = Order of the polynimial being created c1 = number_of_terms in one polynomial equation Rest of the coefficients map to the equations.... v = c0 + c1*x + c2*y + c3*x*y + c4*x^2 + c5*y^2 + c6*x^3 + ... for each 'value' (number_values of them) given. As such total coefficients = 2 + number_terms * number_values Input Arguments are sets of control points... For Distort Images order [u,v, x,y] ... For Sparse Gradients order [x,y, r,g,b] ... Polynomial Distortion Notes... + UNDER DEVELOPMENT -- Do not expect this to remain as is. + Currently polynomial is a reversed mapped distortion. + Order 1.5 is fudged to map into a bilinear distortion. though it is not the same order as that distortion. */ double **matrix, **vectors, *terms; size_t nterms; /* number of polynomial terms per number_values */ register ssize_t j; MagickBooleanType status; /* first two coefficients hold polynomial order information */ coeff[0] = arguments[0]; coeff[1] = (double) poly_number_terms(arguments[0]); nterms = (size_t) coeff[1]; /* create matrix, a fake vectors matrix, and least sqs terms */ matrix = AcquireMagickMatrix(nterms,nterms); vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors)); terms = (double *) AcquireQuantumMemory(nterms, sizeof(*terms)); if (matrix == (double **) NULL || vectors == (double **) NULL || terms == (double *) NULL ) { matrix = RelinquishMagickMatrix(matrix, nterms); vectors = (double **) RelinquishMagickMemory(vectors); terms = (double *) RelinquishMagickMemory(terms); coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed", "%s", "DistortCoefficients"); return((double *) NULL); } /* fake a number_values x3 vectors matrix from coefficients array */ for (i=0; i < number_values; i++) vectors[i] = &(coeff[2+i*nterms]); /* Add given control point pairs for least squares solving */ for (i=1; i < number_arguments; i+=cp_size) { /* NB: start = 1 not 0 */ for (j=0; j < (ssize_t) nterms; j++) terms[j] = poly_basis_fn(j,arguments[i+cp_x],arguments[i+cp_y]); LeastSquaresAddTerms(matrix,vectors,terms, &(arguments[i+cp_values]),nterms,number_values); } terms = (double *) RelinquishMagickMemory(terms); /* Solve for LeastSquares Coefficients */ status=GaussJordanElimination(matrix,vectors,nterms,number_values); matrix = RelinquishMagickMatrix(matrix, nterms); vectors = (double **) RelinquishMagickMemory(vectors); if ( status == MagickFalse ) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Unsolvable Matrix'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } return(coeff); } case ArcDistortion: { /* Arc Distortion Args: arc_width rotate top_edge_radius bottom_edge_radius All 
but first argument are optional arc_width The angle over which to arc the image side-to-side rotate Angle to rotate image from vertical center top_radius Set top edge of source image at this radius bottom_radius Set bootom edge to this radius (radial scaling) By default, if the radii arguments are nor provided the image radius is calculated so the horizontal center-line is fits the given arc without scaling. The output image size is ALWAYS adjusted to contain the whole image, and an offset is given to position image relative to the 0,0 point of the origin, allowing users to use relative positioning onto larger background (via -flatten). The arguments are converted to these coefficients c0: angle for center of source image c1: angle scale for mapping to source image c2: radius for top of source image c3: radius scale for mapping source image c4: centerline of arc within source image Note the coefficients use a center angle, so asymptotic join is furthest from both sides of the source image. This also means that for arc angles greater than 360 the sides of the image will be trimmed equally. Arc Distortion Notes... + Does not use a set of CPs + Will only work with Image Distortion + Can not be used for generating a sparse gradient (interpolation) */ if ( number_arguments >= 1 && arguments[0] < MagickEpsilon ) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Arc Angle Too Small'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } if ( number_arguments >= 3 && arguments[2] < MagickEpsilon ) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Outer Radius Too Small'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } coeff[0] = -MagickPI2; /* -90, place at top! 
*/ if ( number_arguments >= 1 ) coeff[1] = DegreesToRadians(arguments[0]); else coeff[1] = MagickPI2; /* zero arguments - center is at top */ if ( number_arguments >= 2 ) coeff[0] += DegreesToRadians(arguments[1]); coeff[0] /= Magick2PI; /* normalize radians */ coeff[0] -= MagickRound(coeff[0]); coeff[0] *= Magick2PI; /* de-normalize back to radians */ coeff[3] = (double)image->rows-1; coeff[2] = (double)image->columns/coeff[1] + coeff[3]/2.0; if ( number_arguments >= 3 ) { if ( number_arguments >= 4 ) coeff[3] = arguments[2] - arguments[3]; else coeff[3] *= arguments[2]/coeff[2]; coeff[2] = arguments[2]; } coeff[4] = ((double)image->columns-1.0)/2.0; return(coeff); } case PolarDistortion: case DePolarDistortion: { /* (De)Polar Distortion (same set of arguments) Args: Rmax, Rmin, Xcenter,Ycenter, Afrom,Ato DePolar can also have the extra arguments of Width, Height Coefficients 0 to 5 is the sanatized version first 6 input args Coefficient 6 is the angle to coord ratio and visa-versa Coefficient 7 is the radius to coord ratio and visa-versa WARNING: It is possible for Radius max<min and/or Angle from>to */ if ( number_arguments == 3 || ( number_arguments > 6 && *method == PolarDistortion ) || number_arguments > 8 ) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"InvalidArgument", "%s : number of arguments", CommandOptionToMnemonic(MagickDistortOptions, *method) ); coeff=(double *) RelinquishMagickMemory(coeff); return((double *) NULL); } /* Rmax - if 0 calculate appropriate value */ if ( number_arguments >= 1 ) coeff[0] = arguments[0]; else coeff[0] = 0.0; /* Rmin - usally 0 */ coeff[1] = number_arguments >= 2 ? arguments[1] : 0.0; /* Center X,Y */ if ( number_arguments >= 4 ) { coeff[2] = arguments[2]; coeff[3] = arguments[3]; } else { /* center of actual image */ coeff[2] = (double)(image->columns)/2.0+image->page.x; coeff[3] = (double)(image->rows)/2.0+image->page.y; } /* Angle from,to - about polar center 0 is downward */ coeff[4] = -MagickPI; if ( number_arguments >= 5 ) coeff[4] = DegreesToRadians(arguments[4]); coeff[5] = coeff[4]; if ( number_arguments >= 6 ) coeff[5] = DegreesToRadians(arguments[5]); if ( fabs(coeff[4]-coeff[5]) < MagickEpsilon ) coeff[5] += Magick2PI; /* same angle is a full circle */ /* if radius 0 or negative, its a special value... 
*/ if ( coeff[0] < MagickEpsilon ) { /* Use closest edge if radius == 0 */ if ( fabs(coeff[0]) < MagickEpsilon ) { coeff[0]=MagickMin(fabs(coeff[2]-image->page.x), fabs(coeff[3]-image->page.y)); coeff[0]=MagickMin(coeff[0], fabs(coeff[2]-image->page.x-image->columns)); coeff[0]=MagickMin(coeff[0], fabs(coeff[3]-image->page.y-image->rows)); } /* furthest diagonal if radius == -1 */ if ( fabs(-1.0-coeff[0]) < MagickEpsilon ) { double rx,ry; rx = coeff[2]-image->page.x; ry = coeff[3]-image->page.y; coeff[0] = rx*rx+ry*ry; ry = coeff[3]-image->page.y-image->rows; coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry); rx = coeff[2]-image->page.x-image->columns; coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry); ry = coeff[3]-image->page.y; coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry); coeff[0] = sqrt(coeff[0]); } } /* IF Rmax <= 0 or Rmin < 0 OR Rmax < Rmin, THEN error */ if ( coeff[0] < MagickEpsilon || coeff[1] < -MagickEpsilon || (coeff[0]-coeff[1]) < MagickEpsilon ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument", "%s : Invalid Radius", CommandOptionToMnemonic(MagickDistortOptions, *method) ); coeff=(double *) RelinquishMagickMemory(coeff); return((double *) NULL); } /* converstion ratios */ if ( *method == PolarDistortion ) { coeff[6]=(double) image->columns/(coeff[5]-coeff[4]); coeff[7]=(double) image->rows/(coeff[0]-coeff[1]); } else { /* *method == DePolarDistortion */ coeff[6]=(coeff[5]-coeff[4])/image->columns; coeff[7]=(coeff[0]-coeff[1])/image->rows; } return(coeff); } case Cylinder2PlaneDistortion: case Plane2CylinderDistortion: { /* 3D Cylinder to/from a Tangential Plane Projection between a clinder and flat plain from a point on the center line of the cylinder. The two surfaces coincide in 3D space at the given centers of distortion (perpendicular to projection point) on both images. Args: FOV_arc_width Coefficents: FOV(radians), Radius, center_x,y, dest_center_x,y FOV (Field Of View) the angular field of view of the distortion, across the width of the image, in degrees. The centers are the points of least distortion in the input and resulting images. These centers are however determined later. Coeff 0 is the FOV angle of view of image width in radians Coeff 1 is calculated radius of cylinder. Coeff 2,3 center of distortion of input image Coefficents 4,5 Center of Distortion of dest (determined later) */ if ( arguments[0] < MagickEpsilon || arguments[0] > 160.0 ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument", "%s : Invalid FOV Angle", CommandOptionToMnemonic(MagickDistortOptions, *method) ); coeff=(double *) RelinquishMagickMemory(coeff); return((double *) NULL); } coeff[0] = DegreesToRadians(arguments[0]); if ( *method == Cylinder2PlaneDistortion ) /* image is curved around cylinder, so FOV angle (in radians) * scales directly to image X coordinate, according to its radius. 
*/ coeff[1] = (double) image->columns/coeff[0]; else /* radius is distance away from an image with this angular FOV */ coeff[1] = (double) image->columns / ( 2 * tan(coeff[0]/2) ); coeff[2] = (double)(image->columns)/2.0+image->page.x; coeff[3] = (double)(image->rows)/2.0+image->page.y; coeff[4] = coeff[2]; coeff[5] = coeff[3]; /* assuming image size is the same */ return(coeff); } case BarrelDistortion: case BarrelInverseDistortion: { /* Barrel Distortion Rs=(A*Rd^3 + B*Rd^2 + C*Rd + D)*Rd BarrelInv Distortion Rs=Rd/(A*Rd^3 + B*Rd^2 + C*Rd + D) Where Rd is the normalized radius from corner to middle of image Input Arguments are one of the following forms (number of arguments)... 3: A,B,C 4: A,B,C,D 5: A,B,C X,Y 6: A,B,C,D X,Y 8: Ax,Bx,Cx,Dx Ay,By,Cy,Dy 10: Ax,Bx,Cx,Dx Ay,By,Cy,Dy X,Y Returns 10 coefficent values, which are de-normalized (pixel scale) Ax, Bx, Cx, Dx, Ay, By, Cy, Dy, Xc, Yc */ /* Radius de-normalization scaling factor */ double rscale = 2.0/MagickMin((double) image->columns,(double) image->rows); /* sanity check number of args must = 3,4,5,6,8,10 or error */ if ( (number_arguments < 3) || (number_arguments == 7) || (number_arguments == 9) || (number_arguments > 10) ) { coeff=(double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"InvalidArgument", "%s : number of arguments", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } /* A,B,C,D coefficients */ coeff[0] = arguments[0]; coeff[1] = arguments[1]; coeff[2] = arguments[2]; if ((number_arguments == 3) || (number_arguments == 5) ) coeff[3] = 1.0 - coeff[0] - coeff[1] - coeff[2]; else coeff[3] = arguments[3]; /* de-normalize the coefficients */ coeff[0] *= pow(rscale,3.0); coeff[1] *= rscale*rscale; coeff[2] *= rscale; /* Y coefficients: as given OR same as X coefficients */ if ( number_arguments >= 8 ) { coeff[4] = arguments[4] * pow(rscale,3.0); coeff[5] = arguments[5] * rscale*rscale; coeff[6] = arguments[6] * rscale; coeff[7] = arguments[7]; } else { coeff[4] = coeff[0]; coeff[5] = coeff[1]; coeff[6] = coeff[2]; coeff[7] = coeff[3]; } /* X,Y Center of Distortion (image coodinates) */ if ( number_arguments == 5 ) { coeff[8] = arguments[3]; coeff[9] = arguments[4]; } else if ( number_arguments == 6 ) { coeff[8] = arguments[4]; coeff[9] = arguments[5]; } else if ( number_arguments == 10 ) { coeff[8] = arguments[8]; coeff[9] = arguments[9]; } else { /* center of the image provided (image coodinates) */ coeff[8] = (double)image->columns/2.0 + image->page.x; coeff[9] = (double)image->rows/2.0 + image->page.y; } return(coeff); } case ShepardsDistortion: { /* Shepards Distortion input arguments are the coefficents! Just check the number of arguments is valid! Args: u1,v1, x1,y1, ... OR : u1,v1, r1,g1,c1, ... 
*/ if ( number_arguments%cp_size != 0 || number_arguments < cp_size ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument", "%s : 'requires CP's (4 numbers each)'", CommandOptionToMnemonic(MagickDistortOptions, *method)); coeff=(double *) RelinquishMagickMemory(coeff); return((double *) NULL); } /* User defined weighting power for Shepard's Method */ { const char *artifact=GetImageArtifact(image,"shepards:power"); if ( artifact != (const char *) NULL ) { coeff[0]=StringToDouble(artifact,(char **) NULL) / 2.0; if ( coeff[0] < MagickEpsilon ) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"InvalidArgument","%s", "-define shepards:power" ); coeff=(double *) RelinquishMagickMemory(coeff); return((double *) NULL); } } else coeff[0]=1.0; /* Default power of 2 (Inverse Squared) */ } return(coeff); } default: break; } /* you should never reach this point */ perror("no method handler"); /* just fail assertion */ return((double *) NULL); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D i s t o r t R e s i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DistortResizeImage() resize image using the equivalent but slower image % distortion operator. The filter is applied using a EWA cylindrical % resampling. But like resize the final image size is limited to whole pixels % with no effects by virtual-pixels on the result. % % Note that images containing a transparency channel will be twice as slow to % resize as images one without transparency. % % The format of the DistortResizeImage method is: % % Image *DistortResizeImage(const Image *image,const size_t columns, % const size_t rows,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o columns: the number of columns in the resized image. % % o rows: the number of rows in the resized image. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *DistortResizeImage(const Image *image, const size_t columns,const size_t rows,ExceptionInfo *exception) { #define DistortResizeImageTag "Distort/Image" Image *resize_image, *tmp_image; RectangleInfo crop_area; double distort_args[12]; VirtualPixelMethod vp_save; /* Distort resize image. 
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if ((columns == 0) || (rows == 0)) return((Image *) NULL); /* Do not short-circuit this resize if final image size is unchanged */ (void) memset(distort_args,0,12*sizeof(double)); distort_args[4]=(double) image->columns; distort_args[6]=(double) columns; distort_args[9]=(double) image->rows; distort_args[11]=(double) rows; vp_save=GetImageVirtualPixelMethod(image); tmp_image=CloneImage(image,0,0,MagickTrue,exception); if ( tmp_image == (Image *) NULL ) return((Image *) NULL); (void) SetImageVirtualPixelMethod(tmp_image,TransparentVirtualPixelMethod); if (image->matte == MagickFalse) { /* Image has not transparency channel, so we free to use it */ (void) SetImageAlphaChannel(tmp_image,SetAlphaChannel); resize_image=DistortImage(tmp_image,AffineDistortion,12,distort_args, MagickTrue,exception), tmp_image=DestroyImage(tmp_image); if ( resize_image == (Image *) NULL ) return((Image *) NULL); (void) SetImageAlphaChannel(resize_image,DeactivateAlphaChannel); InheritException(exception,&image->exception); } else { /* Image has transparency so handle colors and alpha separatly. Basically we need to separate Virtual-Pixel alpha in the resized image, so only the actual original images alpha channel is used. */ Image *resize_alpha; /* distort alpha channel separately */ (void) SeparateImageChannel(tmp_image,TrueAlphaChannel); (void) SetImageAlphaChannel(tmp_image,OpaqueAlphaChannel); resize_alpha=DistortImage(tmp_image,AffineDistortion,12,distort_args, MagickTrue,exception), tmp_image=DestroyImage(tmp_image); if ( resize_alpha == (Image *) NULL ) return((Image *) NULL); /* distort the actual image containing alpha + VP alpha */ tmp_image=CloneImage(image,0,0,MagickTrue,exception); if ( tmp_image == (Image *) NULL ) return((Image *) NULL); (void) SetImageVirtualPixelMethod(tmp_image, TransparentVirtualPixelMethod); (void) SetImageVirtualPixelMethod(tmp_image, TransparentVirtualPixelMethod); resize_image=DistortImage(tmp_image,AffineDistortion,12,distort_args, MagickTrue,exception), tmp_image=DestroyImage(tmp_image); if ( resize_image == (Image *) NULL) { resize_alpha=DestroyImage(resize_alpha); return((Image *) NULL); } /* replace resize images alpha with the separally distorted alpha */ (void) SetImageAlphaChannel(resize_image,DeactivateAlphaChannel); (void) SetImageAlphaChannel(resize_alpha,DeactivateAlphaChannel); (void) CompositeImage(resize_image,CopyOpacityCompositeOp,resize_alpha, 0,0); InheritException(exception,&resize_image->exception); resize_alpha=DestroyImage(resize_alpha); } (void) SetImageVirtualPixelMethod(resize_image,vp_save); /* Clean up the results of the Distortion */ crop_area.width=columns; crop_area.height=rows; crop_area.x=0; crop_area.y=0; tmp_image=resize_image; resize_image=CropImage(tmp_image,&crop_area,exception); tmp_image=DestroyImage(tmp_image); if (resize_image != (Image *) NULL) { resize_image->matte=image->matte; resize_image->compose=image->compose; resize_image->page.width=0; resize_image->page.height=0; } return(resize_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D i s t o r t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DistortImage() distorts an 
image using various distortion methods, by % mapping color lookups of the source image to a new destination image % usually of the same size as the source image, unless 'bestfit' is set to % true. % % If 'bestfit' is enabled, and distortion allows it, the destination image is % adjusted to ensure the whole source 'image' will just fit within the final % destination image, which will be sized and offset accordingly. Also in % many cases the virtual offset of the source image will be taken into % account in the mapping. % % If the '-verbose' control option has been set, print to standard error the % equivalent '-fx' formula with coefficients for the function, if practical. % % The format of the DistortImage() method is: % % Image *DistortImage(const Image *image,const DistortImageMethod method, % const size_t number_arguments,const double *arguments, % MagickBooleanType bestfit, ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image to be distorted. % % o method: the method of image distortion. % % ArcDistortion always ignores the source image offset, and always % applies 'bestfit' to the destination image with the top left corner offset % relative to the polar mapping center. % % Affine, Perspective, and Bilinear do least squares fitting of the % distortion when more than the minimum number of control point pairs % are provided. % % Perspective and Bilinear fall back to an Affine distortion when less % than 4 control point pairs are provided. While Affine distortions % let you use any number of control point pairs, that is, zero pairs is % a no-op (viewport only) distortion, one pair is a translation and % two pairs of control points do a scale-rotate-translate, without any % shearing. % % o number_arguments: the number of arguments given. % % o arguments: an array of floating point arguments for this method. % % o bestfit: Attempt to 'bestfit' the size of the resulting image. % This also forces the resulting image to be a 'layered' virtual % canvas image. Can be overridden using the 'distort:viewport' setting. % % o exception: return any errors or warnings in this structure % % Extra Controls from Image meta-data (artifacts)... % % o "verbose" % Output to stderr alternatives, internal coefficients, and FX % equivalents for the distortion operation (if feasible). % This forms an extra check of the distortion method, and allows users % access to the internal constants IM calculates for the distortion. % % o "distort:viewport" % Directly set the output image canvas area and offset to use for the % resulting image, rather than using the original image's canvas, or a % calculated 'bestfit' canvas. % % o "distort:scale" % Scale the size of the output canvas by this amount to provide a % method of Zooming, and for super-sampling the results. % % Other settings that can affect results include % % o 'interpolate' For source image lookups (scale enlargements) % % o 'filter' Set filter to use for area-resampling (scale shrinking).
% Set to 'point' to turn off and use 'interpolate' lookup % instead % */ MagickExport Image *DistortImage(const Image *image,DistortImageMethod method, const size_t number_arguments,const double *arguments, MagickBooleanType bestfit,ExceptionInfo *exception) { #define DistortImageTag "Distort/Image" double *coeff, output_scaling; Image *distort_image; RectangleInfo geometry; /* geometry of the distorted space viewport */ MagickBooleanType viewport_given; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); /* Handle Special Compound Distortions */ if (method == ResizeDistortion) { if (number_arguments != 2) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : '%s'","Resize", "Invalid number of args: 2 only"); return((Image *) NULL); } distort_image=DistortResizeImage(image,(size_t) arguments[0], (size_t) arguments[1],exception); return(distort_image); } /* Convert input arguments (usually as control points for reverse mapping) into mapping coefficients to apply the distortion. Note that some distortions are mapped to other distortions, and as such do not require specific code after this point. */ coeff=GenerateCoefficients(image,&method,number_arguments,arguments,0, exception); if (coeff == (double *) NULL) return((Image *) NULL); /* Determine the size and offset for a 'bestfit' destination. Usally the four corners of the source image is enough. */ /* default output image bounds, when no 'bestfit' is requested */ geometry.width=image->columns; geometry.height=image->rows; geometry.x=0; geometry.y=0; if ( method == ArcDistortion ) { bestfit = MagickTrue; /* always calculate a 'best fit' viewport */ } /* Work out the 'best fit', (required for ArcDistortion) */ if ( bestfit ) { PointInfo s,d,min,max; /* source, dest coords --mapping--> min, max coords */ MagickBooleanType fix_bounds = MagickTrue; /* enlarge bounds for VP handling */ s.x=s.y=min.x=max.x=min.y=max.y=0.0; /* keep compiler happy */ /* defines to figure out the bounds of the distorted image */ #define InitalBounds(p) \ { \ /* printf("%lg,%lg -> %lg,%lg\n", s.x,s.y, d.x,d.y); */ \ min.x = max.x = p.x; \ min.y = max.y = p.y; \ } #define ExpandBounds(p) \ { \ /* printf("%lg,%lg -> %lg,%lg\n", s.x,s.y, d.x,d.y); */ \ min.x = MagickMin(min.x,p.x); \ max.x = MagickMax(max.x,p.x); \ min.y = MagickMin(min.y,p.y); \ max.y = MagickMax(max.y,p.y); \ } switch (method) { case AffineDistortion: { double inverse[6]; InvertAffineCoefficients(coeff, inverse); s.x = (double) image->page.x; s.y = (double) image->page.y; d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2]; d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5]; InitalBounds(d); s.x = (double) image->page.x+image->columns; s.y = (double) image->page.y; d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2]; d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5]; ExpandBounds(d); s.x = (double) image->page.x; s.y = (double) image->page.y+image->rows; d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2]; d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5]; ExpandBounds(d); s.x = (double) image->page.x+image->columns; s.y = (double) image->page.y+image->rows; d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2]; d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5]; ExpandBounds(d); break; } case PerspectiveDistortion: { double inverse[8], scale; 
InvertPerspectiveCoefficients(coeff, inverse); s.x = (double) image->page.x; s.y = (double) image->page.y; scale=inverse[6]*s.x+inverse[7]*s.y+1.0; scale=PerceptibleReciprocal(scale); d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]); d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]); InitalBounds(d); s.x = (double) image->page.x+image->columns; s.y = (double) image->page.y; scale=inverse[6]*s.x+inverse[7]*s.y+1.0; scale=PerceptibleReciprocal(scale); d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]); d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]); ExpandBounds(d); s.x = (double) image->page.x; s.y = (double) image->page.y+image->rows; scale=inverse[6]*s.x+inverse[7]*s.y+1.0; scale=PerceptibleReciprocal(scale); d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]); d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]); ExpandBounds(d); s.x = (double) image->page.x+image->columns; s.y = (double) image->page.y+image->rows; scale=inverse[6]*s.x+inverse[7]*s.y+1.0; scale=PerceptibleReciprocal(scale); d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]); d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]); ExpandBounds(d); break; } case ArcDistortion: { double a, ca, sa; /* Forward Map Corners */ a = coeff[0]-coeff[1]/2; ca = cos(a); sa = sin(a); d.x = coeff[2]*ca; d.y = coeff[2]*sa; InitalBounds(d); d.x = (coeff[2]-coeff[3])*ca; d.y = (coeff[2]-coeff[3])*sa; ExpandBounds(d); a = coeff[0]+coeff[1]/2; ca = cos(a); sa = sin(a); d.x = coeff[2]*ca; d.y = coeff[2]*sa; ExpandBounds(d); d.x = (coeff[2]-coeff[3])*ca; d.y = (coeff[2]-coeff[3])*sa; ExpandBounds(d); /* Orthogonal points along top of arc */ for( a=(double) (ceil((double) ((coeff[0]-coeff[1]/2.0)/MagickPI2))*MagickPI2); a<(coeff[0]+coeff[1]/2.0); a+=MagickPI2 ) { ca = cos(a); sa = sin(a); d.x = coeff[2]*ca; d.y = coeff[2]*sa; ExpandBounds(d); } /* Convert the angle_to_width and radius_to_height to appropriate scaling factors, to allow faster processing in the mapping function. */ coeff[1] = (double) (Magick2PI*image->columns/coeff[1]); coeff[3] = (double)image->rows/coeff[3]; break; } case PolarDistortion: { if (number_arguments < 2) coeff[2] = coeff[3] = 0.0; min.x = coeff[2]-coeff[0]; max.x = coeff[2]+coeff[0]; min.y = coeff[3]-coeff[0]; max.y = coeff[3]+coeff[0]; /* should be about 1.0 if Rmin = 0 */ coeff[7]=(double) geometry.height/(coeff[0]-coeff[1]); break; } case DePolarDistortion: { /* direct calculation as it needs to tile correctly * for reversibility in a DePolar-Polar cycle */ fix_bounds = MagickFalse; geometry.x = geometry.y = 0; geometry.height = (size_t) ceil(coeff[0]-coeff[1]); geometry.width = (size_t) ceil((coeff[0]-coeff[1])*(coeff[5]-coeff[4])*0.5); /* correct scaling factors relative to new size */ coeff[6]=(coeff[5]-coeff[4])/geometry.width; /* changed width */ coeff[7]=(coeff[0]-coeff[1])/geometry.height; /* should be about 1.0 */ break; } case Cylinder2PlaneDistortion: { /* direct calculation so center of distortion is either a pixel * center, or pixel edge. 
This allows for reversibility of the * distortion */ geometry.x = geometry.y = 0; geometry.width = (size_t) ceil( 2.0*coeff[1]*tan(coeff[0]/2.0) ); geometry.height = (size_t) ceil( 2.0*coeff[3]/cos(coeff[0]/2.0) ); /* correct center of distortion relative to new size */ coeff[4] = (double) geometry.width/2.0; coeff[5] = (double) geometry.height/2.0; fix_bounds = MagickFalse; break; } case Plane2CylinderDistortion: { /* direct calculation center is either pixel center, or pixel edge * so as to allow reversibility of the image distortion */ geometry.x = geometry.y = 0; geometry.width = (size_t) ceil(coeff[0]*coeff[1]); /* FOV * radius */ geometry.height = (size_t) (2*coeff[3]); /* input image height */ /* correct center of distortion relative to new size */ coeff[4] = (double) geometry.width/2.0; coeff[5] = (double) geometry.height/2.0; fix_bounds = MagickFalse; break; } case ShepardsDistortion: case BilinearForwardDistortion: case BilinearReverseDistortion: #if 0 case QuadrilateralDistortion: #endif case PolynomialDistortion: case BarrelDistortion: case BarrelInverseDistortion: default: /* no calculated bestfit available for these distortions */ bestfit = MagickFalse; fix_bounds = MagickFalse; break; } /* Set the output image geometry to calculated 'bestfit'. Yes this tends to 'over do' the file image size, ON PURPOSE! Do not do this for DePolar which needs to be exact for virtual tiling. */ if ( fix_bounds ) { geometry.x = (ssize_t) floor(min.x-0.5); geometry.y = (ssize_t) floor(min.y-0.5); geometry.width=(size_t) ceil(max.x-geometry.x+0.5); geometry.height=(size_t) ceil(max.y-geometry.y+0.5); } } /* end bestfit destination image calculations */ /* The user provided a 'viewport' expert option which may overrides some parts of the current output image geometry. This also overrides its default 'bestfit' setting. 
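For example (illustrative values): -define distort:viewport=100x100+10+10 forces a 100x100 output canvas with a +10+10 virtual offset, regardless of any computed 'bestfit' geometry.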
*/ { const char *artifact=GetImageArtifact(image,"distort:viewport"); viewport_given = MagickFalse; if ( artifact != (const char *) NULL ) { MagickStatusType flags=ParseAbsoluteGeometry(artifact,&geometry); if (flags==NoValue) (void) ThrowMagickException(exception,GetMagickModule(), OptionWarning,"InvalidGeometry","`%s' `%s'", "distort:viewport",artifact); else viewport_given = MagickTrue; } } /* Verbose output */ if ( GetImageArtifact(image,"verbose") != (const char *) NULL ) { register ssize_t i; char image_gen[MaxTextExtent]; const char *lookup; /* Set destination image size and virtual offset */ if ( bestfit || viewport_given ) { (void) FormatLocaleString(image_gen, MaxTextExtent," -size %.20gx%.20g " "-page %+.20g%+.20g xc: +insert \\\n",(double) geometry.width, (double) geometry.height,(double) geometry.x,(double) geometry.y); lookup="v.p{ xx-v.page.x-.5, yy-v.page.y-.5 }"; } else { image_gen[0] = '\0'; /* no destination to generate */ lookup = "p{ xx-page.x-.5, yy-page.y-.5 }"; /* simplify lookup */ } switch (method) { case AffineDistortion: { double *inverse; inverse = (double *) AcquireQuantumMemory(6,sizeof(*inverse)); if (inverse == (double *) NULL) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed", "%s", "DistortImages"); return((Image *) NULL); } InvertAffineCoefficients(coeff, inverse); CoefficientsToAffineArgs(inverse); (void) FormatLocaleFile(stderr, "Affine Projection:\n"); (void) FormatLocaleFile(stderr, " -distort AffineProjection \\\n '"); for (i=0; i < 5; i++) (void) FormatLocaleFile(stderr, "%lf,", inverse[i]); (void) FormatLocaleFile(stderr, "%lf'\n", inverse[5]); inverse = (double *) RelinquishMagickMemory(inverse); (void) FormatLocaleFile(stderr, "Affine Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n"); (void) FormatLocaleFile(stderr, " xx=%+lf*ii %+lf*jj %+lf;\n", coeff[0], coeff[1], coeff[2]); (void) FormatLocaleFile(stderr, " yy=%+lf*ii %+lf*jj %+lf;\n", coeff[3], coeff[4], coeff[5]); (void) FormatLocaleFile(stderr, " %s' \\\n", lookup); break; } case PerspectiveDistortion: { double *inverse; inverse = (double *) AcquireQuantumMemory(8,sizeof(*inverse)); if (inverse == (double *) NULL) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed", "%s", "DistortCoefficients"); return((Image *) NULL); } InvertPerspectiveCoefficients(coeff, inverse); (void) FormatLocaleFile(stderr, "Perspective Projection:\n"); (void) FormatLocaleFile(stderr, " -distort PerspectiveProjection \\\n '"); for (i=0; i<4; i++) (void) FormatLocaleFile(stderr, "%lf, ", inverse[i]); (void) FormatLocaleFile(stderr, "\n "); for (; i<7; i++) (void) FormatLocaleFile(stderr, "%lf, ", inverse[i]); (void) FormatLocaleFile(stderr, "%lf'\n", inverse[7]); inverse = (double *) RelinquishMagickMemory(inverse); (void) FormatLocaleFile(stderr, "Perspective Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n"); (void) FormatLocaleFile(stderr, " rr=%+lf*ii %+lf*jj + 1;\n", coeff[6], coeff[7]); (void) FormatLocaleFile(stderr, " xx=(%+lf*ii %+lf*jj %+lf)/rr;\n", coeff[0], coeff[1], coeff[2]); (void) FormatLocaleFile(stderr, " yy=(%+lf*ii %+lf*jj %+lf)/rr;\n", coeff[3], coeff[4], coeff[5]); (void) 
FormatLocaleFile(stderr, " rr%s0 ? %s : blue' \\\n", coeff[8] < 0 ? "<" : ">", lookup); break; } case BilinearForwardDistortion: (void) FormatLocaleFile(stderr, "BilinearForward Mapping Equations:\n"); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " i = %+lf*x %+lf*y %+lf*x*y %+lf;\n", coeff[0], coeff[1], coeff[2], coeff[3]); (void) FormatLocaleFile(stderr, " j = %+lf*x %+lf*y %+lf*x*y %+lf;\n", coeff[4], coeff[5], coeff[6], coeff[7]); #if 0 /* for debugging */ (void) FormatLocaleFile(stderr, " c8 = %+lf c9 = 2*a = %+lf;\n", coeff[8], coeff[9]); #endif (void) FormatLocaleFile(stderr, "BilinearForward Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf; jj=j+page.y%+lf;\n", 0.5-coeff[3], 0.5-coeff[7]); (void) FormatLocaleFile(stderr, " bb=%lf*ii %+lf*jj %+lf;\n", coeff[6], -coeff[2], coeff[8]); /* Handle Special degenerate (non-quadratic) or trapezoidal case */ if ( coeff[9] != 0 ) { (void) FormatLocaleFile(stderr, " rt=bb*bb %+lf*(%lf*ii%+lf*jj);\n", -2*coeff[9], coeff[4], -coeff[0]); (void) FormatLocaleFile(stderr, " yy=( -bb + sqrt(rt) ) / %lf;\n", coeff[9]); } else (void) FormatLocaleFile(stderr, " yy=(%lf*ii%+lf*jj)/bb;\n", -coeff[4], coeff[0]); (void) FormatLocaleFile(stderr, " xx=(ii %+lf*yy)/(%lf %+lf*yy);\n", -coeff[1], coeff[0], coeff[2]); if ( coeff[9] != 0 ) (void) FormatLocaleFile(stderr, " (rt < 0 ) ? red : %s'\n", lookup); else (void) FormatLocaleFile(stderr, " %s' \\\n", lookup); break; case BilinearReverseDistortion: #if 0 (void) FormatLocaleFile(stderr, "Polynomial Projection Distort:\n"); (void) FormatLocaleFile(stderr, " -distort PolynomialProjection \\\n"); (void) FormatLocaleFile(stderr, " '1.5, %lf, %lf, %lf, %lf,\n", coeff[3], coeff[0], coeff[1], coeff[2]); (void) FormatLocaleFile(stderr, " %lf, %lf, %lf, %lf'\n", coeff[7], coeff[4], coeff[5], coeff[6]); #endif (void) FormatLocaleFile(stderr, "BilinearReverse Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n"); (void) FormatLocaleFile(stderr, " xx=%+lf*ii %+lf*jj %+lf*ii*jj %+lf;\n", coeff[0], coeff[1], coeff[2], coeff[3]); (void) FormatLocaleFile(stderr, " yy=%+lf*ii %+lf*jj %+lf*ii*jj %+lf;\n", coeff[4], coeff[5], coeff[6], coeff[7]); (void) FormatLocaleFile(stderr, " %s' \\\n", lookup); break; case PolynomialDistortion: { size_t nterms = (size_t) coeff[1]; (void) FormatLocaleFile(stderr, "Polynomial (order %lg, terms %lu), FX Equivelent\n", coeff[0],(unsigned long) nterms); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n"); (void) FormatLocaleFile(stderr, " xx ="); for (i=0; i<(ssize_t) nterms; i++) { if ( i != 0 && i%4 == 0 ) (void) FormatLocaleFile(stderr, "\n "); (void) FormatLocaleFile(stderr, " %+lf%s", coeff[2+i], poly_basis_str(i)); } (void) FormatLocaleFile(stderr, ";\n yy ="); for (i=0; i<(ssize_t) nterms; i++) { if ( i != 0 && i%4 == 0 ) (void) FormatLocaleFile(stderr, "\n "); (void) FormatLocaleFile(stderr, " %+lf%s", coeff[2+i+nterms], poly_basis_str(i)); } (void) FormatLocaleFile(stderr, ";\n %s' \\\n", lookup); break; } case ArcDistortion: { (void) FormatLocaleFile(stderr, "Arc Distort, Internal Coefficients:\n"); for ( i=0; i<5; i++ ) (void) FormatLocaleFile(stderr, " c%.20g = %+lf\n", (double) i, coeff[i]); (void) FormatLocaleFile(stderr, "Arc Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr, 
"%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x; jj=j+page.y;\n"); (void) FormatLocaleFile(stderr, " xx=(atan2(jj,ii)%+lf)/(2*pi);\n", -coeff[0]); (void) FormatLocaleFile(stderr, " xx=xx-round(xx);\n"); (void) FormatLocaleFile(stderr, " xx=xx*%lf %+lf;\n", coeff[1], coeff[4]); (void) FormatLocaleFile(stderr, " yy=(%lf - hypot(ii,jj)) * %lf;\n", coeff[2], coeff[3]); (void) FormatLocaleFile(stderr, " v.p{xx-.5,yy-.5}' \\\n"); break; } case PolarDistortion: { (void) FormatLocaleFile(stderr, "Polar Distort, Internal Coefficents\n"); for ( i=0; i<8; i++ ) (void) FormatLocaleFile(stderr, " c%.20g = %+lf\n", (double) i, coeff[i]); (void) FormatLocaleFile(stderr, "Polar Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf; jj=j+page.y%+lf;\n", -coeff[2], -coeff[3]); (void) FormatLocaleFile(stderr, " xx=(atan2(ii,jj)%+lf)/(2*pi);\n", -(coeff[4]+coeff[5])/2 ); (void) FormatLocaleFile(stderr, " xx=xx-round(xx);\n"); (void) FormatLocaleFile(stderr, " xx=xx*2*pi*%lf + v.w/2;\n", coeff[6] ); (void) FormatLocaleFile(stderr, " yy=(hypot(ii,jj)%+lf)*%lf;\n", -coeff[1], coeff[7] ); (void) FormatLocaleFile(stderr, " v.p{xx-.5,yy-.5}' \\\n"); break; } case DePolarDistortion: { (void) FormatLocaleFile(stderr, "DePolar Distort, Internal Coefficents\n"); for ( i=0; i<8; i++ ) (void) FormatLocaleFile(stderr, " c%.20g = %+lf\n", (double) i, coeff[i]); (void) FormatLocaleFile(stderr, "DePolar Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'aa=(i+.5)*%lf %+lf;\n", coeff[6], +coeff[4] ); (void) FormatLocaleFile(stderr, " rr=(j+.5)*%lf %+lf;\n", coeff[7], +coeff[1] ); (void) FormatLocaleFile(stderr, " xx=rr*sin(aa) %+lf;\n", coeff[2] ); (void) FormatLocaleFile(stderr, " yy=rr*cos(aa) %+lf;\n", coeff[3] ); (void) FormatLocaleFile(stderr, " v.p{xx-.5,yy-.5}' \\\n"); break; } case Cylinder2PlaneDistortion: { (void) FormatLocaleFile(stderr, "Cylinder to Plane Distort, Internal Coefficents\n"); (void) FormatLocaleFile(stderr, " cylinder_radius = %+lf\n", coeff[1]); (void) FormatLocaleFile(stderr, "Cylinder to Plane Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf+0.5; jj=j+page.y%+lf+0.5;\n", -coeff[4], -coeff[5]); (void) FormatLocaleFile(stderr, " aa=atan(ii/%+lf);\n", coeff[1] ); (void) FormatLocaleFile(stderr, " xx=%lf*aa%+lf;\n", coeff[1], coeff[2] ); (void) FormatLocaleFile(stderr, " yy=jj*cos(aa)%+lf;\n", coeff[3] ); (void) FormatLocaleFile(stderr, " %s' \\\n", lookup); break; } case Plane2CylinderDistortion: { (void) FormatLocaleFile(stderr, "Plane to Cylinder Distort, Internal Coefficents\n"); (void) FormatLocaleFile(stderr, " cylinder_radius = %+lf\n", coeff[1]); (void) FormatLocaleFile(stderr, "Plane to Cylinder Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf+0.5; jj=j+page.y%+lf+0.5;\n", -coeff[4], -coeff[5]); (void) FormatLocaleFile(stderr, " ii=ii/%+lf;\n", coeff[1] ); (void) FormatLocaleFile(stderr, " xx=%lf*tan(ii)%+lf;\n", coeff[1], coeff[2] ); (void) FormatLocaleFile(stderr, " yy=jj/cos(ii)%+lf;\n", coeff[3] ); (void) FormatLocaleFile(stderr, " %s' \\\n", lookup); break; break; } case BarrelDistortion: case BarrelInverseDistortion: { double xc,yc; /* NOTE: This does the barrel roll in pixel coords not image coords ** The internal distortion must do it in image 
coordinates, ** so that is what the center coeff (8,9) is given in. */ xc = ((double)image->columns-1.0)/2.0 + image->page.x; yc = ((double)image->rows-1.0)/2.0 + image->page.y; (void) FormatLocaleFile(stderr, "Barrel%s Distort, FX Equivelent:\n", method == BarrelDistortion ? "" : "Inv"); (void) FormatLocaleFile(stderr, "%s", image_gen); if ( fabs(coeff[8]-xc-0.5) < 0.1 && fabs(coeff[9]-yc-0.5) < 0.1 ) (void) FormatLocaleFile(stderr, " -fx 'xc=(w-1)/2; yc=(h-1)/2;\n"); else (void) FormatLocaleFile(stderr, " -fx 'xc=%lf; yc=%lf;\n", coeff[8]-0.5, coeff[9]-0.5); (void) FormatLocaleFile(stderr, " ii=i-xc; jj=j-yc; rr=hypot(ii,jj);\n"); (void) FormatLocaleFile(stderr, " ii=ii%s(%lf*rr*rr*rr %+lf*rr*rr %+lf*rr %+lf);\n", method == BarrelDistortion ? "*" : "/", coeff[0],coeff[1],coeff[2],coeff[3]); (void) FormatLocaleFile(stderr, " jj=jj%s(%lf*rr*rr*rr %+lf*rr*rr %+lf*rr %+lf);\n", method == BarrelDistortion ? "*" : "/", coeff[4],coeff[5],coeff[6],coeff[7]); (void) FormatLocaleFile(stderr, " v.p{fx*ii+xc,fy*jj+yc}' \\\n"); } default: break; } } /* The user provided a 'scale' expert option will scale the output image size, by the factor given allowing for super-sampling of the distorted image space. Any scaling factors must naturally be halved as a result. */ { const char *artifact; artifact=GetImageArtifact(image,"distort:scale"); output_scaling = 1.0; if (artifact != (const char *) NULL) { output_scaling = fabs(StringToDouble(artifact,(char **) NULL)); geometry.width=(size_t) (output_scaling*geometry.width+0.5); geometry.height=(size_t) (output_scaling*geometry.height+0.5); geometry.x=(ssize_t) (output_scaling*geometry.x+0.5); geometry.y=(ssize_t) (output_scaling*geometry.y+0.5); if ( output_scaling < 0.1 ) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"InvalidArgument","%s","-define distort:scale" ); return((Image *) NULL); } output_scaling = 1/output_scaling; } } #define ScaleFilter(F,A,B,C,D) \ ScaleResampleFilter( (F), \ output_scaling*(A), output_scaling*(B), \ output_scaling*(C), output_scaling*(D) ) /* Initialize the distort image attributes. */ distort_image=CloneImage(image,geometry.width,geometry.height,MagickTrue, exception); if (distort_image == (Image *) NULL) { coeff=(double *) RelinquishMagickMemory(coeff); return((Image *) NULL); } /* if image is ColorMapped - change it to DirectClass */ if (SetImageStorageClass(distort_image,DirectClass) == MagickFalse) { coeff=(double *) RelinquishMagickMemory(coeff); InheritException(exception,&distort_image->exception); distort_image=DestroyImage(distort_image); return((Image *) NULL); } if ((IsPixelGray(&distort_image->background_color) == MagickFalse) && (IsGrayColorspace(distort_image->colorspace) != MagickFalse)) (void) SetImageColorspace(distort_image,sRGBColorspace); if (distort_image->background_color.opacity != OpaqueOpacity) distort_image->matte=MagickTrue; distort_image->page.x=geometry.x; distort_image->page.y=geometry.y; { /* ----- MAIN CODE ----- Sample the source image to each pixel in the distort image. 
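Illustrative note (hypothetical numbers): with -define distort:scale=2 the viewport above was doubled and output_scaling is now 0.5, so each destination pixel (i,j) is mapped back into distortion space as d = (geometry offset + i + 0.5)*0.5 before the per-method source lookup, and the ScaleFilter() EWA vectors are halved to match.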
*/ CacheView *distort_view; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket zero; ResampleFilter **magick_restrict resample_filter; ssize_t j; status=MagickTrue; progress=0; GetMagickPixelPacket(distort_image,&zero); resample_filter=AcquireResampleFilterThreadSet(image, UndefinedVirtualPixelMethod,MagickFalse,exception); distort_view=AcquireAuthenticCacheView(distort_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,distort_image,distort_image->rows,1) #endif for (j=0; j < (ssize_t) distort_image->rows; j++) { const int id = GetOpenMPThreadId(); double validity; /* how mathematically valid is this the mapping */ MagickBooleanType sync; MagickPixelPacket pixel, /* pixel color to assign to distorted image */ invalid; /* the color to assign when distort result is invalid */ PointInfo d, s; /* transform destination image x,y to source image x,y */ register IndexPacket *magick_restrict indexes; register ssize_t i; register PixelPacket *magick_restrict q; q=QueueCacheViewAuthenticPixels(distort_view,0,j,distort_image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(distort_view); pixel=zero; /* Define constant scaling vectors for Affine Distortions Other methods are either variable, or use interpolated lookup */ switch (method) { case AffineDistortion: ScaleFilter( resample_filter[id], coeff[0], coeff[1], coeff[3], coeff[4] ); break; default: break; } /* Initialize default pixel validity * negative: pixel is invalid output 'matte_color' * 0.0 to 1.0: antialiased, mix with resample output * 1.0 or greater: use resampled output. */ validity = 1.0; GetMagickPixelPacket(distort_image,&invalid); SetMagickPixelPacket(distort_image,&distort_image->matte_color, (IndexPacket *) NULL, &invalid); if (distort_image->colorspace == CMYKColorspace) ConvertRGBToCMYK(&invalid); /* what about other color spaces? */ for (i=0; i < (ssize_t) distort_image->columns; i++) { /* map pixel coordinate to distortion space coordinate */ d.x = (double) (geometry.x+i+0.5)*output_scaling; d.y = (double) (geometry.y+j+0.5)*output_scaling; s = d; /* default is a no-op mapping */ switch (method) { case AffineDistortion: { s.x=coeff[0]*d.x+coeff[1]*d.y+coeff[2]; s.y=coeff[3]*d.x+coeff[4]*d.y+coeff[5]; /* Affine partial derivitives are constant -- set above */ break; } case PerspectiveDistortion: { double p,q,r,abs_r,abs_c6,abs_c7,scale; /* perspective is a ratio of affines */ p=coeff[0]*d.x+coeff[1]*d.y+coeff[2]; q=coeff[3]*d.x+coeff[4]*d.y+coeff[5]; r=coeff[6]*d.x+coeff[7]*d.y+1.0; /* Pixel Validity -- is it a 'sky' or 'ground' pixel */ validity = (r*coeff[8] < 0.0) ? 
0.0 : 1.0; /* Determine horizon anti-alias blending */ abs_r = fabs(r)*2; abs_c6 = fabs(coeff[6]); abs_c7 = fabs(coeff[7]); if ( abs_c6 > abs_c7 ) { if ( abs_r < abs_c6*output_scaling ) validity = 0.5 - coeff[8]*r/(coeff[6]*output_scaling); } else if ( abs_r < abs_c7*output_scaling ) validity = 0.5 - coeff[8]*r/(coeff[7]*output_scaling); /* Perspective Sampling Point (if valid) */ if ( validity > 0.0 ) { /* divide by r affine, for perspective scaling */ scale = 1.0/r; s.x = p*scale; s.y = q*scale; /* Perspective Partial Derivatives or Scaling Vectors */ scale *= scale; ScaleFilter( resample_filter[id], (r*coeff[0] - p*coeff[6])*scale, (r*coeff[1] - p*coeff[7])*scale, (r*coeff[3] - q*coeff[6])*scale, (r*coeff[4] - q*coeff[7])*scale ); } break; } case BilinearReverseDistortion: { /* Reversed Mapped is just a simple polynomial */ s.x=coeff[0]*d.x+coeff[1]*d.y+coeff[2]*d.x*d.y+coeff[3]; s.y=coeff[4]*d.x+coeff[5]*d.y +coeff[6]*d.x*d.y+coeff[7]; /* Bilinear partial derivitives of scaling vectors */ ScaleFilter( resample_filter[id], coeff[0] + coeff[2]*d.y, coeff[1] + coeff[2]*d.x, coeff[4] + coeff[6]*d.y, coeff[5] + coeff[6]*d.x ); break; } case BilinearForwardDistortion: { /* Forward mapped needs reversed polynomial equations * which unfortunatally requires a square root! */ double b,c; d.x -= coeff[3]; d.y -= coeff[7]; b = coeff[6]*d.x - coeff[2]*d.y + coeff[8]; c = coeff[4]*d.x - coeff[0]*d.y; validity = 1.0; /* Handle Special degenerate (non-quadratic) case * Currently without horizon anti-alising */ if ( fabs(coeff[9]) < MagickEpsilon ) s.y = -c/b; else { c = b*b - 2*coeff[9]*c; if ( c < 0.0 ) validity = 0.0; else s.y = ( -b + sqrt(c) )/coeff[9]; } if ( validity > 0.0 ) s.x = ( d.x - coeff[1]*s.y) / ( coeff[0] + coeff[2]*s.y ); /* NOTE: the sign of the square root should be -ve for parts where the source image becomes 'flipped' or 'mirrored'. FUTURE: Horizon handling FUTURE: Scaling factors or Deritives (how?) */ break; } #if 0 case BilinearDistortion: /* Bilinear mapping of any Quadrilateral to any Quadrilateral */ /* UNDER DEVELOPMENT */ break; #endif case PolynomialDistortion: { /* multi-ordered polynomial */ register ssize_t k; ssize_t nterms=(ssize_t)coeff[1]; PointInfo du,dv; /* the du,dv vectors from unit dx,dy -- derivatives */ s.x=s.y=du.x=du.y=dv.x=dv.y=0.0; for(k=0; k < nterms; k++) { s.x += poly_basis_fn(k,d.x,d.y)*coeff[2+k]; du.x += poly_basis_dx(k,d.x,d.y)*coeff[2+k]; du.y += poly_basis_dy(k,d.x,d.y)*coeff[2+k]; s.y += poly_basis_fn(k,d.x,d.y)*coeff[2+k+nterms]; dv.x += poly_basis_dx(k,d.x,d.y)*coeff[2+k+nterms]; dv.y += poly_basis_dy(k,d.x,d.y)*coeff[2+k+nterms]; } ScaleFilter( resample_filter[id], du.x,du.y,dv.x,dv.y ); break; } case ArcDistortion: { /* what is the angle and radius in the destination image */ s.x = (double) ((atan2(d.y,d.x) - coeff[0])/Magick2PI); s.x -= MagickRound(s.x); /* angle */ s.y = hypot(d.x,d.y); /* radius */ /* Arc Distortion Partial Scaling Vectors Are derived by mapping the perpendicular unit vectors dR and dA*R*2PI rather than trying to map dx and dy The results is a very simple orthogonal aligned ellipse. 
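Illustrative numbers (assuming a 360x90 source distorted over a full 360 degree arc): after the 'bestfit' pass coeff[1] is 360 (pixels per full turn) and coeff[3] is about 90/89, so a destination pixel at radius s.y = 100 gets the scaling vector 360/(2*pi*100), 0, 0, 90/89, or roughly 0.57, 0, 0, 1.01.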
*/ if ( s.y > MagickEpsilon ) ScaleFilter( resample_filter[id], (double) (coeff[1]/(Magick2PI*s.y)), 0, 0, coeff[3] ); else ScaleFilter( resample_filter[id], distort_image->columns*2, 0, 0, coeff[3] ); /* now scale the angle and radius for source image lookup point */ s.x = s.x*coeff[1] + coeff[4] + image->page.x +0.5; s.y = (coeff[2] - s.y) * coeff[3] + image->page.y; break; } case PolarDistortion: { /* 2D Cartesain to Polar View */ d.x -= coeff[2]; d.y -= coeff[3]; s.x = atan2(d.x,d.y) - (coeff[4]+coeff[5])/2; s.x /= Magick2PI; s.x -= MagickRound(s.x); s.x *= Magick2PI; /* angle - relative to centerline */ s.y = hypot(d.x,d.y); /* radius */ /* Polar Scaling vectors are based on mapping dR and dA vectors This results in very simple orthogonal scaling vectors */ if ( s.y > MagickEpsilon ) ScaleFilter( resample_filter[id], (double) (coeff[6]/(Magick2PI*s.y)), 0, 0, coeff[7] ); else ScaleFilter( resample_filter[id], distort_image->columns*2, 0, 0, coeff[7] ); /* now finish mapping radius/angle to source x,y coords */ s.x = s.x*coeff[6] + (double)image->columns/2.0 + image->page.x; s.y = (s.y-coeff[1])*coeff[7] + image->page.y; break; } case DePolarDistortion: { /* @D Polar to Carteasain */ /* ignore all destination virtual offsets */ d.x = ((double)i+0.5)*output_scaling*coeff[6]+coeff[4]; d.y = ((double)j+0.5)*output_scaling*coeff[7]+coeff[1]; s.x = d.y*sin(d.x) + coeff[2]; s.y = d.y*cos(d.x) + coeff[3]; /* derivatives are usless - better to use SuperSampling */ break; } case Cylinder2PlaneDistortion: { /* 3D Cylinder to Tangential Plane */ double ax, cx; /* relative to center of distortion */ d.x -= coeff[4]; d.y -= coeff[5]; d.x /= coeff[1]; /* x' = x/r */ ax=atan(d.x); /* aa = atan(x/r) = u/r */ cx=cos(ax); /* cx = cos(atan(x/r)) = 1/sqrt(x^2+u^2) */ s.x = coeff[1]*ax; /* u = r*atan(x/r) */ s.y = d.y*cx; /* v = y*cos(u/r) */ /* derivatives... (see personnal notes) */ ScaleFilter( resample_filter[id], 1.0/(1.0+d.x*d.x), 0.0, -d.x*s.y*cx*cx/coeff[1], s.y/d.y ); #if 0 if ( i == 0 && j == 0 ) { fprintf(stderr, "x=%lf y=%lf u=%lf v=%lf\n", d.x*coeff[1], d.y, s.x, s.y); fprintf(stderr, "phi = %lf\n", (double)(ax * 180.0/MagickPI) ); fprintf(stderr, "du/dx=%lf du/dx=%lf dv/dx=%lf dv/dy=%lf\n", 1.0/(1.0+d.x*d.x), 0.0, -d.x*s.y*cx*cx/coeff[1], s.y/d.y ); fflush(stderr); } #endif /* add center of distortion in source */ s.x += coeff[2]; s.y += coeff[3]; break; } case Plane2CylinderDistortion: { /* 3D Cylinder to Tangential Plane */ /* relative to center of distortion */ d.x -= coeff[4]; d.y -= coeff[5]; /* is pixel valid - horizon of a infinite Virtual-Pixel Plane * (see Anthony Thyssen's personal note) */ validity = (double) ((coeff[1]*MagickPI2 - fabs(d.x))/output_scaling + 0.5); if ( validity > 0.0 ) { double cx,tx; d.x /= coeff[1]; /* x'= x/r */ cx = 1/cos(d.x); /* cx = 1/cos(x/r) */ tx = tan(d.x); /* tx = tan(x/r) */ s.x = coeff[1]*tx; /* u = r * tan(x/r) */ s.y = d.y*cx; /* v = y / cos(x/r) */ /* derivatives... 
(see Anthony Thyssen's personal notes) */ ScaleFilter( resample_filter[id], cx*cx, 0.0, s.y*cx/coeff[1], cx ); #if 1 /*if ( i == 0 && j == 0 ) {*/ if ( d.x == 0.5 && d.y == 0.5 ) { fprintf(stderr, "x=%lf y=%lf u=%lf v=%lf\n", d.x*coeff[1], d.y, s.x, s.y); fprintf(stderr, "radius = %lf phi = %lf validity = %lf\n", coeff[1], (double)(d.x * 180.0/MagickPI), validity ); fprintf(stderr, "du/dx=%lf du/dx=%lf dv/dx=%lf dv/dy=%lf\n", cx*cx, 0.0, s.y*cx/coeff[1], cx); fflush(stderr); } #endif } /* add center of distortion in source */ s.x += coeff[2]; s.y += coeff[3]; break; } case BarrelDistortion: case BarrelInverseDistortion: { /* Lens Barrel Distionion Correction */ double r,fx,fy,gx,gy; /* Radial Polynomial Distortion (de-normalized) */ d.x -= coeff[8]; d.y -= coeff[9]; r = sqrt(d.x*d.x+d.y*d.y); if ( r > MagickEpsilon ) { fx = ((coeff[0]*r + coeff[1])*r + coeff[2])*r + coeff[3]; fy = ((coeff[4]*r + coeff[5])*r + coeff[6])*r + coeff[7]; gx = ((3*coeff[0]*r + 2*coeff[1])*r + coeff[2])/r; gy = ((3*coeff[4]*r + 2*coeff[5])*r + coeff[6])/r; /* adjust functions and scaling for 'inverse' form */ if ( method == BarrelInverseDistortion ) { fx = 1/fx; fy = 1/fy; gx *= -fx*fx; gy *= -fy*fy; } /* Set the source pixel to lookup and EWA derivative vectors */ s.x = d.x*fx + coeff[8]; s.y = d.y*fy + coeff[9]; ScaleFilter( resample_filter[id], gx*d.x*d.x + fx, gx*d.x*d.y, gy*d.x*d.y, gy*d.y*d.y + fy ); } else { /* Special handling to avoid divide by zero when r==0 ** ** The source and destination pixels match in this case ** which was set at the top of the loop using s = d; ** otherwise... s.x=coeff[8]; s.y=coeff[9]; */ if ( method == BarrelDistortion ) ScaleFilter( resample_filter[id], coeff[3], 0, 0, coeff[7] ); else /* method == BarrelInverseDistortion */ /* FUTURE, trap for D==0 causing division by zero */ ScaleFilter( resample_filter[id], 1.0/coeff[3], 0, 0, 1.0/coeff[7] ); } break; } case ShepardsDistortion: { /* Shepards Method, or Inverse Weighted Distance for displacement around the destination image control points The input arguments are the coefficents to the function. This is more of a 'displacement' function rather than an absolute distortion function. Note: We can not determine derivatives using shepards method so only a point sample interpolatation can be used. */ size_t i; double denominator; denominator = s.x = s.y = 0; for(i=0; i<number_arguments; i+=4) { double weight = ((double)d.x-arguments[i+2])*((double)d.x-arguments[i+2]) + ((double)d.y-arguments[i+3])*((double)d.y-arguments[i+3]); weight = pow(weight,coeff[0]); /* shepards power factor */ weight = ( weight < 1.0 ) ? 
1.0 : 1.0/weight; s.x += (arguments[ i ]-arguments[i+2])*weight; s.y += (arguments[i+1]-arguments[i+3])*weight; denominator += weight; } s.x /= denominator; s.y /= denominator; s.x += d.x; /* make it as relative displacement */ s.y += d.y; break; } default: break; /* use the default no-op given above */ } /* map virtual canvas location back to real image coordinate */ if ( bestfit && method != ArcDistortion ) { s.x -= image->page.x; s.y -= image->page.y; } s.x -= 0.5; s.y -= 0.5; if ( validity <= 0.0 ) { /* result of distortion is an invalid pixel - don't resample */ SetPixelPacket(distort_image,&invalid,q,indexes); } else { /* resample the source image to find its correct color */ (void) ResamplePixelColor(resample_filter[id],s.x,s.y,&pixel); /* if validity between 0.0 and 1.0 mix result with invalid pixel */ if ( validity < 1.0 ) { /* Do a blend of sample color and invalid pixel */ /* should this be a 'Blend', or an 'Over' compose */ MagickPixelCompositeBlend(&pixel,validity,&invalid,(1.0-validity), &pixel); } SetPixelPacket(distort_image,&pixel,q,indexes); } q++; indexes++; } sync=SyncCacheViewAuthenticPixels(distort_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_DistortImage) #endif proceed=SetImageProgress(image,DistortImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } distort_view=DestroyCacheView(distort_view); resample_filter=DestroyResampleFilterThreadSet(resample_filter); if (status == MagickFalse) distort_image=DestroyImage(distort_image); } /* Arc does not return an offset unless 'bestfit' is in effect And the user has not provided an overriding 'viewport'. */ if ( method == ArcDistortion && !bestfit && !viewport_given ) { distort_image->page.x = 0; distort_image->page.y = 0; } coeff=(double *) RelinquishMagickMemory(coeff); return(distort_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R o t a t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RotateImage() creates a new image that is a rotated copy of an existing % one. Positive angles rotate counter-clockwise (right-hand rule), while % negative angles rotate clockwise. Rotated images are usually larger than % the originals and have 'empty' triangular corners. X axis. Empty % triangles left over from shearing the image are filled with the background % color defined by member 'background_color' of the image. RotateImage % allocates the memory necessary for the new Image structure and returns a % pointer to the new image. % % The format of the RotateImage method is: % % Image *RotateImage(const Image *image,const double degrees, % ExceptionInfo *exception) % % A description of each parameter follows. % % o image: the image. % % o degrees: Specifies the number of degrees to rotate the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *RotateImage(const Image *image,const double degrees, ExceptionInfo *exception) { Image *distort_image, *rotate_image; MagickRealType angle; PointInfo shear; size_t rotations; /* Adjust rotation angle. 
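Illustrative cases (not from the source): degrees = 90 or 360 reduce to an exact quarter-turn count and are handled by IntegralRotateImage() below, while degrees = 100 leaves a non-zero residual shear and falls through to a ScaleRotateTranslate distortion using the full 100 degree angle.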
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); angle=fmod(degrees,360.0); while (angle < -45.0) angle+=360.0; for (rotations=0; angle > 45.0; rotations++) angle-=90.0; rotations%=4; shear.x=(-tan((double) DegreesToRadians(angle)/2.0)); shear.y=sin((double) DegreesToRadians(angle)); if ((fabs(shear.x) < MagickEpsilon) && (fabs(shear.y) < MagickEpsilon)) return(IntegralRotateImage(image,rotations,exception)); distort_image=CloneImage(image,0,0,MagickTrue,exception); if (distort_image == (Image *) NULL) return((Image *) NULL); (void) SetImageVirtualPixelMethod(distort_image,BackgroundVirtualPixelMethod); rotate_image=DistortImage(distort_image,ScaleRotateTranslateDistortion,1, &degrees,MagickTrue,exception); distort_image=DestroyImage(distort_image); return(rotate_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S p a r s e C o l o r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SparseColorImage(), given a set of coordinates, interpolates the colors % found at those coordinates, across the whole image, using various methods. % % The format of the SparseColorImage() method is: % % Image *SparseColorImage(const Image *image,const ChannelType channel, % const SparseColorMethod method,const size_t number_arguments, % const double *arguments,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image to be filled in. % % o channel: Specify which color values (in RGBKA sequence) are being set. % This also determines the number of color_values in above. % % o method: the method to fill in the gradient between the control points. % % The methods used for SparseColor() are often simular to methods % used for DistortImage(), and even share the same code for determination % of the function coefficents, though with more dimensions (or resulting % values). % % o number_arguments: the number of arguments given. % % o arguments: array of floating point arguments for this method-- % x,y,color_values-- with color_values given as normalized values. % % o exception: return any errors or warnings in this structure % */ MagickExport Image *SparseColorImage(const Image *image, const ChannelType channel,const SparseColorMethod method, const size_t number_arguments,const double *arguments, ExceptionInfo *exception) { #define SparseColorTag "Distort/SparseColor" SparseColorMethod sparse_method; double *coeff; Image *sparse_image; size_t number_colors; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); /* Determine number of color values needed per control point */ number_colors=0; if ( channel & RedChannel ) number_colors++; if ( channel & GreenChannel ) number_colors++; if ( channel & BlueChannel ) number_colors++; if ( channel & IndexChannel ) number_colors++; if ( channel & OpacityChannel ) number_colors++; /* Convert input arguments into mapping coefficients, this this case we are mapping (distorting) colors, rather than coordinates. 
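Illustrative example (hypothetical values): with a channel mask covering R, G and B, number_colors is 3 and each control point supplies five numbers x,y,r,g,b, so a two-point red-to-blue gradient could be passed as 0,0, 1,0,0, 99,99, 0,0,1 with the color components given as normalized values.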
*/ { DistortImageMethod distort_method; distort_method=(DistortImageMethod) method; if ( distort_method >= SentinelDistortion ) distort_method = ShepardsDistortion; /* Pretend to be Shepards */ coeff = GenerateCoefficients(image, &distort_method, number_arguments, arguments, number_colors, exception); if ( coeff == (double *) NULL ) return((Image *) NULL); /* Note some Distort Methods may fall back to other simpler methods, Currently the only fallback of concern is Bilinear to Affine (Barycentric), which is alaso sparse_colr method. This also ensures correct two and one color Barycentric handling. */ sparse_method = (SparseColorMethod) distort_method; if ( distort_method == ShepardsDistortion ) sparse_method = method; /* return non-distort methods to normal */ if ( sparse_method == InverseColorInterpolate ) coeff[0]=0.5; /* sqrt() the squared distance for inverse */ } /* Verbose output */ if ( GetImageArtifact(image,"verbose") != (const char *) NULL ) { switch (sparse_method) { case BarycentricColorInterpolate: { register ssize_t x=0; (void) FormatLocaleFile(stderr, "Barycentric Sparse Color:\n"); if ( channel & RedChannel ) (void) FormatLocaleFile(stderr, " -channel R -fx '%+lf*i %+lf*j %+lf' \\\n", coeff[x], coeff[x+1], coeff[x+2]),x+=3; if ( channel & GreenChannel ) (void) FormatLocaleFile(stderr, " -channel G -fx '%+lf*i %+lf*j %+lf' \\\n", coeff[x], coeff[x+1], coeff[x+2]),x+=3; if ( channel & BlueChannel ) (void) FormatLocaleFile(stderr, " -channel B -fx '%+lf*i %+lf*j %+lf' \\\n", coeff[x], coeff[x+1], coeff[x+2]),x+=3; if ( channel & IndexChannel ) (void) FormatLocaleFile(stderr, " -channel K -fx '%+lf*i %+lf*j %+lf' \\\n", coeff[x], coeff[x+1], coeff[x+2]),x+=3; if ( channel & OpacityChannel ) (void) FormatLocaleFile(stderr, " -channel A -fx '%+lf*i %+lf*j %+lf' \\\n", coeff[x], coeff[x+1], coeff[x+2]),x+=3; break; } case BilinearColorInterpolate: { register ssize_t x=0; (void) FormatLocaleFile(stderr, "Bilinear Sparse Color\n"); if ( channel & RedChannel ) (void) FormatLocaleFile(stderr, " -channel R -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n", coeff[ x ], coeff[x+1], coeff[x+2], coeff[x+3]),x+=4; if ( channel & GreenChannel ) (void) FormatLocaleFile(stderr, " -channel G -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n", coeff[ x ], coeff[x+1], coeff[x+2], coeff[x+3]),x+=4; if ( channel & BlueChannel ) (void) FormatLocaleFile(stderr, " -channel B -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n", coeff[ x ], coeff[x+1], coeff[x+2], coeff[x+3]),x+=4; if ( channel & IndexChannel ) (void) FormatLocaleFile(stderr, " -channel K -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n", coeff[ x ], coeff[x+1], coeff[x+2], coeff[x+3]),x+=4; if ( channel & OpacityChannel ) (void) FormatLocaleFile(stderr, " -channel A -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n", coeff[ x ], coeff[x+1], coeff[x+2], coeff[x+3]),x+=4; break; } default: /* sparse color method is too complex for FX emulation */ break; } } /* Generate new image for generated interpolated gradient. * ASIDE: Actually we could have just replaced the colors of the original * image, but IM Core policy, is if storage class could change then clone * the image. 
*/ sparse_image=CloneImage(image,0,0,MagickTrue,exception); if (sparse_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(sparse_image,DirectClass) == MagickFalse) { /* if image is ColorMapped - change it to DirectClass */ InheritException(exception,&image->exception); sparse_image=DestroyImage(sparse_image); return((Image *) NULL); } { /* ----- MAIN CODE ----- */ CacheView *sparse_view; MagickBooleanType status; MagickOffsetType progress; ssize_t j; status=MagickTrue; progress=0; sparse_view=AcquireAuthenticCacheView(sparse_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,sparse_image,sparse_image->rows,1) #endif for (j=0; j < (ssize_t) sparse_image->rows; j++) { MagickBooleanType sync; MagickPixelPacket pixel; /* pixel to assign to distorted image */ register IndexPacket *magick_restrict indexes; register ssize_t i; register PixelPacket *magick_restrict q; q=GetCacheViewAuthenticPixels(sparse_view,0,j,sparse_image->columns, 1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(sparse_view); GetMagickPixelPacket(sparse_image,&pixel); for (i=0; i < (ssize_t) image->columns; i++) { SetMagickPixelPacket(image,q,indexes,&pixel); switch (sparse_method) { case BarycentricColorInterpolate: { register ssize_t x=0; if ( channel & RedChannel ) pixel.red = coeff[x]*i +coeff[x+1]*j +coeff[x+2], x+=3; if ( channel & GreenChannel ) pixel.green = coeff[x]*i +coeff[x+1]*j +coeff[x+2], x+=3; if ( channel & BlueChannel ) pixel.blue = coeff[x]*i +coeff[x+1]*j +coeff[x+2], x+=3; if ( channel & IndexChannel ) pixel.index = coeff[x]*i +coeff[x+1]*j +coeff[x+2], x+=3; if ( channel & OpacityChannel ) pixel.opacity = coeff[x]*i +coeff[x+1]*j +coeff[x+2], x+=3; break; } case BilinearColorInterpolate: { register ssize_t x=0; if ( channel & RedChannel ) pixel.red = coeff[x]*i + coeff[x+1]*j + coeff[x+2]*i*j + coeff[x+3], x+=4; if ( channel & GreenChannel ) pixel.green = coeff[x]*i + coeff[x+1]*j + coeff[x+2]*i*j + coeff[x+3], x+=4; if ( channel & BlueChannel ) pixel.blue = coeff[x]*i + coeff[x+1]*j + coeff[x+2]*i*j + coeff[x+3], x+=4; if ( channel & IndexChannel ) pixel.index = coeff[x]*i + coeff[x+1]*j + coeff[x+2]*i*j + coeff[x+3], x+=4; if ( channel & OpacityChannel ) pixel.opacity = coeff[x]*i + coeff[x+1]*j + coeff[x+2]*i*j + coeff[x+3], x+=4; break; } case InverseColorInterpolate: case ShepardsColorInterpolate: { /* Inverse (Squared) Distance weights average (IDW) */ size_t k; double denominator; if ( channel & RedChannel ) pixel.red = 0.0; if ( channel & GreenChannel ) pixel.green = 0.0; if ( channel & BlueChannel ) pixel.blue = 0.0; if ( channel & IndexChannel ) pixel.index = 0.0; if ( channel & OpacityChannel ) pixel.opacity = 0.0; denominator = 0.0; for(k=0; k<number_arguments; k+=2+number_colors) { register ssize_t x=(ssize_t) k+2; double weight = ((double)i-arguments[ k ])*((double)i-arguments[ k ]) + ((double)j-arguments[k+1])*((double)j-arguments[k+1]); weight = pow(weight,coeff[0]); /* inverse of power factor */ weight = ( weight < 1.0 ) ? 
1.0 : 1.0/weight; if ( channel & RedChannel ) pixel.red += arguments[x++]*weight; if ( channel & GreenChannel ) pixel.green += arguments[x++]*weight; if ( channel & BlueChannel ) pixel.blue += arguments[x++]*weight; if ( channel & IndexChannel ) pixel.index += arguments[x++]*weight; if ( channel & OpacityChannel ) pixel.opacity += arguments[x++]*weight; denominator += weight; } if ( channel & RedChannel ) pixel.red /= denominator; if ( channel & GreenChannel ) pixel.green /= denominator; if ( channel & BlueChannel ) pixel.blue /= denominator; if ( channel & IndexChannel ) pixel.index /= denominator; if ( channel & OpacityChannel ) pixel.opacity /= denominator; break; } case ManhattanColorInterpolate: { size_t k; double minimum = MagickMaximumValue; /* Just use the closest control point you can find! */ for(k=0; k<number_arguments; k+=2+number_colors) { double distance = fabs((double)i-arguments[ k ]) + fabs((double)j-arguments[k+1]); if ( distance < minimum ) { register ssize_t x=(ssize_t) k+2; if ( channel & RedChannel ) pixel.red = arguments[x++]; if ( channel & GreenChannel ) pixel.green = arguments[x++]; if ( channel & BlueChannel ) pixel.blue = arguments[x++]; if ( channel & IndexChannel ) pixel.index = arguments[x++]; if ( channel & OpacityChannel ) pixel.opacity = arguments[x++]; minimum = distance; } } break; } case VoronoiColorInterpolate: default: { size_t k; double minimum = MagickMaximumValue; /* Just use the closest control point you can find! */ for(k=0; k<number_arguments; k+=2+number_colors) { double distance = ((double)i-arguments[ k ])*((double)i-arguments[ k ]) + ((double)j-arguments[k+1])*((double)j-arguments[k+1]); if ( distance < minimum ) { register ssize_t x=(ssize_t) k+2; if ( channel & RedChannel ) pixel.red = arguments[x++]; if ( channel & GreenChannel ) pixel.green = arguments[x++]; if ( channel & BlueChannel ) pixel.blue = arguments[x++]; if ( channel & IndexChannel ) pixel.index = arguments[x++]; if ( channel & OpacityChannel ) pixel.opacity = arguments[x++]; minimum = distance; } } break; } } /* set the color directly back into the source image */ if ( channel & RedChannel ) pixel.red=ClampPixel(QuantumRange*pixel.red); if ( channel & GreenChannel ) pixel.green=ClampPixel(QuantumRange*pixel.green); if ( channel & BlueChannel ) pixel.blue=ClampPixel(QuantumRange*pixel.blue); if ( channel & IndexChannel ) pixel.index=ClampPixel(QuantumRange*pixel.index); if ( channel & OpacityChannel ) pixel.opacity=ClampPixel(QuantumRange*pixel.opacity); SetPixelPacket(sparse_image,&pixel,q,indexes); q++; indexes++; } sync=SyncCacheViewAuthenticPixels(sparse_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SparseColorImage) #endif proceed=SetImageProgress(image,SparseColorTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } sparse_view=DestroyCacheView(sparse_view); if (status == MagickFalse) sparse_image=DestroyImage(sparse_image); } coeff = (double *) RelinquishMagickMemory(coeff); return(sparse_image); }
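
The InverseColorInterpolate/ShepardsColorInterpolate branch of SparseColorImage() above is an inverse-distance-weighted (IDW) average over the control points: the squared distance to each point is raised to coeff[0] (0.5 restores plain distance for the inverse case), floored at 1.0, and inverted to give that point's weight. The standalone sketch below reproduces the same weighting for a single scalar channel; the ControlPoint struct and idw_sample() helper are illustrative only and are not part of MagickCore.

#include <math.h>
#include <stdio.h>

/* One sparse-color control point for a single scalar channel:
   an (x,y) location and a normalized value, as SparseColorImage() expects. */
typedef struct { double x, y, value; } ControlPoint;

/* Inverse-distance weighting as in the Shepards/InverseColorInterpolate case:
   squared distance raised to 'power' (coeff[0] above), floored at 1.0, then
   inverted, so no single weight ever exceeds 1.0. */
static double idw_sample(double i, double j, const ControlPoint *points,
  size_t count, double power)
{
  double sum = 0.0, denominator = 0.0;
  size_t k;

  for (k = 0; k < count; k++)
  {
    double weight = (i - points[k].x)*(i - points[k].x) +
                    (j - points[k].y)*(j - points[k].y);
    weight = pow(weight, power);              /* inverse of power factor */
    weight = (weight < 1.0) ? 1.0 : 1.0/weight;
    sum += points[k].value*weight;
    denominator += weight;
  }
  return (denominator == 0.0) ? 0.0 : sum/denominator;
}

int main(void)
{
  /* Two control points; the sample midway between them is 0.5 by symmetry. */
  ControlPoint points[2] = { { 10.0, 10.0, 0.0 }, { 90.0, 90.0, 1.0 } };

  printf("%.4f\n", idw_sample(50.0, 50.0, points, 2, 1.0));
  return 0;
}
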
integrate_cython.c
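
The file below is C generated by Cython 0.14.1+ from integrate_cython.pyx; the .pyx source quoted in its comments shows that every variant evaluates the same midpoint-rule sum, s += f(a + (i + 1/2)*dx)*dx with dx = (b-a)/N, either through Python objects (integrate), with C types (integrate_typed), with an OpenMP prange (integrate_omp), or by delegating to lib_integrate_c()/lib_integrate_c_omp() from integrate_c.h. As a compact reference for what that generated machinery computes, here is a minimal hand-written C sketch of the same rule; integrate_midpoint() and its main() driver are illustrative and not part of the generated module.

#include <stdio.h>

/* Same integrand as the .pyx module: f(x) = x*x. */
static double f(double x)
{
  return x*x;
}

/* Midpoint rule mirroring integrate_typed(); the optional OpenMP pragma
   plays the role of the prange() loop in integrate_omp(). */
static double integrate_midpoint(double a, double b, int N)
{
  double s = 0.0, dx;
  int i;

  if (N <= 0)
    return 0.0;
  dx = (b - a)/N;
  if (dx == 0.0)                /* the .pyx prints "dx == 0!" and returns 0 */
    return 0.0;
#ifdef _OPENMP
  #pragma omp parallel for reduction(+:s)
#endif
  for (i = 0; i < N; i++)
    s += f(a + (i + 1./2.)*dx)*dx;
  return s;
}

int main(void)
{
  /* Integral of x^2 over [0,1] is 1/3; a larger N tightens the estimate. */
  printf("%.6f\n", integrate_midpoint(0.0, 1.0, 1000000));
  return 0;
}
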
/* Generated by Cython 0.14.1+ on Fri Jul 15 21:59:40 2011 */ #define PY_SSIZE_T_CLEAN #include "Python.h" #ifndef Py_PYTHON_H #error Python headers needed to compile C extensions, please install development version of Python. #else #include <stddef.h> /* For offsetof */ #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #if PY_VERSION_HEX < 0x02040000 #define METH_COEXIST 0 #define PyDict_CheckExact(op) (Py_TYPE(op) == &PyDict_Type) #define PyDict_Contains(d,o) PySequence_Contains(d,o) #endif #if PY_VERSION_HEX < 0x02050000 typedef int Py_ssize_t; #define PY_SSIZE_T_MAX INT_MAX #define PY_SSIZE_T_MIN INT_MIN #define PY_FORMAT_SIZE_T "" #define PyInt_FromSsize_t(z) PyInt_FromLong(z) #define PyInt_AsSsize_t(o) PyInt_AsLong(o) #define PyNumber_Index(o) PyNumber_Int(o) #define PyIndex_Check(o) PyNumber_Check(o) #define PyErr_WarnEx(category, message, stacklevel) PyErr_Warn(category, message) #endif #if PY_VERSION_HEX < 0x02060000 #define Py_REFCNT(ob) (((PyObject*)(ob))->ob_refcnt) #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type) #define Py_SIZE(ob) (((PyVarObject*)(ob))->ob_size) #define PyVarObject_HEAD_INIT(type, size) \ PyObject_HEAD_INIT(type) size, #define PyType_Modified(t) typedef struct { void *buf; PyObject *obj; Py_ssize_t len; Py_ssize_t itemsize; int readonly; int ndim; char *format; Py_ssize_t *shape; Py_ssize_t *strides; Py_ssize_t *suboffsets; void *internal; } Py_buffer; #define PyBUF_SIMPLE 0 #define PyBUF_WRITABLE 0x0001 #define PyBUF_FORMAT 0x0004 #define PyBUF_ND 0x0008 #define PyBUF_STRIDES (0x0010 | PyBUF_ND) #define PyBUF_C_CONTIGUOUS (0x0020 | PyBUF_STRIDES) #define PyBUF_F_CONTIGUOUS (0x0040 | PyBUF_STRIDES) #define PyBUF_ANY_CONTIGUOUS (0x0080 | PyBUF_STRIDES) #define PyBUF_INDIRECT (0x0100 | PyBUF_STRIDES) #endif #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #else #define __Pyx_BUILTIN_MODULE_NAME "builtins" #endif #if PY_MAJOR_VERSION >= 3 #define Py_TPFLAGS_CHECKTYPES 0 #define Py_TPFLAGS_HAVE_INDEX 0 #endif #if (PY_VERSION_HEX < 0x02060000) || (PY_MAJOR_VERSION >= 3) #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #endif #if PY_VERSION_HEX < 0x02060000 #define PyBytesObject PyStringObject #define PyBytes_Type PyString_Type #define PyBytes_Check PyString_Check #define PyBytes_CheckExact PyString_CheckExact #define PyBytes_FromString PyString_FromString #define PyBytes_FromStringAndSize PyString_FromStringAndSize #define PyBytes_FromFormat PyString_FromFormat #define PyBytes_DecodeEscape PyString_DecodeEscape #define PyBytes_AsString PyString_AsString #define PyBytes_AsStringAndSize PyString_AsStringAndSize #define PyBytes_Size PyString_Size #define PyBytes_AS_STRING PyString_AS_STRING #define PyBytes_GET_SIZE PyString_GET_SIZE #define PyBytes_Repr PyString_Repr #define PyBytes_Concat PyString_Concat #define PyBytes_ConcatAndDel PyString_ConcatAndDel #endif #if PY_VERSION_HEX < 0x02060000 #define PySet_Check(obj) PyObject_TypeCheck(obj, &PySet_Type) 
#define PyFrozenSet_Check(obj) PyObject_TypeCheck(obj, &PyFrozenSet_Type) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_VERSION_HEX < 0x03020000 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define __Pyx_PyInt_AsHash_t PyInt_AsLong #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) #endif #if (PY_MAJOR_VERSION < 3) || (PY_VERSION_HEX >= 0x03010300) #define __Pyx_PySequence_GetSlice(obj, a, b) PySequence_GetSlice(obj, a, b) #define __Pyx_PySequence_SetSlice(obj, a, b, value) PySequence_SetSlice(obj, a, b, value) #define __Pyx_PySequence_DelSlice(obj, a, b) PySequence_DelSlice(obj, a, b) #else #define __Pyx_PySequence_GetSlice(obj, a, b) (unlikely(!(obj)) ? \ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), (PyObject*)0) : \ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_GetSlice(obj, a, b)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object is unsliceable", (obj)->ob_type->tp_name), (PyObject*)0))) #define __Pyx_PySequence_SetSlice(obj, a, b, value) (unlikely(!(obj)) ? \ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_SetSlice(obj, a, b, value)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice assignment", (obj)->ob_type->tp_name), -1))) #define __Pyx_PySequence_DelSlice(obj, a, b) (unlikely(!(obj)) ? \ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_DelSlice(obj, a, b)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice deletion", (obj)->ob_type->tp_name), -1))) #endif #if PY_MAJOR_VERSION >= 3 #define PyMethod_New(func, self, klass) ((self) ? 
PyMethod_New(func, self) : PyInstanceMethod_New(func)) #endif #if PY_VERSION_HEX < 0x02050000 #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),((char *)(n))) #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),((char *)(n)),(a)) #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),((char *)(n))) #else #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),(n)) #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),(n),(a)) #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),(n)) #endif #if PY_VERSION_HEX < 0x02050000 #define __Pyx_NAMESTR(n) ((char *)(n)) #define __Pyx_DOCSTR(n) ((char *)(n)) #else #define __Pyx_NAMESTR(n) (n) #define __Pyx_DOCSTR(n) (n) #endif #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include <math.h> #define __PYX_HAVE__integrate_cython #define __PYX_HAVE_API__integrate_cython #include "integrate_c.h" #ifdef _OPENMP #include <omp.h> #endif /* _OPENMP */ #ifdef PYREX_WITHOUT_ASSERTIONS #define CYTHON_WITHOUT_ASSERTIONS #endif /* inline attribute */ #ifndef CYTHON_INLINE #if defined(__GNUC__) #define CYTHON_INLINE __inline__ #elif defined(_MSC_VER) #define CYTHON_INLINE __inline #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_INLINE inline #else #define CYTHON_INLINE #endif #endif /* unused attribute */ #ifndef CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif # elif defined(__ICC) || defined(__INTEL_COMPILER) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif typedef struct {PyObject **p; char *s; const long n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; /*proto*/ /* Type Conversion Predeclarations */ #define __Pyx_PyBytes_FromUString(s) PyBytes_FromString((char*)s) #define __Pyx_PyBytes_AsUString(s) ((unsigned char*) PyBytes_AsString(s)) #define __Pyx_Owned_Py_None(b) (Py_INCREF(Py_None), Py_None) #define __Pyx_PyBool_FromLong(b) ((b) ? (Py_INCREF(Py_True), Py_True) : (Py_INCREF(Py_False), Py_False)) static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x); static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject*); #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) #ifdef __GNUC__ /* Test for GCC > 2.95 */ #if __GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else /* __GNUC__ > 2 ... */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ > 2 ... 
*/ #else /* __GNUC__ */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ */ static PyObject *__pyx_m; static PyObject *__pyx_b; static PyObject *__pyx_empty_tuple; static PyObject *__pyx_empty_bytes; static int __pyx_lineno; static int __pyx_clineno = 0; static const char * __pyx_cfilenm= __FILE__; static const char *__pyx_filename; static const char *__pyx_f[] = { "integrate_cython.pyx", }; /*--- Type declarations ---*/ #ifndef CYTHON_REFNANNY #define CYTHON_REFNANNY 0 #endif #if CYTHON_REFNANNY typedef struct { void (*INCREF)(void*, PyObject*, int); void (*DECREF)(void*, PyObject*, int); void (*GOTREF)(void*, PyObject*, int); void (*GIVEREF)(void*, PyObject*, int); void* (*SetupContext)(const char*, int, const char*); void (*FinishContext)(void**); } __Pyx_RefNannyAPIStruct; static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); /*proto*/ #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; #define __Pyx_RefNannySetupContext(name) __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) #define __Pyx_RefNannyFinishContext() __Pyx_RefNanny->FinishContext(&__pyx_refnanny) #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define __Pyx_RefNannySetupContext(name) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define __Pyx_GOTREF(r) #define __Pyx_GIVEREF(r) #define __Pyx_XINCREF(r) Py_XINCREF(r) #define __Pyx_XDECREF(r) Py_XDECREF(r) #define __Pyx_XGOTREF(r) #define __Pyx_XGIVEREF(r) #endif /* CYTHON_REFNANNY */ static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name); /*proto*/ static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /*proto*/ static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name); /*proto*/ static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name); /*proto*/ static int __Pyx_Print(PyObject*, PyObject *, int); /*proto*/ #if PY_MAJOR_VERSION >= 3 static PyObject* __pyx_print = 0; static PyObject* __pyx_print_kwargs = 0; #endif static int __Pyx_PrintOne(PyObject* stream, PyObject *o); /*proto*/ static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject *); static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject *); static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject *); static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject *); static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject *); static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject *); static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject *); static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject *); 
static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject *); static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject *); static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject *); static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject *); static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject *); static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject *); static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject *); static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject *); static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb); /*proto*/ static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb); /*proto*/ static void __Pyx_WriteUnraisable(const char *name, int clineno, int lineno, const char *filename); /*proto*/ static int __Pyx_check_binary_version(void); static void __Pyx_AddTraceback(const char *funcname, int __pyx_clineno, int __pyx_lineno, const char *__pyx_filename); /*proto*/ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /*proto*/ /* Module declarations from 'integrate_cython' */ static double __pyx_f_16integrate_cython_f(double, int __pyx_skip_dispatch); /*proto*/ static PyObject *__pyx_f_16integrate_cython_integrate(PyObject *, PyObject *, PyObject *, int __pyx_skip_dispatch); /*proto*/ static double __pyx_f_16integrate_cython_integrate_typed(double, double, int, int __pyx_skip_dispatch); /*proto*/ static double __pyx_f_16integrate_cython_integrate_omp(double, double, int, int __pyx_skip_dispatch); /*proto*/ static double __pyx_f_16integrate_cython_integrate_c(double, double, int, int __pyx_skip_dispatch); /*proto*/ static double __pyx_f_16integrate_cython_integrate_c_omp(double, double, int, int __pyx_skip_dispatch); /*proto*/ #define __Pyx_MODULE_NAME "integrate_cython" int __pyx_module_is_main_integrate_cython = 0; /* Implementation of 'integrate_cython' */ static PyObject *__pyx_builtin_xrange; static char __pyx_k_1[] = "dx == 0!"; static char __pyx_k__N[] = "N"; static char __pyx_k__a[] = "a"; static char __pyx_k__b[] = "b"; static char __pyx_k__range[] = "range"; static char __pyx_k__xrange[] = "xrange"; static char __pyx_k____main__[] = "__main__"; static char __pyx_k____test__[] = "__test__"; static PyObject *__pyx_kp_s_1; static PyObject *__pyx_n_s__N; static PyObject *__pyx_n_s____main__; static PyObject *__pyx_n_s____test__; static PyObject *__pyx_n_s__a; static PyObject *__pyx_n_s__b; static PyObject *__pyx_n_s__range; static PyObject *__pyx_n_s__xrange; static PyObject *__pyx_int_0; /* "integrate_cython.pyx":11 * double lib_integrate_c_omp(double a, double b, int N) * * cpdef double f(double x) nogil: # <<<<<<<<<<<<<< * return x*x * */ static PyObject *__pyx_pf_16integrate_cython_f(PyObject *__pyx_self, PyObject *__pyx_arg_x); /*proto*/ static double __pyx_f_16integrate_cython_f(double __pyx_v_x, int __pyx_skip_dispatch) { double __pyx_r; /* "integrate_cython.pyx":12 * * cpdef double f(double x) nogil: * return x*x # <<<<<<<<<<<<<< * * cpdef integrate(a, b, N): */ __pyx_r = (__pyx_v_x * __pyx_v_x); goto __pyx_L0; __pyx_r = 0; __pyx_L0:; return __pyx_r; } /* "integrate_cython.pyx":11 * double lib_integrate_c_omp(double a, double b, int N) * * cpdef double f(double x) nogil: # <<<<<<<<<<<<<< * return x*x * */ static PyObject *__pyx_pf_16integrate_cython_f(PyObject *__pyx_self, PyObject *__pyx_arg_x); /*proto*/ static PyObject *__pyx_pf_16integrate_cython_f(PyObject *__pyx_self, PyObject 
*__pyx_arg_x) { double __pyx_v_x; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("f"); __pyx_self = __pyx_self; assert(__pyx_arg_x); { __pyx_v_x = __pyx_PyFloat_AsDouble(__pyx_arg_x); if (unlikely((__pyx_v_x == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 11; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; __Pyx_AddTraceback("integrate_cython.f", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyFloat_FromDouble(__pyx_f_16integrate_cython_f(__pyx_v_x, 0)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 11; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("integrate_cython.f", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "integrate_cython.pyx":14 * return x*x * * cpdef integrate(a, b, N): # <<<<<<<<<<<<<< * s = 0 * dx = (b-a)/N */ static PyObject *__pyx_pf_16integrate_cython_1integrate(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyObject *__pyx_f_16integrate_cython_integrate(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_N, int __pyx_skip_dispatch) { PyObject *__pyx_v_s = NULL; PyObject *__pyx_v_dx = NULL; PyObject *__pyx_v_i = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_t_3; Py_ssize_t __pyx_t_4; PyObject *(*__pyx_t_5)(PyObject *); PyObject *__pyx_t_6 = NULL; double __pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("integrate"); /* "integrate_cython.pyx":15 * * cpdef integrate(a, b, N): * s = 0 # <<<<<<<<<<<<<< * dx = (b-a)/N * */ __Pyx_INCREF(__pyx_int_0); __pyx_v_s = __pyx_int_0; /* "integrate_cython.pyx":16 * cpdef integrate(a, b, N): * s = 0 * dx = (b-a)/N # <<<<<<<<<<<<<< * * if dx == 0: */ __pyx_t_1 = PyNumber_Subtract(__pyx_v_b, __pyx_v_a); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 16; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyNumber_Divide(__pyx_t_1, __pyx_v_N); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 16; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_dx = __pyx_t_2; __pyx_t_2 = 0; /* "integrate_cython.pyx":18 * dx = (b-a)/N * * if dx == 0: # <<<<<<<<<<<<<< * print "dx == 0!" * return 0 */ __pyx_t_2 = PyObject_RichCompare(__pyx_v_dx, __pyx_int_0, Py_EQ); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 18; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_3 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 18; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (__pyx_t_3) { /* "integrate_cython.pyx":19 * * if dx == 0: * print "dx == 0!" 
# <<<<<<<<<<<<<< * return 0 * */ if (__Pyx_PrintOne(0, ((PyObject *)__pyx_kp_s_1)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 19; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "integrate_cython.pyx":20 * if dx == 0: * print "dx == 0!" * return 0 # <<<<<<<<<<<<<< * * for i in xrange(N): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_int_0); __pyx_r = __pyx_int_0; goto __pyx_L0; goto __pyx_L3; } __pyx_L3:; /* "integrate_cython.pyx":22 * return 0 * * for i in xrange(N): # <<<<<<<<<<<<<< * s += f(a + (i + 1./2.)*dx)*dx * return s */ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_2)); __Pyx_INCREF(__pyx_v_N); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_N); __Pyx_GIVEREF(__pyx_v_N); __pyx_t_1 = PyObject_Call(__pyx_builtin_xrange, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; if (PyList_CheckExact(__pyx_t_1) || PyTuple_CheckExact(__pyx_t_1)) { __pyx_t_2 = __pyx_t_1; __Pyx_INCREF(__pyx_t_2); __pyx_t_4 = 0; __pyx_t_5 = NULL; } else { __pyx_t_4 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_t_1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_5 = Py_TYPE(__pyx_t_2)->tp_iternext; } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; for (;;) { if (PyList_CheckExact(__pyx_t_2)) { if (__pyx_t_4 >= PyList_GET_SIZE(__pyx_t_2)) break; __pyx_t_1 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_4); __Pyx_INCREF(__pyx_t_1); __pyx_t_4++; } else if (PyTuple_CheckExact(__pyx_t_2)) { if (__pyx_t_4 >= PyTuple_GET_SIZE(__pyx_t_2)) break; __pyx_t_1 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_4); __Pyx_INCREF(__pyx_t_1); __pyx_t_4++; } else { __pyx_t_1 = __pyx_t_5(__pyx_t_2); if (unlikely(!__pyx_t_1)) { if (PyErr_Occurred()) { if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) PyErr_Clear(); else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } break; } __Pyx_GOTREF(__pyx_t_1); } __Pyx_XDECREF(__pyx_v_i); __pyx_v_i = __pyx_t_1; __pyx_t_1 = 0; /* "integrate_cython.pyx":23 * * for i in xrange(N): * s += f(a + (i + 1./2.)*dx)*dx # <<<<<<<<<<<<<< * return s * */ __pyx_t_1 = PyFloat_FromDouble((1. 
/ 2.)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 23; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_6 = PyNumber_Add(__pyx_v_i, __pyx_t_1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 23; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyNumber_Multiply(__pyx_t_6, __pyx_v_dx); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 23; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = PyNumber_Add(__pyx_v_a, __pyx_t_1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 23; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_7 = __pyx_PyFloat_AsDouble(__pyx_t_6); if (unlikely((__pyx_t_7 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 23; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = PyFloat_FromDouble(__pyx_f_16integrate_cython_f(__pyx_t_7, 0)); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 23; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __pyx_t_1 = PyNumber_Multiply(__pyx_t_6, __pyx_v_dx); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 23; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = PyNumber_InPlaceAdd(__pyx_v_s, __pyx_t_1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 23; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_v_s); __pyx_v_s = __pyx_t_6; __pyx_t_6 = 0; } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "integrate_cython.pyx":24 * for i in xrange(N): * s += f(a + (i + 1./2.)*dx)*dx * return s # <<<<<<<<<<<<<< * * cpdef double integrate_typed(double a, double b, int N): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_s); __pyx_r = __pyx_v_s; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("integrate_cython.integrate", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_s); __Pyx_XDECREF(__pyx_v_dx); __Pyx_XDECREF(__pyx_v_i); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "integrate_cython.pyx":14 * return x*x * * cpdef integrate(a, b, N): # <<<<<<<<<<<<<< * s = 0 * dx = (b-a)/N */ static PyObject *__pyx_pf_16integrate_cython_1integrate(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyObject *__pyx_pf_16integrate_cython_1integrate(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_a = 0; PyObject *__pyx_v_b = 0; PyObject *__pyx_v_N = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__a,&__pyx_n_s__b,&__pyx_n_s__N,0}; __Pyx_RefNannySetupContext("integrate"); __pyx_self = __pyx_self; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args = PyDict_Size(__pyx_kwds); PyObject* values[3] = {0,0,0}; switch (PyTuple_GET_SIZE(__pyx_args)) { case 3: values[2] = 
PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } switch (PyTuple_GET_SIZE(__pyx_args)) { case 0: values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__a); if (likely(values[0])) kw_args--; else goto __pyx_L5_argtuple_error; case 1: values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__b); if (likely(values[1])) kw_args--; else { __Pyx_RaiseArgtupleInvalid("integrate", 1, 3, 3, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__N); if (likely(values[2])) kw_args--; else { __Pyx_RaiseArgtupleInvalid("integrate", 1, 3, 3, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "integrate") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } __pyx_v_a = values[0]; __pyx_v_b = values[1]; __pyx_v_N = values[2]; } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { goto __pyx_L5_argtuple_error; } else { __pyx_v_a = PyTuple_GET_ITEM(__pyx_args, 0); __pyx_v_b = PyTuple_GET_ITEM(__pyx_args, 1); __pyx_v_N = PyTuple_GET_ITEM(__pyx_args, 2); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("integrate", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("integrate_cython.integrate", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __pyx_f_16integrate_cython_integrate(__pyx_v_a, __pyx_v_b, __pyx_v_N, 0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("integrate_cython.integrate", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "integrate_cython.pyx":26 * return s * * cpdef double integrate_typed(double a, double b, int N): # <<<<<<<<<<<<<< * cdef double s = 0 * cdef double dx = (b-a)/N */ static PyObject *__pyx_pf_16integrate_cython_2integrate_typed(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static double __pyx_f_16integrate_cython_integrate_typed(double __pyx_v_a, double __pyx_v_b, int __pyx_v_N, int __pyx_skip_dispatch) { double __pyx_v_s; double __pyx_v_dx; int __pyx_v_i; double __pyx_r; __Pyx_RefNannyDeclarations double __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("integrate_typed"); /* "integrate_cython.pyx":27 * * cpdef double integrate_typed(double a, double b, int N): * cdef double s = 0 # <<<<<<<<<<<<<< * cdef double dx = (b-a)/N * cdef int i */ __pyx_v_s = 0.0; /* "integrate_cython.pyx":28 * cpdef double integrate_typed(double a, double b, int N): * cdef double s = 0 * cdef double dx = (b-a)/N # <<<<<<<<<<<<<< * cdef int i * */ 
__pyx_t_1 = (__pyx_v_b - __pyx_v_a); if (unlikely(__pyx_v_N == 0)) { PyErr_Format(PyExc_ZeroDivisionError, "float division"); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 28; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_dx = (__pyx_t_1 / __pyx_v_N); /* "integrate_cython.pyx":31 * cdef int i * * if dx == 0: # <<<<<<<<<<<<<< * print "dx == 0!" * return 0 */ __pyx_t_2 = (__pyx_v_dx == 0.0); if (__pyx_t_2) { /* "integrate_cython.pyx":32 * * if dx == 0: * print "dx == 0!" # <<<<<<<<<<<<<< * return 0 * */ if (__Pyx_PrintOne(0, ((PyObject *)__pyx_kp_s_1)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "integrate_cython.pyx":33 * if dx == 0: * print "dx == 0!" * return 0 # <<<<<<<<<<<<<< * * for i in xrange(N): */ __pyx_r = 0.0; goto __pyx_L0; goto __pyx_L3; } __pyx_L3:; /* "integrate_cython.pyx":35 * return 0 * * for i in xrange(N): # <<<<<<<<<<<<<< * s += f(a + (i + 1./2.)*dx)*dx * return s */ __pyx_t_3 = __pyx_v_N; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "integrate_cython.pyx":36 * * for i in xrange(N): * s += f(a + (i + 1./2.)*dx)*dx # <<<<<<<<<<<<<< * return s * */ __pyx_v_s = (__pyx_v_s + (__pyx_f_16integrate_cython_f((__pyx_v_a + ((__pyx_v_i + (1. / 2.)) * __pyx_v_dx)), 0) * __pyx_v_dx)); } /* "integrate_cython.pyx":37 * for i in xrange(N): * s += f(a + (i + 1./2.)*dx)*dx * return s # <<<<<<<<<<<<<< * * cpdef double integrate_omp(double a, double b, int N): */ __pyx_r = __pyx_v_s; goto __pyx_L0; __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_WriteUnraisable("integrate_cython.integrate_typed", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "integrate_cython.pyx":26 * return s * * cpdef double integrate_typed(double a, double b, int N): # <<<<<<<<<<<<<< * cdef double s = 0 * cdef double dx = (b-a)/N */ static PyObject *__pyx_pf_16integrate_cython_2integrate_typed(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyObject *__pyx_pf_16integrate_cython_2integrate_typed(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { double __pyx_v_a; double __pyx_v_b; int __pyx_v_N; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__a,&__pyx_n_s__b,&__pyx_n_s__N,0}; __Pyx_RefNannySetupContext("integrate_typed"); __pyx_self = __pyx_self; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args = PyDict_Size(__pyx_kwds); PyObject* values[3] = {0,0,0}; switch (PyTuple_GET_SIZE(__pyx_args)) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } switch (PyTuple_GET_SIZE(__pyx_args)) { case 0: values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__a); if (likely(values[0])) kw_args--; else goto __pyx_L5_argtuple_error; case 1: values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__b); if (likely(values[1])) kw_args--; else { __Pyx_RaiseArgtupleInvalid("integrate_typed", 1, 3, 3, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 26; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__N); if (likely(values[2])) kw_args--; else { __Pyx_RaiseArgtupleInvalid("integrate_typed", 1, 3, 3, 2); {__pyx_filename = __pyx_f[0]; 
__pyx_lineno = 26; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "integrate_typed") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 26; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } __pyx_v_a = __pyx_PyFloat_AsDouble(values[0]); if (unlikely((__pyx_v_a == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 26; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_b = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_b == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 26; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_N = __Pyx_PyInt_AsInt(values[2]); if (unlikely((__pyx_v_N == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 26; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { goto __pyx_L5_argtuple_error; } else { __pyx_v_a = __pyx_PyFloat_AsDouble(PyTuple_GET_ITEM(__pyx_args, 0)); if (unlikely((__pyx_v_a == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 26; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_b = __pyx_PyFloat_AsDouble(PyTuple_GET_ITEM(__pyx_args, 1)); if (unlikely((__pyx_v_b == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 26; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_N = __Pyx_PyInt_AsInt(PyTuple_GET_ITEM(__pyx_args, 2)); if (unlikely((__pyx_v_N == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 26; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("integrate_typed", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 26; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("integrate_cython.integrate_typed", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyFloat_FromDouble(__pyx_f_16integrate_cython_integrate_typed(__pyx_v_a, __pyx_v_b, __pyx_v_N, 0)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 26; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("integrate_cython.integrate_typed", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "integrate_cython.pyx":39 * return s * * cpdef double integrate_omp(double a, double b, int N): # <<<<<<<<<<<<<< * cdef double s = 0 * cdef double dx = (b-a)/N */ static PyObject *__pyx_pf_16integrate_cython_3integrate_omp(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static double __pyx_f_16integrate_cython_integrate_omp(double __pyx_v_a, double __pyx_v_b, int __pyx_v_N, int __pyx_skip_dispatch) { double __pyx_v_s; double __pyx_v_dx; int __pyx_v_i; double __pyx_r; __Pyx_RefNannyDeclarations double __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_t_5; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("integrate_omp"); /* "integrate_cython.pyx":40 * * 
cpdef double integrate_omp(double a, double b, int N): * cdef double s = 0 # <<<<<<<<<<<<<< * cdef double dx = (b-a)/N * cdef int i */ __pyx_v_s = 0.0; /* "integrate_cython.pyx":41 * cpdef double integrate_omp(double a, double b, int N): * cdef double s = 0 * cdef double dx = (b-a)/N # <<<<<<<<<<<<<< * cdef int i * */ __pyx_t_1 = (__pyx_v_b - __pyx_v_a); if (unlikely(__pyx_v_N == 0)) { PyErr_Format(PyExc_ZeroDivisionError, "float division"); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 41; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_dx = (__pyx_t_1 / __pyx_v_N); /* "integrate_cython.pyx":44 * cdef int i * * if dx == 0: # <<<<<<<<<<<<<< * print "dx == 0!" * return 0 */ __pyx_t_2 = (__pyx_v_dx == 0.0); if (__pyx_t_2) { /* "integrate_cython.pyx":45 * * if dx == 0: * print "dx == 0!" # <<<<<<<<<<<<<< * return 0 * */ if (__Pyx_PrintOne(0, ((PyObject *)__pyx_kp_s_1)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 45; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "integrate_cython.pyx":46 * if dx == 0: * print "dx == 0!" * return 0 # <<<<<<<<<<<<<< * * for i in prange(N, nogil=True, schedule=guided): */ __pyx_r = 0.0; goto __pyx_L0; goto __pyx_L3; } __pyx_L3:; /* "integrate_cython.pyx":48 * return 0 * * for i in prange(N, nogil=True, schedule=guided): # <<<<<<<<<<<<<< * s += f(a + (i + 1./2.)*dx)*dx * return s */ { #ifdef WITH_THREAD PyThreadState *_save = NULL; #endif Py_UNBLOCK_THREADS /*try:*/ { __pyx_t_3 = __pyx_v_N; if (1 == 0) abort(); __pyx_t_5 = (__pyx_t_3 - 0) / 1; if (__pyx_t_5 > 0) { __pyx_v_i = 0; #ifdef _OPENMP #pragma omp parallel for reduction(+:__pyx_v_s) firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) #endif /* _OPENMP */ for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_5; __pyx_t_4++){ __pyx_v_i = 0 + 1 * __pyx_t_4; /* Initialize private variables to invalid values */ /* "integrate_cython.pyx":49 * * for i in prange(N, nogil=True, schedule=guided): * s += f(a + (i + 1./2.)*dx)*dx # <<<<<<<<<<<<<< * return s * */ __pyx_v_s = (__pyx_v_s + (__pyx_f_16integrate_cython_f((__pyx_v_a + ((__pyx_v_i + (1. 
/ 2.)) * __pyx_v_dx)), 0) * __pyx_v_dx)); } } } /* "integrate_cython.pyx":48 * return 0 * * for i in prange(N, nogil=True, schedule=guided): # <<<<<<<<<<<<<< * s += f(a + (i + 1./2.)*dx)*dx * return s */ /*finally:*/ { Py_BLOCK_THREADS } } /* "integrate_cython.pyx":50 * for i in prange(N, nogil=True, schedule=guided): * s += f(a + (i + 1./2.)*dx)*dx * return s # <<<<<<<<<<<<<< * * cpdef double integrate_c(double a, double b, int N): */ __pyx_r = __pyx_v_s; goto __pyx_L0; __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_WriteUnraisable("integrate_cython.integrate_omp", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "integrate_cython.pyx":39 * return s * * cpdef double integrate_omp(double a, double b, int N): # <<<<<<<<<<<<<< * cdef double s = 0 * cdef double dx = (b-a)/N */ static PyObject *__pyx_pf_16integrate_cython_3integrate_omp(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyObject *__pyx_pf_16integrate_cython_3integrate_omp(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { double __pyx_v_a; double __pyx_v_b; int __pyx_v_N; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__a,&__pyx_n_s__b,&__pyx_n_s__N,0}; __Pyx_RefNannySetupContext("integrate_omp"); __pyx_self = __pyx_self; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args = PyDict_Size(__pyx_kwds); PyObject* values[3] = {0,0,0}; switch (PyTuple_GET_SIZE(__pyx_args)) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } switch (PyTuple_GET_SIZE(__pyx_args)) { case 0: values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__a); if (likely(values[0])) kw_args--; else goto __pyx_L5_argtuple_error; case 1: values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__b); if (likely(values[1])) kw_args--; else { __Pyx_RaiseArgtupleInvalid("integrate_omp", 1, 3, 3, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 39; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__N); if (likely(values[2])) kw_args--; else { __Pyx_RaiseArgtupleInvalid("integrate_omp", 1, 3, 3, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 39; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "integrate_omp") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 39; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } __pyx_v_a = __pyx_PyFloat_AsDouble(values[0]); if (unlikely((__pyx_v_a == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 39; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_b = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_b == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 39; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_N = __Pyx_PyInt_AsInt(values[2]); if (unlikely((__pyx_v_N == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 39; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { goto __pyx_L5_argtuple_error; } else { __pyx_v_a = 
__pyx_PyFloat_AsDouble(PyTuple_GET_ITEM(__pyx_args, 0)); if (unlikely((__pyx_v_a == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 39; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_b = __pyx_PyFloat_AsDouble(PyTuple_GET_ITEM(__pyx_args, 1)); if (unlikely((__pyx_v_b == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 39; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_N = __Pyx_PyInt_AsInt(PyTuple_GET_ITEM(__pyx_args, 2)); if (unlikely((__pyx_v_N == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 39; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("integrate_omp", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 39; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("integrate_cython.integrate_omp", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyFloat_FromDouble(__pyx_f_16integrate_cython_integrate_omp(__pyx_v_a, __pyx_v_b, __pyx_v_N, 0)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 39; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("integrate_cython.integrate_omp", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "integrate_cython.pyx":52 * return s * * cpdef double integrate_c(double a, double b, int N): # <<<<<<<<<<<<<< * return lib_integrate_c(a, b, N) * */ static PyObject *__pyx_pf_16integrate_cython_4integrate_c(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static double __pyx_f_16integrate_cython_integrate_c(double __pyx_v_a, double __pyx_v_b, int __pyx_v_N, int __pyx_skip_dispatch) { double __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("integrate_c"); /* "integrate_cython.pyx":53 * * cpdef double integrate_c(double a, double b, int N): * return lib_integrate_c(a, b, N) # <<<<<<<<<<<<<< * * cpdef double integrate_c_omp(double a, double b, int N): */ __pyx_r = lib_integrate_c(__pyx_v_a, __pyx_v_b, __pyx_v_N); goto __pyx_L0; __pyx_r = 0; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "integrate_cython.pyx":52 * return s * * cpdef double integrate_c(double a, double b, int N): # <<<<<<<<<<<<<< * return lib_integrate_c(a, b, N) * */ static PyObject *__pyx_pf_16integrate_cython_4integrate_c(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyObject *__pyx_pf_16integrate_cython_4integrate_c(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { double __pyx_v_a; double __pyx_v_b; int __pyx_v_N; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__a,&__pyx_n_s__b,&__pyx_n_s__N,0}; __Pyx_RefNannySetupContext("integrate_c"); __pyx_self = __pyx_self; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args = PyDict_Size(__pyx_kwds); PyObject* values[3] = {0,0,0}; switch (PyTuple_GET_SIZE(__pyx_args)) { case 3: values[2] = 
PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } switch (PyTuple_GET_SIZE(__pyx_args)) { case 0: values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__a); if (likely(values[0])) kw_args--; else goto __pyx_L5_argtuple_error; case 1: values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__b); if (likely(values[1])) kw_args--; else { __Pyx_RaiseArgtupleInvalid("integrate_c", 1, 3, 3, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 52; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__N); if (likely(values[2])) kw_args--; else { __Pyx_RaiseArgtupleInvalid("integrate_c", 1, 3, 3, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 52; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "integrate_c") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 52; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } __pyx_v_a = __pyx_PyFloat_AsDouble(values[0]); if (unlikely((__pyx_v_a == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 52; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_b = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_b == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 52; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_N = __Pyx_PyInt_AsInt(values[2]); if (unlikely((__pyx_v_N == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 52; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { goto __pyx_L5_argtuple_error; } else { __pyx_v_a = __pyx_PyFloat_AsDouble(PyTuple_GET_ITEM(__pyx_args, 0)); if (unlikely((__pyx_v_a == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 52; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_b = __pyx_PyFloat_AsDouble(PyTuple_GET_ITEM(__pyx_args, 1)); if (unlikely((__pyx_v_b == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 52; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_N = __Pyx_PyInt_AsInt(PyTuple_GET_ITEM(__pyx_args, 2)); if (unlikely((__pyx_v_N == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 52; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("integrate_c", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 52; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("integrate_cython.integrate_c", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyFloat_FromDouble(__pyx_f_16integrate_cython_integrate_c(__pyx_v_a, __pyx_v_b, __pyx_v_N, 0)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 52; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("integrate_cython.integrate_c", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); 
__Pyx_RefNannyFinishContext(); return __pyx_r; } /* "integrate_cython.pyx":55 * return lib_integrate_c(a, b, N) * * cpdef double integrate_c_omp(double a, double b, int N): # <<<<<<<<<<<<<< * return lib_integrate_c_omp(a, b, N) */ static PyObject *__pyx_pf_16integrate_cython_5integrate_c_omp(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static double __pyx_f_16integrate_cython_integrate_c_omp(double __pyx_v_a, double __pyx_v_b, int __pyx_v_N, int __pyx_skip_dispatch) { double __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("integrate_c_omp"); /* "integrate_cython.pyx":56 * * cpdef double integrate_c_omp(double a, double b, int N): * return lib_integrate_c_omp(a, b, N) # <<<<<<<<<<<<<< */ __pyx_r = lib_integrate_c_omp(__pyx_v_a, __pyx_v_b, __pyx_v_N); goto __pyx_L0; __pyx_r = 0; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "integrate_cython.pyx":55 * return lib_integrate_c(a, b, N) * * cpdef double integrate_c_omp(double a, double b, int N): # <<<<<<<<<<<<<< * return lib_integrate_c_omp(a, b, N) */ static PyObject *__pyx_pf_16integrate_cython_5integrate_c_omp(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyObject *__pyx_pf_16integrate_cython_5integrate_c_omp(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { double __pyx_v_a; double __pyx_v_b; int __pyx_v_N; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__a,&__pyx_n_s__b,&__pyx_n_s__N,0}; __Pyx_RefNannySetupContext("integrate_c_omp"); __pyx_self = __pyx_self; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args = PyDict_Size(__pyx_kwds); PyObject* values[3] = {0,0,0}; switch (PyTuple_GET_SIZE(__pyx_args)) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } switch (PyTuple_GET_SIZE(__pyx_args)) { case 0: values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__a); if (likely(values[0])) kw_args--; else goto __pyx_L5_argtuple_error; case 1: values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__b); if (likely(values[1])) kw_args--; else { __Pyx_RaiseArgtupleInvalid("integrate_c_omp", 1, 3, 3, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 55; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__N); if (likely(values[2])) kw_args--; else { __Pyx_RaiseArgtupleInvalid("integrate_c_omp", 1, 3, 3, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 55; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "integrate_c_omp") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 55; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } __pyx_v_a = __pyx_PyFloat_AsDouble(values[0]); if (unlikely((__pyx_v_a == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 55; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_b = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_b == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 55; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_N = __Pyx_PyInt_AsInt(values[2]); if (unlikely((__pyx_v_N == (int)-1) && PyErr_Occurred())) 
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 55; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { goto __pyx_L5_argtuple_error; } else { __pyx_v_a = __pyx_PyFloat_AsDouble(PyTuple_GET_ITEM(__pyx_args, 0)); if (unlikely((__pyx_v_a == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 55; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_b = __pyx_PyFloat_AsDouble(PyTuple_GET_ITEM(__pyx_args, 1)); if (unlikely((__pyx_v_b == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 55; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_N = __Pyx_PyInt_AsInt(PyTuple_GET_ITEM(__pyx_args, 2)); if (unlikely((__pyx_v_N == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 55; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("integrate_c_omp", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 55; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("integrate_cython.integrate_c_omp", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyFloat_FromDouble(__pyx_f_16integrate_cython_integrate_c_omp(__pyx_v_a, __pyx_v_b, __pyx_v_N, 0)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 55; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("integrate_cython.integrate_c_omp", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyMethodDef __pyx_methods[] = { {__Pyx_NAMESTR("f"), (PyCFunction)__pyx_pf_16integrate_cython_f, METH_O, __Pyx_DOCSTR(0)}, {__Pyx_NAMESTR("integrate"), (PyCFunction)__pyx_pf_16integrate_cython_1integrate, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(0)}, {__Pyx_NAMESTR("integrate_typed"), (PyCFunction)__pyx_pf_16integrate_cython_2integrate_typed, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(0)}, {__Pyx_NAMESTR("integrate_omp"), (PyCFunction)__pyx_pf_16integrate_cython_3integrate_omp, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(0)}, {__Pyx_NAMESTR("integrate_c"), (PyCFunction)__pyx_pf_16integrate_cython_4integrate_c, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(0)}, {__Pyx_NAMESTR("integrate_c_omp"), (PyCFunction)__pyx_pf_16integrate_cython_5integrate_c_omp, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(0)}, {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 static struct PyModuleDef __pyx_moduledef = { PyModuleDef_HEAD_INIT, __Pyx_NAMESTR("integrate_cython"), 0, /* m_doc */ -1, /* m_size */ __pyx_methods /* m_methods */, NULL, /* m_reload */ NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_kp_s_1, __pyx_k_1, sizeof(__pyx_k_1), 0, 0, 1, 0}, {&__pyx_n_s__N, __pyx_k__N, sizeof(__pyx_k__N), 0, 0, 1, 1}, {&__pyx_n_s____main__, __pyx_k____main__, sizeof(__pyx_k____main__), 0, 0, 1, 1}, {&__pyx_n_s____test__, __pyx_k____test__, sizeof(__pyx_k____test__), 0, 0, 1, 1}, {&__pyx_n_s__a, __pyx_k__a, sizeof(__pyx_k__a), 0, 0, 1, 1}, {&__pyx_n_s__b, __pyx_k__b, sizeof(__pyx_k__b), 0, 0, 1, 1}, {&__pyx_n_s__range, __pyx_k__range, 
sizeof(__pyx_k__range), 0, 0, 1, 1}, {&__pyx_n_s__xrange, __pyx_k__xrange, sizeof(__pyx_k__xrange), 0, 0, 1, 1}, {0, 0, 0, 0, 0, 0, 0} }; static int __Pyx_InitCachedBuiltins(void) { #if PY_MAJOR_VERSION >= 3 __pyx_builtin_xrange = __Pyx_GetName(__pyx_b, __pyx_n_s__range); if (!__pyx_builtin_xrange) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else __pyx_builtin_xrange = __Pyx_GetName(__pyx_b, __pyx_n_s__xrange); if (!__pyx_builtin_xrange) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif return 0; __pyx_L1_error:; return -1; } static int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants"); __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_InitGlobals(void) { if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; return 0; __pyx_L1_error:; return -1; } #if PY_MAJOR_VERSION < 3 PyMODINIT_FUNC initintegrate_cython(void); /*proto*/ PyMODINIT_FUNC initintegrate_cython(void) #else PyMODINIT_FUNC PyInit_integrate_cython(void); /*proto*/ PyMODINIT_FUNC PyInit_integrate_cython(void) #endif { PyObject *__pyx_t_1 = NULL; __Pyx_RefNannyDeclarations #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit_integrate_cython(void)"); if ( __Pyx_check_binary_version() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #ifdef __pyx_binding_PyCFunctionType_USED if (__pyx_binding_PyCFunctionType_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS #ifdef WITH_THREAD /* Python build with threading support? */ PyEval_InitThreads(); #endif #endif /*--- Module creation code ---*/ #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4(__Pyx_NAMESTR("integrate_cython"), __pyx_methods, 0, 0, PYTHON_API_VERSION); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (!__pyx_m) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; #if PY_MAJOR_VERSION < 3 Py_INCREF(__pyx_m); #endif __pyx_b = PyImport_AddModule(__Pyx_NAMESTR(__Pyx_BUILTIN_MODULE_NAME)); if (!__pyx_b) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; if (__Pyx_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; /*--- Initialize various global constants etc. 
---*/ if (unlikely(__Pyx_InitGlobals() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (__pyx_module_is_main_integrate_cython) { if (__Pyx_SetAttrString(__pyx_m, "__name__", __pyx_n_s____main__) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; } /*--- Builtin init code ---*/ if (unlikely(__Pyx_InitCachedBuiltins() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Constants init code ---*/ if (unlikely(__Pyx_InitCachedConstants() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Global init code ---*/ /*--- Variable export code ---*/ /*--- Function export code ---*/ /*--- Type init code ---*/ /*--- Type import code ---*/ /*--- Variable import code ---*/ /*--- Function import code ---*/ /*--- Execution code ---*/ /* "integrate_cython.pyx":1 * # -*- coding: utf-8 -*- # <<<<<<<<<<<<<< * #cython: boundscheck=False * #cython: wraparound=False */ __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_1)); if (PyObject_SetAttr(__pyx_m, __pyx_n_s____test__, ((PyObject *)__pyx_t_1)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); if (__pyx_m) { __Pyx_AddTraceback("init integrate_cython", __pyx_clineno, __pyx_lineno, __pyx_filename); Py_DECREF(__pyx_m); __pyx_m = 0; } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init integrate_cython"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if PY_MAJOR_VERSION < 3 return; #else return __pyx_m; #endif } /* Runtime support code */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule((char *)modname); if (!m) goto end; p = PyObject_GetAttrString(m, (char *)"RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* CYTHON_REFNANNY */ static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name) { PyObject *result; result = PyObject_GetAttr(dict, name); if (!result) { if (dict != __pyx_b) { PyErr_Clear(); result = PyObject_GetAttr(__pyx_b, name); } if (!result) { PyErr_SetObject(PyExc_NameError, name); } } return result; } static void __Pyx_RaiseArgtupleInvalid( const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found) { Py_ssize_t num_expected; const char *more_or_less; if (num_found < num_min) { num_expected = num_min; more_or_less = "at least"; } else { num_expected = num_max; more_or_less = "at most"; } if (exact) { more_or_less = "exactly"; } PyErr_Format(PyExc_TypeError, "%s() takes %s %"PY_FORMAT_SIZE_T"d positional argument%s (%"PY_FORMAT_SIZE_T"d given)", func_name, more_or_less, num_expected, (num_expected == 1) ? 
"" : "s", num_found); } static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AS_STRING(kw_name)); #endif } static int __Pyx_ParseOptionalKeywords( PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject*** name; PyObject*** first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) name++; if (*name) { values[name-argnames] = value; } else { #if PY_MAJOR_VERSION < 3 if (unlikely(!PyString_CheckExact(key)) && unlikely(!PyString_Check(key))) { #else if (unlikely(!PyUnicode_CheckExact(key)) && unlikely(!PyUnicode_Check(key))) { #endif goto invalid_keyword_type; } else { for (name = first_kw_arg; *name; name++) { #if PY_MAJOR_VERSION >= 3 if (PyUnicode_GET_SIZE(**name) == PyUnicode_GET_SIZE(key) && PyUnicode_Compare(**name, key) == 0) break; #else if (PyString_GET_SIZE(**name) == PyString_GET_SIZE(key) && _PyString_Eq(**name, key)) break; #endif } if (*name) { values[name-argnames] = value; } else { /* unexpected keyword found */ for (name=argnames; name != first_kw_arg; name++) { if (**name == key) goto arg_passed_twice; #if PY_MAJOR_VERSION >= 3 if (PyUnicode_GET_SIZE(**name) == PyUnicode_GET_SIZE(key) && PyUnicode_Compare(**name, key) == 0) goto arg_passed_twice; #else if (PyString_GET_SIZE(**name) == PyString_GET_SIZE(key) && _PyString_Eq(**name, key)) goto arg_passed_twice; #endif } if (kwds2) { if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } } } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, **name); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%s() got an unexpected keyword argument '%s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } #if PY_MAJOR_VERSION < 3 static PyObject *__Pyx_GetStdout(void) { PyObject *f = PySys_GetObject((char *)"stdout"); if (!f) { PyErr_SetString(PyExc_RuntimeError, "lost sys.stdout"); } return f; } static int __Pyx_Print(PyObject* f, PyObject *arg_tuple, int newline) { PyObject* v; int i; if (!f) { if (!(f = __Pyx_GetStdout())) return -1; } for (i=0; i < PyTuple_GET_SIZE(arg_tuple); i++) { if (PyFile_SoftSpace(f, 1)) { if (PyFile_WriteString(" ", f) < 0) return -1; } v = PyTuple_GET_ITEM(arg_tuple, i); if (PyFile_WriteObject(v, f, Py_PRINT_RAW) < 0) return -1; if (PyString_Check(v)) { char *s = PyString_AsString(v); Py_ssize_t len = PyString_Size(v); if (len > 0 && isspace(Py_CHARMASK(s[len-1])) && s[len-1] != ' ') PyFile_SoftSpace(f, 0); } } if (newline) { if (PyFile_WriteString("\n", f) < 0) return -1; PyFile_SoftSpace(f, 0); } return 0; } #else /* Python 3 has a print function */ static int __Pyx_Print(PyObject* stream, PyObject *arg_tuple, int newline) { PyObject* kwargs = 0; PyObject* result = 0; PyObject* end_string; if (unlikely(!__pyx_print)) { __pyx_print = __Pyx_GetAttrString(__pyx_b, "print"); if (!__pyx_print) return -1; } if (stream) { kwargs = PyDict_New(); if (unlikely(!kwargs)) 
return -1; if (unlikely(PyDict_SetItemString(kwargs, "file", stream) < 0)) goto bad; if (!newline) { end_string = PyUnicode_FromStringAndSize(" ", 1); if (unlikely(!end_string)) goto bad; if (PyDict_SetItemString(kwargs, "end", end_string) < 0) { Py_DECREF(end_string); goto bad; } Py_DECREF(end_string); } } else if (!newline) { if (unlikely(!__pyx_print_kwargs)) { __pyx_print_kwargs = PyDict_New(); if (unlikely(!__pyx_print_kwargs)) return -1; end_string = PyUnicode_FromStringAndSize(" ", 1); if (unlikely(!end_string)) return -1; if (PyDict_SetItemString(__pyx_print_kwargs, "end", end_string) < 0) { Py_DECREF(end_string); return -1; } Py_DECREF(end_string); } kwargs = __pyx_print_kwargs; } result = PyObject_Call(__pyx_print, arg_tuple, kwargs); if (unlikely(kwargs) && (kwargs != __pyx_print_kwargs)) Py_DECREF(kwargs); if (!result) return -1; Py_DECREF(result); return 0; bad: if (kwargs != __pyx_print_kwargs) Py_XDECREF(kwargs); return -1; } #endif #if PY_MAJOR_VERSION < 3 static int __Pyx_PrintOne(PyObject* f, PyObject *o) { if (!f) { if (!(f = __Pyx_GetStdout())) return -1; } if (PyFile_SoftSpace(f, 0)) { if (PyFile_WriteString(" ", f) < 0) return -1; } if (PyFile_WriteObject(o, f, Py_PRINT_RAW) < 0) return -1; if (PyFile_WriteString("\n", f) < 0) return -1; return 0; /* the line below is just to avoid compiler * compiler warnings about unused functions */ return __Pyx_Print(f, NULL, 0); } #else /* Python 3 has a print function */ static int __Pyx_PrintOne(PyObject* stream, PyObject *o) { int res; PyObject* arg_tuple = PyTuple_New(1); if (unlikely(!arg_tuple)) return -1; Py_INCREF(o); PyTuple_SET_ITEM(arg_tuple, 0, o); res = __Pyx_Print(stream, arg_tuple, 1); Py_DECREF(arg_tuple); return res; } #endif static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject* x) { const unsigned char neg_one = (unsigned char)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(unsigned char) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(unsigned char)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to unsigned char" : "value too large to convert to unsigned char"); } return (unsigned char)-1; } return (unsigned char)val; } return (unsigned char)__Pyx_PyInt_AsUnsignedLong(x); } static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject* x) { const unsigned short neg_one = (unsigned short)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(unsigned short) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(unsigned short)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to unsigned short" : "value too large to convert to unsigned short"); } return (unsigned short)-1; } return (unsigned short)val; } return (unsigned short)__Pyx_PyInt_AsUnsignedLong(x); } static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject* x) { const unsigned int neg_one = (unsigned int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(unsigned int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(unsigned int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? 
"can't convert negative value to unsigned int" : "value too large to convert to unsigned int"); } return (unsigned int)-1; } return (unsigned int)val; } return (unsigned int)__Pyx_PyInt_AsUnsignedLong(x); } static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject* x) { const char neg_one = (char)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(char) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(char)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to char" : "value too large to convert to char"); } return (char)-1; } return (char)val; } return (char)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject* x) { const short neg_one = (short)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(short) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(short)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to short" : "value too large to convert to short"); } return (short)-1; } return (short)val; } return (short)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject* x) { const int neg_one = (int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to int" : "value too large to convert to int"); } return (int)-1; } return (int)val; } return (int)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject* x) { const signed char neg_one = (signed char)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(signed char) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(signed char)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to signed char" : "value too large to convert to signed char"); } return (signed char)-1; } return (signed char)val; } return (signed char)__Pyx_PyInt_AsSignedLong(x); } static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject* x) { const signed short neg_one = (signed short)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(signed short) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(signed short)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to signed short" : "value too large to convert to signed short"); } return (signed short)-1; } return (signed short)val; } return (signed short)__Pyx_PyInt_AsSignedLong(x); } static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject* x) { const signed int neg_one = (signed int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(signed int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(signed int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? 
"can't convert negative value to signed int" : "value too large to convert to signed int"); } return (signed int)-1; } return (signed int)val; } return (signed int)__Pyx_PyInt_AsSignedLong(x); } static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject* x) { const int neg_one = (int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to int" : "value too large to convert to int"); } return (int)-1; } return (int)val; } return (int)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject* x) { const unsigned long neg_one = (unsigned long)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned long"); return (unsigned long)-1; } return (unsigned long)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned long"); return (unsigned long)-1; } return (unsigned long)PyLong_AsUnsignedLong(x); } else { return (unsigned long)PyLong_AsLong(x); } } else { unsigned long val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (unsigned long)-1; val = __Pyx_PyInt_AsUnsignedLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject* x) { const unsigned PY_LONG_LONG neg_one = (unsigned PY_LONG_LONG)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned PY_LONG_LONG"); return (unsigned PY_LONG_LONG)-1; } return (unsigned PY_LONG_LONG)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned PY_LONG_LONG"); return (unsigned PY_LONG_LONG)-1; } return (unsigned PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); } else { return (unsigned PY_LONG_LONG)PyLong_AsLongLong(x); } } else { unsigned PY_LONG_LONG val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (unsigned PY_LONG_LONG)-1; val = __Pyx_PyInt_AsUnsignedLongLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject* x) { const long neg_one = (long)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long)-1; } return (long)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long)-1; } return (long)PyLong_AsUnsignedLong(x); } else { return (long)PyLong_AsLong(x); } } else { long val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (long)-1; val = __Pyx_PyInt_AsLong(tmp); Py_DECREF(tmp); return val; } 
} static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject* x) { const PY_LONG_LONG neg_one = (PY_LONG_LONG)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to PY_LONG_LONG"); return (PY_LONG_LONG)-1; } return (PY_LONG_LONG)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to PY_LONG_LONG"); return (PY_LONG_LONG)-1; } return (PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); } else { return (PY_LONG_LONG)PyLong_AsLongLong(x); } } else { PY_LONG_LONG val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (PY_LONG_LONG)-1; val = __Pyx_PyInt_AsLongLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject* x) { const signed long neg_one = (signed long)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed long"); return (signed long)-1; } return (signed long)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed long"); return (signed long)-1; } return (signed long)PyLong_AsUnsignedLong(x); } else { return (signed long)PyLong_AsLong(x); } } else { signed long val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (signed long)-1; val = __Pyx_PyInt_AsSignedLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject* x) { const signed PY_LONG_LONG neg_one = (signed PY_LONG_LONG)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed PY_LONG_LONG"); return (signed PY_LONG_LONG)-1; } return (signed PY_LONG_LONG)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed PY_LONG_LONG"); return (signed PY_LONG_LONG)-1; } return (signed PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); } else { return (signed PY_LONG_LONG)PyLong_AsLongLong(x); } } else { signed PY_LONG_LONG val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (signed PY_LONG_LONG)-1; val = __Pyx_PyInt_AsSignedLongLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; PyThreadState *tstate = PyThreadState_GET(); tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb) { PyThreadState *tstate = PyThreadState_GET(); *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; 
tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; } static void __Pyx_WriteUnraisable(const char *name, int clineno, int lineno, const char *filename) { PyObject *old_exc, *old_val, *old_tb; PyObject *ctx; __Pyx_ErrFetch(&old_exc, &old_val, &old_tb); #if PY_MAJOR_VERSION < 3 ctx = PyString_FromString(name); #else ctx = PyUnicode_FromString(name); #endif __Pyx_ErrRestore(old_exc, old_val, old_tb); if (!ctx) { PyErr_WriteUnraisable(Py_None); } else { PyErr_WriteUnraisable(ctx); Py_DECREF(ctx); } } static int __Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { char message[200]; PyOS_snprintf(message, sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); #if PY_VERSION_HEX < 0x02050000 return PyErr_Warn(NULL, message); #else return PyErr_WarnEx(NULL, message, 1); #endif } return 0; } #include "compile.h" #include "frameobject.h" #include "traceback.h" static void __Pyx_AddTraceback(const char *funcname, int __pyx_clineno, int __pyx_lineno, const char *__pyx_filename) { PyObject *py_srcfile = 0; PyObject *py_funcname = 0; PyObject *py_globals = 0; PyCodeObject *py_code = 0; PyFrameObject *py_frame = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(__pyx_filename); #else py_srcfile = PyUnicode_FromString(__pyx_filename); #endif if (!py_srcfile) goto bad; if (__pyx_clineno) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, __pyx_clineno); #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, __pyx_clineno); #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromString(funcname); #else py_funcname = PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_globals = PyModule_GetDict(__pyx_m); if (!py_globals) goto bad; py_code = PyCode_New( 0, /*int argcount,*/ #if PY_MAJOR_VERSION >= 3 0, /*int kwonlyargcount,*/ #endif 0, /*int nlocals,*/ 0, /*int stacksize,*/ 0, /*int flags,*/ __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, /*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ __pyx_lineno, /*int firstlineno,*/ __pyx_empty_bytes /*PyObject *lnotab*/ ); if (!py_code) goto bad; py_frame = PyFrame_New( PyThreadState_GET(), /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ py_globals, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; py_frame->f_lineno = __pyx_lineno; PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); Py_XDECREF(py_code); Py_XDECREF(py_frame); } static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else /* Python 3+ has unicode identifiers */ if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { 
*t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; ++t; } return 0; } /* Type Conversion Functions */ static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == Py_True; if (is_true | (x == Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x) { PyNumberMethods *m; const char *name = NULL; PyObject *res = NULL; #if PY_VERSION_HEX < 0x03000000 if (PyInt_Check(x) || PyLong_Check(x)) #else if (PyLong_Check(x)) #endif return Py_INCREF(x), x; m = Py_TYPE(x)->tp_as_number; #if PY_VERSION_HEX < 0x03000000 if (m && m->nb_int) { name = "int"; res = PyNumber_Int(x); } else if (m && m->nb_long) { name = "long"; res = PyNumber_Long(x); } #else if (m && m->nb_int) { name = "int"; res = PyNumber_Long(x); } #endif if (res) { #if PY_VERSION_HEX < 0x03000000 if (!PyInt_Check(res) && !PyLong_Check(res)) { #else if (!PyLong_Check(res)) { #endif PyErr_Format(PyExc_TypeError, "__%s__ returned non-%s (type %.200s)", name, name, Py_TYPE(res)->tp_name); Py_DECREF(res); return NULL; } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject* x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { #if PY_VERSION_HEX < 0x02050000 if (ival <= LONG_MAX) return PyInt_FromLong((long)ival); else { unsigned char *bytes = (unsigned char *) &ival; int one = 1; int little = (int)*(unsigned char*)&one; return _PyLong_FromByteArray(bytes, sizeof(size_t), little, 0); } #else return PyInt_FromSize_t(ival); #endif } static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject* x) { unsigned PY_LONG_LONG val = __Pyx_PyInt_AsUnsignedLongLong(x); if (unlikely(val == (unsigned PY_LONG_LONG)-1 && PyErr_Occurred())) { return (size_t)-1; } else if (unlikely(val != (unsigned PY_LONG_LONG)(size_t)val)) { PyErr_SetString(PyExc_OverflowError, "value too large to convert to size_t"); return (size_t)-1; } return (size_t)val; } #endif /* Py_PYTHON_H */
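/*
 * The __Pyx_PyInt_As* helpers above all follow the same pattern: widen the
 * value to long (or PY_LONG_LONG) first, then narrow it only if it survives a
 * round trip through the smaller type, raising OverflowError otherwise (the
 * check is skipped when the target type is at least as wide as long).  A
 * minimal standalone sketch of that round-trip check in plain C, without the
 * Python API; checked_narrow_to_int is an illustrative name only.
 */
#include <limits.h>
#include <stdio.h>

static int checked_narrow_to_int(long val, int *out)
{
    /* Mirrors: if (unlikely(val != (long)(int)val)) { ...overflow... } */
    if (val != (long)(int)val) {
        return -1;               /* value would not fit in int */
    }
    *out = (int)val;
    return 0;
}

int main(void)
{
    long samples[2] = { 42L, LONG_MAX };
    for (int i = 0; i < 2; i++) {
        int n;
        if (checked_narrow_to_int(samples[i], &n) == 0)
            printf("%ld fits in int: %d\n", samples[i], n);
        else
            printf("%ld does not fit in int\n", samples[i]);
    }
    return 0;
}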
GB_unaryop__abs_bool_fp32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__abs_bool_fp32 // op(A') function: GB_tran__abs_bool_fp32 // C type: bool // A type: float // cast: bool cij = (bool) aij // unaryop: cij = aij #define GB_ATYPE \ float #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, x) \ bool z = (bool) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ABS || GxB_NO_BOOL || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__abs_bool_fp32 ( bool *restrict Cx, const float *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__abs_bool_fp32 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
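/*
 * After the macros above are expanded, the apply kernel GB_unop__abs_bool_fp32
 * is just a typecasting loop: each float aij is cast to bool and stored in Cx
 * (ABS on bool is the identity, so GB_OP is z = x).  A minimal standalone
 * sketch of that expanded loop; the demo_ names and sample data are
 * illustrative only, not part of the generated kernel.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static void demo_unop_abs_bool_fp32(bool *Cx, const float *Ax, int64_t anz)
{
    #pragma omp parallel for schedule(static)
    for (int64_t p = 0; p < anz; p++)
    {
        /* GB_GETA, GB_CASTING, GB_OP expanded by hand */
        float aij = Ax[p];
        bool  z   = (bool) aij;
        Cx[p] = z;
    }
}

int main(void)
{
    float A[4] = { 0.0f, -2.5f, 1.0f, 0.0f };
    bool  C[4];
    demo_unop_abs_bool_fp32(C, A, 4);
    for (int p = 0; p < 4; p++) printf("%d ", (int) C[p]);
    printf("\n");
    return 0;
}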
EppsteinSubGraphAdaptive.h
#pragma once

#include "../general.h"
#include <gms/algorithms/preprocessing/preprocessing.h>

namespace BkEppsteinSubGraphAdaptive {

template <int Boundary, class SGraph, class Set = typename SGraph::Set>
std::vector<Set> mceBench(const SGraph &rgraph, const pvector<NodeId> &ordering)
{
#ifdef BK_COUNT
    BK_CLIQUE_COUNTER = 0; // initialize counter
#endif
    auto vCount = rgraph.num_nodes();
    std::vector<Set> sol = {};

    #pragma omp parallel for schedule(dynamic) shared(rgraph, sol, ordering)
    for (int v = 0; v < vCount; v++) {
        auto &neigh = rgraph.out_neigh(v);
        Set cand = {};
        Set fini = {};
        Set Q(v);
        for (auto w : neigh) {
            if (ordering[w] > ordering[v])
                cand.union_inplace(w);
            else
                fini.union_inplace(w);
        }
        if (cand.cardinality() < Boundary)
            BkTomita::expand(cand, fini, Q, sol, rgraph);
        else
            BkEppsteinSubGraph::expandRelay(cand, fini, Q, sol, SGraphSubGraph(rgraph, v, cand, fini));
    }
    return sol;
}

} // namespace BkEppsteinSubGraphAdaptive
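// The core of mceBench above is the split of N(v) by position in the given
// ordering: neighbors that appear after v in the ordering become candidates,
// neighbors that appear before v are already finished.  A simplified
// standalone illustration of that split using std::vector<int> in place of the
// Set type; splitNeighborhood is an illustrative helper, not part of the
// benchmark code.
#include <cstdio>
#include <utility>
#include <vector>

static std::pair<std::vector<int>, std::vector<int>>
splitNeighborhood(int v, const std::vector<int> &neigh, const std::vector<int> &ordering)
{
    std::vector<int> cand, fini;
    for (int w : neigh) {
        if (ordering[w] > ordering[v]) cand.push_back(w);   // later in the ordering
        else                           fini.push_back(w);   // earlier: already processed
    }
    return { cand, fini };
}

int main()
{
    // Vertex 2 sits at ordering position 1; neighbors 0, 1 and 3 sit at 0, 3 and 2.
    std::vector<int> ordering = { 0, 3, 1, 2 };
    auto [cand, fini] = splitNeighborhood(2, { 0, 1, 3 }, ordering);
    std::printf("cand: %zu, fini: %zu\n", cand.size(), fini.size());
    return 0;
}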
TaskDispatcher.h
#include "nvtt.h" // OpenMP // http://en.wikipedia.org/wiki/OpenMP #if defined(HAVE_OPENMP) #include <omp.h> #endif // Gran Central Dispatch (GCD/libdispatch) // http://developer.apple.com/mac/library/documentation/Performance/Reference/GCD_libdispatch_Ref/Reference/reference.html #if NV_OS_DARWIN && defined(HAVE_DISPATCH_H) //#define HAVE_GCD 1 //#include <dispatch/dispatch.h> #endif // Parallel Patterns Library (PPL) is part of Microsoft's concurrency runtime: // http://msdn.microsoft.com/en-us/library/dd504870.aspx #if NV_OS_WIN32 && _MSC_VER >= 1600 #define HAVE_PPL 1 #include <ppl.h> #endif // Intel Thread Building Blocks (TBB). // http://www.threadingbuildingblocks.org/ #if defined(HAVE_TBB) #include <tbb/parallel_for.h> #endif #include "nvthread/ParallelFor.h" namespace nvtt { struct SequentialTaskDispatcher : public TaskDispatcher { virtual void dispatch(Task * task, void * context, int count) { for (int i = 0; i < count; i++) { task(context, i); } } }; struct ParallelTaskDispatcher : public TaskDispatcher { virtual void dispatch(Task * task, void * context, int count) { nv::ParallelFor parallelFor(task, context); parallelFor.run(count); // @@ Add support for custom grain. } }; #if defined(HAVE_OPENMP) struct OpenMPTaskDispatcher : public TaskDispatcher { virtual void dispatch(Task * task, void * context, int count) { #pragma omp parallel for for (int i = 0; i < count; i++) { task(context, i); } } }; #endif #if HAVE_GCD // Task dispatcher using Apple's Grand Central Dispatch. struct AppleTaskDispatcher : public TaskDispatcher { // @@ This is really lame, but I refuse to use size_t in the public API. struct BlockContext { Task * task; void * context; }; static void block(void * context, size_t id) { BlockContext * ctx = (BlockContext *)context; ctx->task(ctx->context, int(id)); } virtual void dispatch(Task * task, void * context, int count) { dispatch_queue_t q = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0); BlockContext blockCtx = { task, context }; dispatch_apply_f(count, q, &blockCtx, block); } }; #endif #if defined(HAVE_PPL) struct TaskFunctor { TaskFunctor(Task * task, void * context) : task(task), context(context) {} void operator()(int n) const { task(context, n); } Task * task; void * context; }; // Task dispatcher using Microsoft's concurrency runtime. struct MicrosoftTaskDispatcher : public TaskDispatcher { virtual void dispatch(Task * task, void * context, int count) { TaskFunctor func(task, context); Concurrency::parallel_for(0, count, func); } }; #endif #if defined(HAVE_TBB) struct TaskFunctor { TaskFunctor(Task * task, void * context) : task(task), context(context) {} void operator()(int & n) const { task(context, n); } Task * task; void * context; }; // Task dispatcher using Inte's Thread Building Blocks. struct IntelTaskDispatcher : public TaskDispatcher { virtual void dispatch(Task * task, void * context, int count) { parallel_for(blocked_range<int>(0, count, 1), TaskFunctor(task, context)); } }; #endif #if defined(HAVE_OPENMP) typedef OpenMPTaskDispatcher ConcurrentTaskDispatcher; #elif defined(HAVE_TBB) typedef IntelTaskDispatcher ConcurrentTaskDispatcher; #elif defined(HAVE_PPL) typedef MicrosoftTaskDispatcher ConcurrentTaskDispatcher; #elif defined(HAVE_GCD) typedef AppleTaskDispatcher ConcurrentTaskDispatcher; #else //typedef SequentialTaskDispatcher ConcurrentTaskDispatcher; typedef ParallelTaskDispatcher ConcurrentTaskDispatcher; #endif } // namespace nvtt
nstream-ua-target.c
/// /// Copyright (c) 2019, Intel Corporation /// Copyright (c) 2021, NVIDIA /// /// Redistribution and use in source and binary forms, with or without /// modification, are permitted provided that the following conditions /// are met: /// /// * Redistributions of source code must retain the above copyright /// notice, this list of conditions and the following disclaimer. /// * Redistributions in binary form must reproduce the above /// copyright notice, this list of conditions and the following /// disclaimer in the documentation and/or other materials provided /// with the distribution. /// * Neither the name of Intel Corporation nor the names of its /// contributors may be used to endorse or promote products /// derived from this software without specific prior written /// permission. /// /// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS /// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT /// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS /// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE /// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, /// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, /// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; /// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER /// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT /// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN /// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE /// POSSIBILITY OF SUCH DAMAGE. ////////////////////////////////////////////////////////////////////// /// /// NAME: nstream /// /// PURPOSE: To compute memory bandwidth when adding a vector of a given /// number of double precision values to the scalar multiple of /// another vector of the same length, and storing the result in /// a third vector. /// /// USAGE: The program takes as input the number /// of iterations to loop over the triad vectors and /// the length of the vectors. /// /// <progname> <# iterations> <vector length> /// /// The output consists of diagnostics to make sure the /// algorithm worked, and of timing statistics. /// /// NOTES: Bandwidth is determined as the number of words read, plus the /// number of words written, times the size of the words, divided /// by the execution time. For a vector length of N, the total /// number of words read and written is 4*N*sizeof(double). /// /// /// HISTORY: This code is loosely based on the Stream benchmark by John /// McCalpin, but does not follow all the Stream rules. Hence, /// reported results should not be associated with Stream in /// external publications /// /// Converted to C++11 by Jeff Hammond, November 2017. /// Converted to C11 by Jeff Hammond, February 2019. 
/// ////////////////////////////////////////////////////////////////////// #include "prk_util.h" #include "prk_openmp.h" OMP_REQUIRES(unified_address) int main(int argc, char * argv[]) { printf("Parallel Research Kernels version %d\n", PRKVERSION ); printf("C11/OpenMP TARGET STREAM triad: A = B + scalar * C\n"); ////////////////////////////////////////////////////////////////////// /// Read and test input parameters ////////////////////////////////////////////////////////////////////// if (argc < 3) { printf("Usage: <# iterations> <vector length>\n"); return 1; } int iterations = atoi(argv[1]); if (iterations < 1) { printf("ERROR: iterations must be >= 1\n"); return 1; } // length of a the vector size_t length = atol(argv[2]); if (length <= 0) { printf("ERROR: Vector length must be greater than 0\n"); return 1; } int device = (argc > 3) ? atol(argv[3]) : omp_get_default_device(); if ( (device < 0 || omp_get_num_devices() <= device ) && (device != omp_get_default_device()) ) { printf("ERROR: device number %d is not valid.\n", device); return 1; } printf("Number of iterations = %d\n", iterations); printf("Vector length = %zu\n", length); printf("OpenMP Device = %d\n", device); ////////////////////////////////////////////////////////////////////// // Allocate space and perform the computation ////////////////////////////////////////////////////////////////////// double nstream_time = 0.0; int host = omp_get_initial_device(); size_t bytes = length*sizeof(double); double * restrict A = omp_target_alloc(bytes, host); double * restrict B = omp_target_alloc(bytes, host); double * restrict C = omp_target_alloc(bytes, host); double scalar = 3.0; #pragma omp parallel for simd schedule(static) for (size_t i=0; i<length; i++) { A[i] = 0.0; B[i] = 2.0; C[i] = 2.0; } { for (int iter = 0; iter<=iterations; iter++) { if (iter==1) nstream_time = prk_wtime(); OMP_TARGET( teams distribute parallel for simd schedule(static) device(device) ) for (size_t i=0; i<length; i++) { A[i] += B[i] + scalar * C[i]; } } nstream_time = prk_wtime() - nstream_time; } omp_target_free(C, host); omp_target_free(B, host); ////////////////////////////////////////////////////////////////////// /// Analyze and output results ////////////////////////////////////////////////////////////////////// double ar = 0.0; double br = 2.0; double cr = 2.0; for (int i=0; i<=iterations; i++) { ar += br + scalar * cr; } ar *= length; double asum = 0.0; #pragma omp parallel for reduction(+:asum) for (size_t i=0; i<length; i++) { asum += fabs(A[i]); } omp_target_free(A, host); double epsilon=1.e-8; if (fabs(ar-asum)/asum > epsilon) { printf("Failed Validation on output array\n" " Expected checksum: %lf\n" " Observed checksum: %lf\n" "ERROR: solution did not validate\n", ar, asum); return 1; } else { printf("Solution validates\n"); double avgtime = nstream_time/iterations; double nbytes = 4.0 * length * sizeof(double); printf("Rate (MB/s): %lf Avg time (s): %lf\n", 1.e-6*nbytes/avgtime, avgtime); } return 0; }
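/*
 * The validation above computes the expected checksum analytically: every
 * triad pass adds br + scalar*cr = 2 + 3*2 = 8 to each element, the loop runs
 * iterations+1 times (iteration 0 is an untimed warm-up that is still applied
 * to A), and the reported rate uses 4*length*sizeof(double) bytes moved per
 * pass.  A small standalone sketch of that arithmetic; the iteration count,
 * vector length, and elapsed time below are made-up example inputs.
 */
#include <stdio.h>

int main(void)
{
    int    iterations   = 10;        /* example value */
    size_t length       = 1000000;   /* example value */
    double nstream_time = 0.05;      /* example elapsed seconds for the timed passes */
    double scalar = 3.0, ar = 0.0, br = 2.0, cr = 2.0;

    for (int i = 0; i <= iterations; i++) {
        ar += br + scalar * cr;      /* same recurrence as the validation loop */
    }
    ar *= length;                    /* expected value of sum(|A[i]|) */

    double avgtime = nstream_time / iterations;
    double nbytes  = 4.0 * length * sizeof(double);
    printf("expected checksum = %lf\n", ar);
    printf("Rate (MB/s): %lf Avg time (s): %lf\n", 1.e-6 * nbytes / avgtime, avgtime);
    return 0;
}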
GB_unop__isinf_bool_fp32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__isinf_bool_fp32) // op(A') function: GB (_unop_tran__isinf_bool_fp32) // C type: bool // A type: float // cast: float cij = (aij) // unaryop: cij = isinf (aij) #define GB_ATYPE \ float #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = isinf (x) ; // casting #define GB_CAST(z, aij) \ float z = (aij) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ float aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ float z = (aij) ; \ Cx [pC] = isinf (z) ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISINF || GxB_NO_BOOL || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__isinf_bool_fp32) ( bool *Cx, // Cx and Ax may be aliased const float *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; // TODO: if OP is ONE and uniform-valued matrices are exploited, then // do this in O(1) time if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { float aij = Ax [p] ; float z = (aij) ; Cx [p] = isinf (z) ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; float aij = Ax [p] ; float z = (aij) ; Cx [p] = isinf (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__isinf_bool_fp32) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
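/*
 * The only difference between the two branches of GB (_unop_apply__isinf_bool_fp32)
 * above is the bitmap case: when A->b is present, entries with Ab[p] == 0 are
 * not stored and must be skipped.  A standalone sketch of that bitmap loop
 * with the macros expanded; the demo_ name and sample data are illustrative
 * only.
 */
#include <math.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static void demo_isinf_bitmap(bool *Cx, const float *Ax, const int8_t *Ab, int64_t anz)
{
    #pragma omp parallel for schedule(static)
    for (int64_t p = 0; p < anz; p++)
    {
        if (!Ab[p]) continue;        /* entry not present in the bitmap */
        float z = Ax[p];             /* GB_CAST: float -> float */
        Cx[p] = isinf(z);            /* GB_OP: z -> isinf(z) */
    }
}

int main(void)
{
    float  Ax[4] = { 1.0f, INFINITY, -INFINITY, 3.0f };
    int8_t Ab[4] = { 1, 1, 0, 1 };   /* third entry is absent */
    bool   Cx[4] = { false, false, false, false };
    demo_isinf_bitmap(Cx, Ax, Ab, 4);
    for (int p = 0; p < 4; p++) printf("%d ", (int) Cx[p]);
    printf("\n");
    return 0;
}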
GB_binop__isle_int32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__isle_int32) // A.*B function (eWiseMult): GB (_AemultB_08__isle_int32) // A.*B function (eWiseMult): GB (_AemultB_02__isle_int32) // A.*B function (eWiseMult): GB (_AemultB_04__isle_int32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__isle_int32) // A*D function (colscale): GB (_AxD__isle_int32) // D*A function (rowscale): GB (_DxB__isle_int32) // C+=B function (dense accum): GB (_Cdense_accumB__isle_int32) // C+=b function (dense accum): GB (_Cdense_accumb__isle_int32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isle_int32) // C=scalar+B GB (_bind1st__isle_int32) // C=scalar+B' GB (_bind1st_tran__isle_int32) // C=A+scalar GB (_bind2nd__isle_int32) // C=A'+scalar GB (_bind2nd_tran__isle_int32) // C type: int32_t // A type: int32_t // A pattern? 0 // B type: int32_t // B pattern? 0 // BinaryOp: cij = (aij <= bij) #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ int32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int32_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int32_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x <= y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISLE || GxB_NO_INT32 || GxB_NO_ISLE_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__isle_int32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__isle_int32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__isle_int32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int32_t int32_t bwork = (*((int32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__isle_int32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *restrict Cx = (int32_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__isle_int32) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *restrict Cx = (int32_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__isle_int32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int32_t alpha_scalar ; int32_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int32_t *) alpha_scalar_in)) ; beta_scalar 
= (*((int32_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__isle_int32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__isle_int32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__isle_int32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__isle_int32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__isle_int32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *Cx = (int32_t *) Cx_output ; int32_t x = (*((int32_t *) x_input)) ; int32_t *Bx = (int32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int32_t bij = GBX (Bx, p, false) ; Cx [p] = (x <= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__isle_int32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int32_t *Cx = (int32_t *) Cx_output ; int32_t *Ax = (int32_t *) Ax_input ; int32_t y = (*((int32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int32_t aij = GBX (Ax, p, false) ; Cx [p] = (aij <= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x <= aij) ; \ } GrB_Info GB (_bind1st_tran__isle_int32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int 
nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t x = (*((const int32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij <= y) ; \ } GrB_Info GB (_bind2nd_tran__isle_int32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t y = (*((const int32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
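For reference, a minimal standalone sketch (not part of the generated file above) of what the `_bind1st__isle_int32` kernel computes when B is held as a full array: each output entry is the boolean result of `x <= bij`, kept in the int32 domain. The name `demo_bind1st_isle_int32` is illustrative only; the bitmap test (`GBB`), the `GBX` load macro, and the OpenMP `parallel for` used by the generated code are intentionally omitted.

```c
#include <stdint.h>
#include <stddef.h>

/* Sketch of the bind1st ISLE_INT32 semantics: Cx[p] = (x <= Bx[p]).
 * This mirrors the inner loop of GB(_bind1st__isle_int32) above, assuming
 * a full (non-bitmap) B and a single thread. */
static void demo_bind1st_isle_int32 (int32_t *Cx, int32_t x,
                                     const int32_t *Bx, size_t bnz)
{
    for (size_t p = 0 ; p < bnz ; p++)
    {
        /* result is 1 when x <= Bx[p], else 0, stored as int32_t */
        Cx [p] = (x <= Bx [p]) ;
    }
}
```

The bind2nd variant in the same file is the mirror image, computing `Cx[p] = (Ax[p] <= y)` with the scalar bound to the second operand.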
Sema.h
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Sema class, which performs semantic analysis and // builds ASTs. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_SEMA_H #define LLVM_CLANG_SEMA_SEMA_H #include "clang/AST/Attr.h" #include "clang/AST/Availability.h" #include "clang/AST/ComparisonCategories.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/LocInfoType.h" #include "clang/AST/MangleNumberingContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/TypeLoc.h" #include "clang/AST/TypeOrdering.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/Module.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/PragmaKinds.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" #include "clang/Basic/TypeTraits.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/CleanupInfo.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/IdentifierResolver.h" #include "clang/Sema/ObjCMethodList.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/Scope.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallBitVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TinyPtrVector.h" #include <deque> #include <memory> #include <string> #include <vector> namespace llvm { class APSInt; template <typename ValueT> struct DenseMapInfo; template <typename ValueT, typename ValueInfoT> class DenseSet; class SmallBitVector; struct InlineAsmIdentifierInfo; } namespace clang { class ADLResult; class ASTConsumer; class ASTContext; class ASTMutationListener; class ASTReader; class ASTWriter; class ArrayType; class ParsedAttr; class BindingDecl; class BlockDecl; class CapturedDecl; class CXXBasePath; class CXXBasePaths; class CXXBindTemporaryExpr; typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath; class CXXConstructorDecl; class CXXConversionDecl; class CXXDeleteExpr; class CXXDestructorDecl; class CXXFieldCollector; class CXXMemberCallExpr; class CXXMethodDecl; class CXXScopeSpec; class CXXTemporary; class CXXTryStmt; class CallExpr; class ClassTemplateDecl; class ClassTemplatePartialSpecializationDecl; class ClassTemplateSpecializationDecl; class VarTemplatePartialSpecializationDecl; class CodeCompleteConsumer; class CodeCompletionAllocator; class CodeCompletionTUInfo; class CodeCompletionResult; class CoroutineBodyStmt; class Decl; class DeclAccessPair; class DeclContext; class DeclRefExpr; class DeclaratorDecl; class DeducedTemplateArgument; class DependentDiagnostic; class DesignatedInitExpr; class Designation; class EnableIfAttr; class EnumConstantDecl; class Expr; class ExtVectorType; class FormatAttr; class FriendDecl; class FunctionDecl; class FunctionProtoType; class 
FunctionTemplateDecl; class ImplicitConversionSequence; typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList; class InitListExpr; class InitializationKind; class InitializationSequence; class InitializedEntity; class IntegerLiteral; class LabelStmt; class LambdaExpr; class LangOptions; class LocalInstantiationScope; class LookupResult; class MacroInfo; typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath; class ModuleLoader; class MultiLevelTemplateArgumentList; class NamedDecl; class ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCCompatibleAliasDecl; class ObjCContainerDecl; class ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; template <class T> class ObjCList; class ObjCMessageExpr; class ObjCMethodDecl; class ObjCPropertyDecl; class ObjCProtocolDecl; class OMPThreadPrivateDecl; class OMPRequiresDecl; class OMPDeclareReductionDecl; class OMPDeclareSimdDecl; class OMPClause; struct OMPVarListLocTy; struct OverloadCandidate; class OverloadCandidateSet; class OverloadExpr; class ParenListExpr; class ParmVarDecl; class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; class StandardConversionSequence; class Stmt; class StringLiteral; class SwitchStmt; class TemplateArgument; class TemplateArgumentList; class TemplateArgumentLoc; class TemplateDecl; class TemplateInstantiationCallback; class TemplateParameterList; class TemplatePartialOrderingContext; class TemplateTemplateParmDecl; class Token; class TypeAliasDecl; class TypedefDecl; class TypedefNameDecl; class TypeLoc; class TypoCorrectionConsumer; class UnqualifiedId; class UnresolvedLookupExpr; class UnresolvedMemberExpr; class UnresolvedSetImpl; class UnresolvedSetIterator; class UsingDecl; class UsingShadowDecl; class ValueDecl; class VarDecl; class VarTemplateSpecializationDecl; class VisibilityAttr; class VisibleDeclConsumer; class IndirectFieldDecl; struct DeductionFailureInfo; class TemplateSpecCandidateSet; namespace sema { class AccessedEntity; class BlockScopeInfo; class Capture; class CapturedRegionScopeInfo; class CapturingScopeInfo; class CompoundScopeInfo; class DelayedDiagnostic; class DelayedDiagnosticPool; class FunctionScopeInfo; class LambdaScopeInfo; class PossiblyUnreachableDiag; class SemaPPCallbacks; class TemplateDeductionInfo; } namespace threadSafety { class BeforeSet; void threadSafetyCleanup(BeforeSet* Cache); } // FIXME: No way to easily map from TemplateTypeParmTypes to // TemplateTypeParmDecls, so we have this horrible PointerUnion. typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>, SourceLocation> UnexpandedParameterPack; /// Describes whether we've seen any nullability information for the given /// file. struct FileNullability { /// The first pointer declarator (of any pointer kind) in the file that does /// not have a corresponding nullability annotation. SourceLocation PointerLoc; /// The end location for the first pointer declarator in the file. Used for /// placing fix-its. SourceLocation PointerEndLoc; /// Which kind of pointer declarator we saw. uint8_t PointerKind; /// Whether we saw any type nullability annotations in the given file. bool SawTypeNullability = false; }; /// A mapping from file IDs to a record of whether we've seen nullability /// information in that file. class FileNullabilityMap { /// A mapping from file IDs to the nullability information for each file ID. 
llvm::DenseMap<FileID, FileNullability> Map; /// A single-element cache based on the file ID. struct { FileID File; FileNullability Nullability; } Cache; public: FileNullability &operator[](FileID file) { // Check the single-element cache. if (file == Cache.File) return Cache.Nullability; // It's not in the single-element cache; flush the cache if we have one. if (!Cache.File.isInvalid()) { Map[Cache.File] = Cache.Nullability; } // Pull this entry into the cache. Cache.File = file; Cache.Nullability = Map[file]; return Cache.Nullability; } }; /// Keeps track of expected type during expression parsing. The type is tied to /// a particular token, all functions that update or consume the type take a /// start location of the token they are looking at as a parameter. This allows /// to avoid updating the type on hot paths in the parser. class PreferredTypeBuilder { public: PreferredTypeBuilder() = default; explicit PreferredTypeBuilder(QualType Type) : Type(Type) {} void enterCondition(Sema &S, SourceLocation Tok); void enterReturn(Sema &S, SourceLocation Tok); void enterVariableInit(SourceLocation Tok, Decl *D); /// Computing a type for the function argument may require running /// overloading, so we postpone its computation until it is actually needed. /// /// Clients should be very careful when using this funciton, as it stores a /// function_ref, clients should make sure all calls to get() with the same /// location happen while function_ref is alive. void enterFunctionArgument(SourceLocation Tok, llvm::function_ref<QualType()> ComputeType); void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc); void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind, SourceLocation OpLoc); void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op); void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base); void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS); /// Handles all type casts, including C-style cast, C++ casts, etc. void enterTypeCast(SourceLocation Tok, QualType CastType); QualType get(SourceLocation Tok) const { if (Tok != ExpectedLoc) return QualType(); if (!Type.isNull()) return Type; if (ComputeType) return ComputeType(); return QualType(); } private: /// Start position of a token for which we store expected type. SourceLocation ExpectedLoc; /// Expected type for a token starting at ExpectedLoc. QualType Type; /// A function to compute expected type at ExpectedLoc. It is only considered /// if Type is null. llvm::function_ref<QualType()> ComputeType; }; /// Sema - This implements semantic analysis and AST building for C. class Sema { Sema(const Sema &) = delete; void operator=(const Sema &) = delete; ///Source of additional semantic information. ExternalSemaSource *ExternalSource; ///Whether Sema has generated a multiplexer and has to delete it. bool isMultiplexExternalSource; static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD); bool isVisibleSlow(const NamedDecl *D); /// Determine whether two declarations should be linked together, given that /// the old declaration might not be visible and the new declaration might /// not have external linkage. bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old, const NamedDecl *New) { if (isVisible(Old)) return true; // See comment in below overload for why it's safe to compute the linkage // of the new declaration here. 
if (New->isExternallyDeclarable()) { assert(Old->isExternallyDeclarable() && "should not have found a non-externally-declarable previous decl"); return true; } return false; } bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New); void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem, QualType ResultTy, ArrayRef<QualType> Args); public: typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef OpaquePtr<QualType> TypeTy; OpenCLOptions OpenCLFeatures; FPOptions FPFeatures; const LangOptions &LangOpts; Preprocessor &PP; ASTContext &Context; ASTConsumer &Consumer; DiagnosticsEngine &Diags; SourceManager &SourceMgr; /// Flag indicating whether or not to collect detailed statistics. bool CollectStats; /// Code-completion consumer. CodeCompleteConsumer *CodeCompleter; /// CurContext - This is the current declaration context of parsing. DeclContext *CurContext; /// Generally null except when we temporarily switch decl contexts, /// like in \see ActOnObjCTemporaryExitContainerContext. DeclContext *OriginalLexicalContext; /// VAListTagName - The declaration name corresponding to __va_list_tag. /// This is used as part of a hack to omit that class from ADL results. DeclarationName VAListTagName; bool MSStructPragmaOn; // True when \#pragma ms_struct on /// Controls member pointer representation format under the MS ABI. LangOptions::PragmaMSPointersToMembersKind MSPointerToMemberRepresentationMethod; /// Stack of active SEH __finally scopes. Can be empty. SmallVector<Scope*, 2> CurrentSEHFinally; /// Source location for newly created implicit MSInheritanceAttrs SourceLocation ImplicitMSInheritanceAttrLoc; /// pragma clang section kind enum PragmaClangSectionKind { PCSK_Invalid = 0, PCSK_BSS = 1, PCSK_Data = 2, PCSK_Rodata = 3, PCSK_Text = 4 }; enum PragmaClangSectionAction { PCSA_Set = 0, PCSA_Clear = 1 }; struct PragmaClangSection { std::string SectionName; bool Valid = false; SourceLocation PragmaLocation; void Act(SourceLocation PragmaLocation, PragmaClangSectionAction Action, StringLiteral* Name); }; PragmaClangSection PragmaClangBSSSection; PragmaClangSection PragmaClangDataSection; PragmaClangSection PragmaClangRodataSection; PragmaClangSection PragmaClangTextSection; enum PragmaMsStackAction { PSK_Reset = 0x0, // #pragma () PSK_Set = 0x1, // #pragma (value) PSK_Push = 0x2, // #pragma (push[, id]) PSK_Pop = 0x4, // #pragma (pop[, id]) PSK_Show = 0x8, // #pragma (show) -- only for "pack"! 
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value) PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value) }; template<typename ValueType> struct PragmaStack { struct Slot { llvm::StringRef StackSlotLabel; ValueType Value; SourceLocation PragmaLocation; SourceLocation PragmaPushLocation; Slot(llvm::StringRef StackSlotLabel, ValueType Value, SourceLocation PragmaLocation, SourceLocation PragmaPushLocation) : StackSlotLabel(StackSlotLabel), Value(Value), PragmaLocation(PragmaLocation), PragmaPushLocation(PragmaPushLocation) {} }; void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, ValueType Value); // MSVC seems to add artificial slots to #pragma stacks on entering a C++ // method body to restore the stacks on exit, so it works like this: // // struct S { // #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>) // void Method {} // #pragma <name>(pop, InternalPragmaSlot) // }; // // It works even with #pragma vtordisp, although MSVC doesn't support // #pragma vtordisp(push [, id], n) // syntax. // // Push / pop a named sentinel slot. void SentinelAction(PragmaMsStackAction Action, StringRef Label) { assert((Action == PSK_Push || Action == PSK_Pop) && "Can only push / pop #pragma stack sentinels!"); Act(CurrentPragmaLocation, Action, Label, CurrentValue); } // Constructors. explicit PragmaStack(const ValueType &Default) : DefaultValue(Default), CurrentValue(Default) {} bool hasValue() const { return CurrentValue != DefaultValue; } SmallVector<Slot, 2> Stack; ValueType DefaultValue; // Value used for PSK_Reset action. ValueType CurrentValue; SourceLocation CurrentPragmaLocation; }; // FIXME: We should serialize / deserialize these if they occur in a PCH (but // we shouldn't do so if they're in a module). /// Whether to insert vtordisps prior to virtual bases in the Microsoft /// C++ ABI. Possible values are 0, 1, and 2, which mean: /// /// 0: Suppress all vtordisps /// 1: Insert vtordisps in the presence of vbase overrides and non-trivial /// structors /// 2: Always insert vtordisps to support RTTI on partially constructed /// objects PragmaStack<MSVtorDispAttr::Mode> VtorDispStack; // #pragma pack. // Sentinel to represent when the stack is set to mac68k alignment. static const unsigned kMac68kAlignmentSentinel = ~0U; PragmaStack<unsigned> PackStack; // The current #pragma pack values and locations at each #include. struct PackIncludeState { unsigned CurrentValue; SourceLocation CurrentPragmaLocation; bool HasNonDefaultValue, ShouldWarnOnInclude; }; SmallVector<PackIncludeState, 8> PackIncludeStack; // Segment #pragmas. PragmaStack<StringLiteral *> DataSegStack; PragmaStack<StringLiteral *> BSSSegStack; PragmaStack<StringLiteral *> ConstSegStack; PragmaStack<StringLiteral *> CodeSegStack; // RAII object to push / pop sentinel slots for all MS #pragma stacks. // Actions should be performed only if we enter / exit a C++ method body. class PragmaStackSentinelRAII { public: PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct); ~PragmaStackSentinelRAII(); private: Sema &S; StringRef SlotLabel; bool ShouldAct; }; /// A mapping that describes the nullability we've seen in each header file. FileNullabilityMap NullabilityMap; /// Last section used with #pragma init_seg. StringLiteral *CurInitSeg; SourceLocation CurInitSegLoc; /// VisContext - Manages the stack for \#pragma GCC visibility. void *VisContext; // Really a "PragmaVisStack*" /// This an attribute introduced by \#pragma clang attribute. 
struct PragmaAttributeEntry { SourceLocation Loc; ParsedAttr *Attribute; SmallVector<attr::SubjectMatchRule, 4> MatchRules; bool IsUsed; }; /// A push'd group of PragmaAttributeEntries. struct PragmaAttributeGroup { /// The location of the push attribute. SourceLocation Loc; /// The namespace of this push group. const IdentifierInfo *Namespace; SmallVector<PragmaAttributeEntry, 2> Entries; }; SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack; /// The declaration that is currently receiving an attribute from the /// #pragma attribute stack. const Decl *PragmaAttributeCurrentTargetDecl; /// This represents the last location of a "#pragma clang optimize off" /// directive if such a directive has not been closed by an "on" yet. If /// optimizations are currently "on", this is set to an invalid location. SourceLocation OptimizeOffPragmaLocation; /// Flag indicating if Sema is building a recovery call expression. /// /// This flag is used to avoid building recovery call expressions /// if Sema is already doing so, which would cause infinite recursions. bool IsBuildingRecoveryCallExpr; /// Used to control the generation of ExprWithCleanups. CleanupInfo Cleanup; /// ExprCleanupObjects - This is the stack of objects requiring /// cleanup that are created by the current full expression. The /// element type here is ExprWithCleanups::Object. SmallVector<BlockDecl*, 8> ExprCleanupObjects; /// Store a list of either DeclRefExprs or MemberExprs /// that contain a reference to a variable (constant) that may or may not /// be odr-used in this Expr, and we won't know until all lvalue-to-rvalue /// and discarded value conversions have been applied to all subexpressions /// of the enclosing full expression. This is cleared at the end of each /// full expression. llvm::SmallPtrSet<Expr*, 2> MaybeODRUseExprs; std::unique_ptr<sema::FunctionScopeInfo> PreallocatedFunctionScope; /// Stack containing information about each of the nested /// function, block, and method scopes that are currently active. SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes; typedef LazyVector<TypedefNameDecl *, ExternalSemaSource, &ExternalSemaSource::ReadExtVectorDecls, 2, 2> ExtVectorDeclsType; /// ExtVectorDecls - This is a list all the extended vector types. This allows /// us to associate a raw vector type with one of the ext_vector type names. /// This is only necessary for issuing pretty diagnostics. ExtVectorDeclsType ExtVectorDecls; /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes. std::unique_ptr<CXXFieldCollector> FieldCollector; typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType; /// Set containing all declared private fields that are not used. NamedDeclSetType UnusedPrivateFields; /// Set containing all typedefs that are likely unused. llvm::SmallSetVector<const TypedefNameDecl *, 4> UnusedLocalTypedefNameCandidates; /// Delete-expressions to be analyzed at the end of translation unit /// /// This list contains class members, and locations of delete-expressions /// that could not be proven as to whether they mismatch with new-expression /// used in initializer of the field. typedef std::pair<SourceLocation, bool> DeleteExprLoc; typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs; llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs; typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy; /// PureVirtualClassDiagSet - a set of class declarations which we have /// emitted a list of pure virtual functions. 
Used to prevent emitting the /// same list more than once. std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet; /// ParsingInitForAutoVars - a set of declarations with auto types for which /// we are currently parsing the initializer. llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars; /// Look for a locally scoped extern "C" declaration by the given name. NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name); typedef LazyVector<VarDecl *, ExternalSemaSource, &ExternalSemaSource::ReadTentativeDefinitions, 2, 2> TentativeDefinitionsType; /// All the tentative definitions encountered in the TU. TentativeDefinitionsType TentativeDefinitions; typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2> UnusedFileScopedDeclsType; /// The set of file scoped decls seen so far that have not been used /// and must warn if not used. Only contains the first declaration. UnusedFileScopedDeclsType UnusedFileScopedDecls; typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadDelegatingConstructors, 2, 2> DelegatingCtorDeclsType; /// All the delegating constructors seen so far in the file, used for /// cycle detection at the end of the TU. DelegatingCtorDeclsType DelegatingCtorDecls; /// All the overriding functions seen during a class definition /// that had their exception spec checks delayed, plus the overridden /// function. SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2> DelayedOverridingExceptionSpecChecks; /// All the function redeclarations seen during a class definition that had /// their exception spec checks delayed, plus the prior declaration they /// should be checked against. Except during error recovery, the new decl /// should always be a friend declaration, as that's the only valid way to /// redeclare a special member before its class is complete. SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2> DelayedEquivalentExceptionSpecChecks; /// All the members seen during a class definition which were both /// explicitly defaulted and had explicitly-specified exception /// specifications, along with the function type containing their /// user-specified exception specification. Those exception specifications /// were overridden with the default specifications, but we still need to /// check whether they are compatible with the default specification, and /// we can't do that until the nesting set of class definitions is complete. SmallVector<std::pair<CXXMethodDecl*, const FunctionProtoType*>, 2> DelayedDefaultedMemberExceptionSpecs; typedef llvm::MapVector<const FunctionDecl *, std::unique_ptr<LateParsedTemplate>> LateParsedTemplateMapT; LateParsedTemplateMapT LateParsedTemplateMap; /// Callback to the parser to parse templated functions when needed. 
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT); typedef void LateTemplateParserCleanupCB(void *P); LateTemplateParserCB *LateTemplateParser; LateTemplateParserCleanupCB *LateTemplateParserCleanup; void *OpaqueParser; void SetLateTemplateParser(LateTemplateParserCB *LTP, LateTemplateParserCleanupCB *LTPCleanup, void *P) { LateTemplateParser = LTP; LateTemplateParserCleanup = LTPCleanup; OpaqueParser = P; } class DelayedDiagnostics; class DelayedDiagnosticsState { sema::DelayedDiagnosticPool *SavedPool; friend class Sema::DelayedDiagnostics; }; typedef DelayedDiagnosticsState ParsingDeclState; typedef DelayedDiagnosticsState ProcessingContextState; /// A class which encapsulates the logic for delaying diagnostics /// during parsing and other processing. class DelayedDiagnostics { /// The current pool of diagnostics into which delayed /// diagnostics should go. sema::DelayedDiagnosticPool *CurPool; public: DelayedDiagnostics() : CurPool(nullptr) {} /// Adds a delayed diagnostic. void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h /// Determines whether diagnostics should be delayed. bool shouldDelayDiagnostics() { return CurPool != nullptr; } /// Returns the current delayed-diagnostics pool. sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; } /// Enter a new scope. Access and deprecation diagnostics will be /// collected in this pool. DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = &pool; return state; } /// Leave a delayed-diagnostic state that was previously pushed. /// Do not emit any of the diagnostics. This is performed as part /// of the bookkeeping of popping a pool "properly". void popWithoutEmitting(DelayedDiagnosticsState state) { CurPool = state.SavedPool; } /// Enter a new scope where access and deprecation diagnostics are /// not delayed. DelayedDiagnosticsState pushUndelayed() { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = nullptr; return state; } /// Undo a previous pushUndelayed(). void popUndelayed(DelayedDiagnosticsState state) { assert(CurPool == nullptr); CurPool = state.SavedPool; } } DelayedDiagnostics; /// A RAII object to temporarily push a declaration context. class ContextRAII { private: Sema &S; DeclContext *SavedContext; ProcessingContextState SavedContextState; QualType SavedCXXThisTypeOverride; public: ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true) : S(S), SavedContext(S.CurContext), SavedContextState(S.DelayedDiagnostics.pushUndelayed()), SavedCXXThisTypeOverride(S.CXXThisTypeOverride) { assert(ContextToPush && "pushing null context"); S.CurContext = ContextToPush; if (NewThisContext) S.CXXThisTypeOverride = QualType(); } void pop() { if (!SavedContext) return; S.CurContext = SavedContext; S.DelayedDiagnostics.popUndelayed(SavedContextState); S.CXXThisTypeOverride = SavedCXXThisTypeOverride; SavedContext = nullptr; } ~ContextRAII() { pop(); } }; /// RAII object to handle the state changes required to synthesize /// a function body. 
class SynthesizedFunctionScope { Sema &S; Sema::ContextRAII SavedContext; bool PushedCodeSynthesisContext = false; public: SynthesizedFunctionScope(Sema &S, DeclContext *DC) : S(S), SavedContext(S, DC) { S.PushFunctionScope(); S.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::PotentiallyEvaluated); if (auto *FD = dyn_cast<FunctionDecl>(DC)) FD->setWillHaveBody(true); else assert(isa<ObjCMethodDecl>(DC)); } void addContextNote(SourceLocation UseLoc) { assert(!PushedCodeSynthesisContext); Sema::CodeSynthesisContext Ctx; Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction; Ctx.PointOfInstantiation = UseLoc; Ctx.Entity = cast<Decl>(S.CurContext); S.pushCodeSynthesisContext(Ctx); PushedCodeSynthesisContext = true; } ~SynthesizedFunctionScope() { if (PushedCodeSynthesisContext) S.popCodeSynthesisContext(); if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext)) FD->setWillHaveBody(false); S.PopExpressionEvaluationContext(); S.PopFunctionScopeInfo(); } }; /// WeakUndeclaredIdentifiers - Identifiers contained in /// \#pragma weak before declared. rare. may alias another /// identifier, declared or undeclared llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers; /// ExtnameUndeclaredIdentifiers - Identifiers contained in /// \#pragma redefine_extname before declared. Used in Solaris system headers /// to define functions that occur in multiple standards to call the version /// in the currently selected standard. llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers; /// Load weak undeclared identifiers from the external source. void LoadExternalWeakUndeclaredIdentifiers(); /// WeakTopLevelDecl - Translation-unit scoped declarations generated by /// \#pragma weak during processing of other Decls. /// I couldn't figure out a clean way to generate these in-line, so /// we store them here and handle separately -- which is a hack. /// It would be best to refactor this. SmallVector<Decl*,2> WeakTopLevelDecl; IdentifierResolver IdResolver; /// Translation Unit Scope - useful to Objective-C actions that need /// to lookup file scope declarations in the "ordinary" C decl namespace. /// For example, user-defined classes, built-in "id" type, etc. Scope *TUScope; /// The C++ "std" namespace, where the standard library resides. LazyDeclPtr StdNamespace; /// The C++ "std::bad_alloc" class, which is defined by the C++ /// standard library. LazyDeclPtr StdBadAlloc; /// The C++ "std::align_val_t" enum class, which is defined by the C++ /// standard library. LazyDeclPtr StdAlignValT; /// The C++ "std::experimental" namespace, where the experimental parts /// of the standard library resides. NamespaceDecl *StdExperimentalNamespaceCache; /// The C++ "std::initializer_list" template, which is defined in /// \<initializer_list>. ClassTemplateDecl *StdInitializerList; /// The C++ "std::coroutine_traits" template, which is defined in /// \<coroutine_traits> ClassTemplateDecl *StdCoroutineTraitsCache; /// The C++ "type_info" declaration, which is defined in \<typeinfo>. RecordDecl *CXXTypeInfoDecl; /// The MSVC "_GUID" struct, which is defined in MSVC header files. RecordDecl *MSVCGuidDecl; /// Caches identifiers/selectors for NSFoundation APIs. std::unique_ptr<NSAPI> NSAPIObj; /// The declaration of the Objective-C NSNumber class. ObjCInterfaceDecl *NSNumberDecl; /// The declaration of the Objective-C NSValue class. ObjCInterfaceDecl *NSValueDecl; /// Pointer to NSNumber type (NSNumber *). 
QualType NSNumberPointer; /// Pointer to NSValue type (NSValue *). QualType NSValuePointer; /// The Objective-C NSNumber methods used to create NSNumber literals. ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods]; /// The declaration of the Objective-C NSString class. ObjCInterfaceDecl *NSStringDecl; /// Pointer to NSString type (NSString *). QualType NSStringPointer; /// The declaration of the stringWithUTF8String: method. ObjCMethodDecl *StringWithUTF8StringMethod; /// The declaration of the valueWithBytes:objCType: method. ObjCMethodDecl *ValueWithBytesObjCTypeMethod; /// The declaration of the Objective-C NSArray class. ObjCInterfaceDecl *NSArrayDecl; /// The declaration of the arrayWithObjects:count: method. ObjCMethodDecl *ArrayWithObjectsMethod; /// The declaration of the Objective-C NSDictionary class. ObjCInterfaceDecl *NSDictionaryDecl; /// The declaration of the dictionaryWithObjects:forKeys:count: method. ObjCMethodDecl *DictionaryWithObjectsMethod; /// id<NSCopying> type. QualType QIDNSCopying; /// will hold 'respondsToSelector:' Selector RespondsToSelectorSel; /// A flag to remember whether the implicit forms of operator new and delete /// have been declared. bool GlobalNewDeleteDeclared; /// A flag to indicate that we're in a context that permits abstract /// references to fields. This is really a bool AllowAbstractFieldReference; /// Describes how the expressions currently being parsed are /// evaluated at run-time, if at all. enum class ExpressionEvaluationContext { /// The current expression and its subexpressions occur within an /// unevaluated operand (C++11 [expr]p7), such as the subexpression of /// \c sizeof, where the type of the expression may be significant but /// no code will be generated to evaluate the value of the expression at /// run time. Unevaluated, /// The current expression occurs within a braced-init-list within /// an unevaluated operand. This is mostly like a regular unevaluated /// context, except that we still instantiate constexpr functions that are /// referenced here so that we can perform narrowing checks correctly. UnevaluatedList, /// The current expression occurs within a discarded statement. /// This behaves largely similarly to an unevaluated operand in preventing /// definitions from being required, but not in other ways. DiscardedStatement, /// The current expression occurs within an unevaluated /// operand that unconditionally permits abstract references to /// fields, such as a SIZE operator in MS-style inline assembly. UnevaluatedAbstract, /// The current context is "potentially evaluated" in C++11 terms, /// but the expression is evaluated at compile-time (like the values of /// cases in a switch statement). ConstantEvaluated, /// The current expression is potentially evaluated at run time, /// which means that code may be generated to evaluate the value of the /// expression at run time. PotentiallyEvaluated, /// The current expression is potentially evaluated, but any /// declarations referenced inside that expression are only used if /// in fact the current expression is used. /// /// This value is used when parsing default function arguments, for which /// we would like to provide diagnostics (e.g., passing non-POD arguments /// through varargs) but do not want to mark declarations as "referenced" /// until the default argument is used. PotentiallyEvaluatedIfUsed }; /// Data structure used to record current or nested /// expression evaluation contexts. 
struct ExpressionEvaluationContextRecord { /// The expression evaluation context. ExpressionEvaluationContext Context; /// Whether the enclosing context needed a cleanup. CleanupInfo ParentCleanup; /// Whether we are in a decltype expression. bool IsDecltype; /// The number of active cleanup objects when we entered /// this expression evaluation context. unsigned NumCleanupObjects; /// The number of typos encountered during this expression evaluation /// context (i.e. the number of TypoExprs created). unsigned NumTypos; llvm::SmallPtrSet<Expr*, 2> SavedMaybeODRUseExprs; /// The lambdas that are present within this context, if it /// is indeed an unevaluated context. SmallVector<LambdaExpr *, 2> Lambdas; /// The declaration that provides context for lambda expressions /// and block literals if the normal declaration context does not /// suffice, e.g., in a default function argument. Decl *ManglingContextDecl; /// The context information used to mangle lambda expressions /// and block literals within this context. /// /// This mangling information is allocated lazily, since most contexts /// do not have lambda expressions or block literals. std::unique_ptr<MangleNumberingContext> MangleNumbering; /// If we are processing a decltype type, a set of call expressions /// for which we have deferred checking the completeness of the return type. SmallVector<CallExpr *, 8> DelayedDecltypeCalls; /// If we are processing a decltype type, a set of temporary binding /// expressions for which we have deferred checking the destructor. SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds; llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs; /// \brief Describes whether we are in an expression constext which we have /// to handle differently. enum ExpressionKind { EK_Decltype, EK_TemplateArgument, EK_Other } ExprContext; ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context, unsigned NumCleanupObjects, CleanupInfo ParentCleanup, Decl *ManglingContextDecl, ExpressionKind ExprContext) : Context(Context), ParentCleanup(ParentCleanup), NumCleanupObjects(NumCleanupObjects), NumTypos(0), ManglingContextDecl(ManglingContextDecl), MangleNumbering(), ExprContext(ExprContext) {} /// Retrieve the mangling numbering context, used to consistently /// number constructs like lambdas for mangling. MangleNumberingContext &getMangleNumberingContext(ASTContext &Ctx); bool isUnevaluated() const { return Context == ExpressionEvaluationContext::Unevaluated || Context == ExpressionEvaluationContext::UnevaluatedAbstract || Context == ExpressionEvaluationContext::UnevaluatedList; } bool isConstantEvaluated() const { return Context == ExpressionEvaluationContext::ConstantEvaluated; } }; /// A stack of expression evaluation contexts. SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts; /// Emit a warning for all pending noderef expressions that we recorded. void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec); /// Compute the mangling number context for a lambda expression or /// block literal. /// /// \param DC - The DeclContext containing the lambda expression or /// block literal. /// \param[out] ManglingContextDecl - Returns the ManglingContextDecl /// associated with the context, if relevant. MangleNumberingContext *getCurrentMangleNumberContext( const DeclContext *DC, Decl *&ManglingContextDecl); /// SpecialMemberOverloadResult - The overloading result for a special member /// function. /// /// This is basically a wrapper around PointerIntPair. 
The lowest bits of the /// integer are used to determine whether overload resolution succeeded. class SpecialMemberOverloadResult { public: enum Kind { NoMemberOrDeleted, Ambiguous, Success }; private: llvm::PointerIntPair<CXXMethodDecl*, 2> Pair; public: SpecialMemberOverloadResult() : Pair() {} SpecialMemberOverloadResult(CXXMethodDecl *MD) : Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {} CXXMethodDecl *getMethod() const { return Pair.getPointer(); } void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); } Kind getKind() const { return static_cast<Kind>(Pair.getInt()); } void setKind(Kind K) { Pair.setInt(K); } }; class SpecialMemberOverloadResultEntry : public llvm::FastFoldingSetNode, public SpecialMemberOverloadResult { public: SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID) : FastFoldingSetNode(ID) {} }; /// A cache of special member function overload resolution results /// for C++ records. llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache; /// A cache of the flags available in enumerations with the flag_bits /// attribute. mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache; /// The kind of translation unit we are processing. /// /// When we're processing a complete translation unit, Sema will perform /// end-of-translation-unit semantic tasks (such as creating /// initializers for tentative definitions in C) once parsing has /// completed. Modules and precompiled headers perform different kinds of /// checks. TranslationUnitKind TUKind; llvm::BumpPtrAllocator BumpAlloc; /// The number of SFINAE diagnostics that have been trapped. unsigned NumSFINAEErrors; typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>> UnparsedDefaultArgInstantiationsMap; /// A mapping from parameters with unparsed default arguments to the /// set of instantiations of each parameter. /// /// This mapping is a temporary data structure used when parsing /// nested class templates or nested classes of class templates, /// where we might end up instantiating an inner class before the /// default arguments of its methods have been parsed. UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations; // Contains the locations of the beginning of unparsed default // argument locations. llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs; /// UndefinedInternals - all the used, undefined objects which require a /// definition in this translation unit. llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed; /// Determine if VD, which must be a variable or function, is an external /// symbol that nonetheless can't be referenced from outside this translation /// unit because its type has no linkage and it's not extern "C". bool isExternalWithNoLinkageType(ValueDecl *VD); /// Obtain a sorted list of functions that are undefined but ODR-used. void getUndefinedButUsed( SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined); /// Retrieves list of suspicious delete-expressions that will be checked at /// the end of translation unit. const llvm::MapVector<FieldDecl *, DeleteLocs> & getMismatchingDeleteExpressions() const; typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods; typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool; /// Method Pool - allows efficient lookup when typechecking messages to "id". /// We need to maintain a list, since selectors can have differing signatures /// across classes. 
In Cocoa, this happens to be extremely uncommon (only 1% /// of selectors are "overloaded"). /// At the head of the list it is recorded whether there were 0, 1, or >= 2 /// methods inside categories with a particular selector. GlobalMethodPool MethodPool; /// Method selectors used in a \@selector expression. Used for implementation /// of -Wselector. llvm::MapVector<Selector, SourceLocation> ReferencedSelectors; /// Kinds of C++ special members. enum CXXSpecialMember { CXXDefaultConstructor, CXXCopyConstructor, CXXMoveConstructor, CXXCopyAssignment, CXXMoveAssignment, CXXDestructor, CXXInvalid }; typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember> SpecialMemberDecl; /// The C++ special members which we are currently in the process of /// declaring. If this process recursively triggers the declaration of the /// same special member, we should act as if it is not yet declared. llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared; /// The function definitions which were renamed as part of typo-correction /// to match their respective declarations. We want to keep track of them /// to ensure that we don't emit a "redefinition" error if we encounter a /// correctly named definition after the renamed definition. llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions; /// Stack of types that correspond to the parameter entities that are /// currently being copy-initialized. Can be empty. llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes; void ReadMethodPool(Selector Sel); void updateOutOfDateSelector(Selector Sel); /// Private Helper predicate to check for 'self'. bool isSelfExpr(Expr *RExpr); bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method); /// Cause the active diagnostic on the DiagosticsEngine to be /// emitted. This is closely coupled to the SemaDiagnosticBuilder class and /// should not be used elsewhere. void EmitCurrentDiagnostic(unsigned DiagID); /// Records and restores the FP_CONTRACT state on entry/exit of compound /// statements. class FPContractStateRAII { public: FPContractStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.FPFeatures) {} ~FPContractStateRAII() { S.FPFeatures = OldFPFeaturesState; } private: Sema& S; FPOptions OldFPFeaturesState; }; void addImplicitTypedef(StringRef Name, QualType T); public: Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer, TranslationUnitKind TUKind = TU_Complete, CodeCompleteConsumer *CompletionConsumer = nullptr); ~Sema(); /// Perform initialization that occurs after the parser has been /// initialized but before it parses anything. void Initialize(); const LangOptions &getLangOpts() const { return LangOpts; } OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; } FPOptions &getFPOptions() { return FPFeatures; } DiagnosticsEngine &getDiagnostics() const { return Diags; } SourceManager &getSourceManager() const { return SourceMgr; } Preprocessor &getPreprocessor() const { return PP; } ASTContext &getASTContext() const { return Context; } ASTConsumer &getASTConsumer() const { return Consumer; } ASTMutationListener *getASTMutationListener() const; ExternalSemaSource* getExternalSource() const { return ExternalSource; } ///Registers an external source. If an external source already exists, /// creates a multiplex external source and appends to it. /// ///\param[in] E - A non-null external sema source. /// void addExternalSource(ExternalSemaSource *E); void PrintStats() const; /// Helper class that creates diagnostics with optional /// template instantiation stacks. 
/// /// This class provides a wrapper around the basic DiagnosticBuilder /// class that emits diagnostics. SemaDiagnosticBuilder is /// responsible for emitting the diagnostic (as DiagnosticBuilder /// does) and, if the diagnostic comes from inside a template /// instantiation, printing the template instantiation stack as /// well. class SemaDiagnosticBuilder : public DiagnosticBuilder { Sema &SemaRef; unsigned DiagID; public: SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID) : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { } // This is a cunning lie. DiagnosticBuilder actually performs move // construction in its copy constructor (but due to varied uses, it's not // possible to conveniently express this as actual move construction). So // the default copy ctor here is fine, because the base class disables the // source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe no-op // in that case anwyay. SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default; ~SemaDiagnosticBuilder() { // If we aren't active, there is nothing to do. if (!isActive()) return; // Otherwise, we need to emit the diagnostic. First flush the underlying // DiagnosticBuilder data, and clear the diagnostic builder itself so it // won't emit the diagnostic in its own destructor. // // This seems wasteful, in that as written the DiagnosticBuilder dtor will // do its own needless checks to see if the diagnostic needs to be // emitted. However, because we take care to ensure that the builder // objects never escape, a sufficiently smart compiler will be able to // eliminate that code. FlushCounts(); Clear(); // Dispatch to Sema to emit the diagnostic. SemaRef.EmitCurrentDiagnostic(DiagID); } /// Teach operator<< to produce an object of the correct type. template<typename T> friend const SemaDiagnosticBuilder &operator<<( const SemaDiagnosticBuilder &Diag, const T &Value) { const DiagnosticBuilder &BaseDiag = Diag; BaseDiag << Value; return Diag; } }; /// Emit a diagnostic. SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) { DiagnosticBuilder DB = Diags.Report(Loc, DiagID); return SemaDiagnosticBuilder(DB, *this, DiagID); } /// Emit a partial diagnostic. SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD); /// Build a partial diagnostic. PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h bool findMacroSpelling(SourceLocation &loc, StringRef name); /// Get a string to suggest for zero-initialization of a type. std::string getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const; std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const; /// Calls \c Lexer::getLocForEndOfToken() SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0); /// Retrieve the module loader associated with the preprocessor. ModuleLoader &getModuleLoader() const; void emitAndClearUnusedLocalTypedefWarnings(); void ActOnStartOfTranslationUnit(); void ActOnEndOfTranslationUnit(); void CheckDelegatingCtorCycles(); Scope *getScopeForContext(DeclContext *Ctx); void PushFunctionScope(); void PushBlockScope(Scope *BlockScope, BlockDecl *Block); sema::LambdaScopeInfo *PushLambdaScope(); /// This is used to inform Sema what the current TemplateParameterDepth /// is during Parsing. Currently it is used to pass on the depth /// when parsing generic lambda 'auto' parameters. 
void RecordParsingTemplateParameterDepth(unsigned Depth); void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD, RecordDecl *RD, CapturedRegionKind K); void PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr, const Decl *D = nullptr, const BlockExpr *blkExpr = nullptr); sema::FunctionScopeInfo *getCurFunction() const { return FunctionScopes.empty() ? nullptr : FunctionScopes.back(); } sema::FunctionScopeInfo *getEnclosingFunction() const; void setFunctionHasBranchIntoScope(); void setFunctionHasBranchProtectedScope(); void setFunctionHasIndirectGoto(); void PushCompoundScope(bool IsStmtExpr); void PopCompoundScope(); sema::CompoundScopeInfo &getCurCompoundScope() const; bool hasAnyUnrecoverableErrorsInThisFunction() const; /// Retrieve the current block, if any. sema::BlockScopeInfo *getCurBlock(); /// Retrieve the current lambda scope info, if any. /// \param IgnoreNonLambdaCapturingScope true if should find the top-most /// lambda scope info ignoring all inner capturing scopes that are not /// lambda scopes. sema::LambdaScopeInfo * getCurLambda(bool IgnoreNonLambdaCapturingScope = false); /// Retrieve the current generic lambda info, if any. sema::LambdaScopeInfo *getCurGenericLambda(); /// Retrieve the current captured region, if any. sema::CapturedRegionScopeInfo *getCurCapturedRegion(); /// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; } void ActOnComment(SourceRange Comment); //===--------------------------------------------------------------------===// // Type Analysis / Processing: SemaType.cpp. // QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs, const DeclSpec *DS = nullptr); QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA, const DeclSpec *DS = nullptr); QualType BuildPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildReferenceType(QualType T, bool LValueRef, SourceLocation Loc, DeclarationName Entity); QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM, Expr *ArraySize, unsigned Quals, SourceRange Brackets, DeclarationName Entity); QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc); QualType BuildExtVectorType(QualType T, Expr *ArraySize, SourceLocation AttrLoc); QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace, SourceLocation AttrLoc); /// Same as above, but constructs the AddressSpace index if not provided. QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace, SourceLocation AttrLoc); bool CheckFunctionReturnType(QualType T, SourceLocation Loc); /// Build a function type. /// /// This routine checks the function type according to C++ rules and /// under the assumption that the result type and parameter types have /// just been instantiated from a template. It therefore duplicates /// some of the behavior of GetTypeForDeclarator, but in a much /// simpler form that is only suitable for this narrow use case. /// /// \param T The return type of the function. /// /// \param ParamTypes The parameter types of the function. This array /// will be modified to account for adjustments to the types of the /// function parameters. /// /// \param Loc The location of the entity whose type involves this /// function type or, if there is no such entity, the location of the /// type that will have function type. /// /// \param Entity The name of the entity that involves the function /// type, if known. 
/// /// \param EPI Extra information about the function type. Usually this will /// be taken from an existing function with the same prototype. /// /// \returns A suitable function type, if there are no errors. The /// unqualified type will always be a FunctionProtoType. /// Otherwise, returns a NULL type. QualType BuildFunctionType(QualType T, MutableArrayRef<QualType> ParamTypes, SourceLocation Loc, DeclarationName Entity, const FunctionProtoType::ExtProtoInfo &EPI); QualType BuildMemberPointerType(QualType T, QualType Class, SourceLocation Loc, DeclarationName Entity); QualType BuildBlockPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildParenType(QualType T); QualType BuildAtomicType(QualType T, SourceLocation Loc); QualType BuildReadPipeType(QualType T, SourceLocation Loc); QualType BuildWritePipeType(QualType T, SourceLocation Loc); TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S); TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy); /// Package the given type and TSI into a ParsedType. ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo); DeclarationNameInfo GetNameForDeclarator(Declarator &D); DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name); static QualType GetTypeFromParser(ParsedType Ty, TypeSourceInfo **TInfo = nullptr); CanThrowResult canThrow(const Expr *E); const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc, const FunctionProtoType *FPT); void UpdateExceptionSpec(FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI); bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range); bool CheckDistantExceptionSpec(QualType T); bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New); bool CheckEquivalentExceptionSpec( const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool CheckEquivalentExceptionSpec( const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID, const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool handlerCanCatch(QualType HandlerType, QualType ExceptionType); bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID, const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const FunctionProtoType *Superset, SourceLocation SuperLoc, const FunctionProtoType *Subset, SourceLocation SubLoc); bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const FunctionProtoType *Target, SourceLocation TargetLoc, const FunctionProtoType *Source, SourceLocation SourceLoc); TypeResult ActOnTypeName(Scope *S, Declarator &D); /// The parser has parsed the context-sensitive type 'instancetype' /// in an Objective-C message declaration. Return the appropriate type. ParsedType ActOnObjCInstanceType(SourceLocation Loc); /// Abstract class used to diagnose incomplete types. 
struct TypeDiagnoser { TypeDiagnoser() {} virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0; virtual ~TypeDiagnoser() {} }; static int getPrintable(int I) { return I; } static unsigned getPrintable(unsigned I) { return I; } static bool getPrintable(bool B) { return B; } static const char * getPrintable(const char *S) { return S; } static StringRef getPrintable(StringRef S) { return S; } static const std::string &getPrintable(const std::string &S) { return S; } static const IdentifierInfo *getPrintable(const IdentifierInfo *II) { return II; } static DeclarationName getPrintable(DeclarationName N) { return N; } static QualType getPrintable(QualType T) { return T; } static SourceRange getPrintable(SourceRange R) { return R; } static SourceRange getPrintable(SourceLocation L) { return L; } static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); } static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();} template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser { unsigned DiagID; std::tuple<const Ts &...> Args; template <std::size_t... Is> void emit(const SemaDiagnosticBuilder &DB, llvm::index_sequence<Is...>) const { // Apply all tuple elements to the builder in order. bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...}; (void)Dummy; } public: BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args) : TypeDiagnoser(), DiagID(DiagID), Args(Args...) { assert(DiagID != 0 && "no diagnostic for type diagnoser"); } void diagnose(Sema &S, SourceLocation Loc, QualType T) override { const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID); emit(DB, llvm::index_sequence_for<Ts...>()); DB << T; } }; private: /// Methods for marking which expressions involve dereferencing a pointer /// marked with the 'noderef' attribute. Expressions are checked bottom up as /// they are parsed, meaning that a noderef pointer may not be accessed. For /// example, in `&*p` where `p` is a noderef pointer, we will first parse the /// `*p`, but need to check that `address of` is called on it. This requires /// keeping a container of all pending expressions and checking if the address /// of them are eventually taken. void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E); void CheckAddressOfNoDeref(const Expr *E); void CheckMemberAccessOfNoDeref(const MemberExpr *E); bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T, TypeDiagnoser *Diagnoser); struct ModuleScope { clang::Module *Module = nullptr; bool ModuleInterface = false; VisibleModuleSet OuterVisibleModules; }; /// The modules we're currently parsing. llvm::SmallVector<ModuleScope, 16> ModuleScopes; /// Get the module whose scope we are currently within. Module *getCurrentModule() const { return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module; } VisibleModuleSet VisibleModules; public: /// Get the module owning an entity. Module *getOwningModule(Decl *Entity) { return Entity->getOwningModule(); } /// Make a merged definition of an existing hidden definition \p ND /// visible at the specified location. void makeMergedDefinitionVisible(NamedDecl *ND); bool isModuleVisible(const Module *M, bool ModulePrivate = false); /// Determine whether a declaration is visible to name lookup. bool isVisible(const NamedDecl *D) { return !D->isHidden() || isVisibleSlow(D); } /// Determine whether any declaration of an entity is visible. 
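  // Editor's note on BoundTypeDiagnoser::emit() above: it uses the usual
  // pre-C++17 trick of expanding a parameter pack into a dummy array so the
  // tuple elements are streamed into the diagnostic builder in order. A
  // self-contained sketch of the same idiom (illustrative only):
  //
  //   template <typename... Ts, std::size_t... Is>
  //   void streamAll(llvm::raw_ostream &OS, const std::tuple<Ts...> &Args,
  //                  llvm::index_sequence<Is...>) {
  //     // The braced-init-list forces left-to-right evaluation of the pack.
  //     bool Dummy[] = {false, ((OS << std::get<Is>(Args)), false)...};
  //     (void)Dummy;
  //   }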
bool hasVisibleDeclaration(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr) { return isVisible(D) || hasVisibleDeclarationSlow(D, Modules); } bool hasVisibleDeclarationSlow(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules); bool hasVisibleMergedDefinition(NamedDecl *Def); bool hasMergedDefinitionInCurrentModule(NamedDecl *Def); /// Determine if \p D and \p Suggested have a structurally compatible /// layout as described in C11 6.2.7/1. bool hasStructuralCompatLayout(Decl *D, Decl *Suggested); /// Determine if \p D has a visible definition. If not, suggest a declaration /// that should be made visible to expose the definition. bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested, bool OnlyNeedComplete = false); bool hasVisibleDefinition(const NamedDecl *D) { NamedDecl *Hidden; return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden); } /// Determine if the template parameter \p D has a visible default argument. bool hasVisibleDefaultArgument(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is an explicit /// specialization declaration for a specialization of a template. (For a /// member specialization, use hasVisibleMemberSpecialization.) bool hasVisibleExplicitSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is a member /// specialization declaration (as opposed to an instantiated declaration). bool hasVisibleMemberSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if \p A and \p B are equivalent internal linkage declarations /// from different modules, and thus an ambiguity error can be downgraded to /// an extension warning. bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A, const NamedDecl *B); void diagnoseEquivalentInternalLinkageDeclarations( SourceLocation Loc, const NamedDecl *D, ArrayRef<const NamedDecl *> Equiv); bool isUsualDeallocationFunction(const CXXMethodDecl *FD); bool isCompleteType(SourceLocation Loc, QualType T) { return !RequireCompleteTypeImpl(Loc, T, nullptr); } bool RequireCompleteType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... Ts> bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteType(Loc, T, Diagnoser); } void completeExprArrayBound(Expr *E); bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser); bool RequireCompleteExprType(Expr *E, unsigned DiagID); template <typename... Ts> bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteExprType(E, Diagnoser); } bool RequireLiteralType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... 
Ts> bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireLiteralType(Loc, T, Diagnoser); } QualType getElaboratedType(ElaboratedTypeKeyword Keyword, const CXXScopeSpec &SS, QualType T, TagDecl *OwnedTagDecl = nullptr); QualType BuildTypeofExprType(Expr *E, SourceLocation Loc); /// If AsUnevaluated is false, E is treated as though it were an evaluated /// context, such as when building a type for decltype(auto). QualType BuildDecltypeType(Expr *E, SourceLocation Loc, bool AsUnevaluated = true); QualType BuildUnaryTransformType(QualType BaseType, UnaryTransformType::UTTKind UKind, SourceLocation Loc); //===--------------------------------------------------------------------===// // Symbol table / Decl tracking callbacks: SemaDecl.cpp. // struct SkipBodyInfo { SkipBodyInfo() : ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr), New(nullptr) {} bool ShouldSkip; bool CheckSameAsPrevious; NamedDecl *Previous; NamedDecl *New; }; DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr); void DiagnoseUseOfUnimplementedSelectors(); bool isSimpleTypeSpecifier(tok::TokenKind Kind) const; ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec *SS = nullptr, bool isClassName = false, bool HasTrailingDot = false, ParsedType ObjectType = nullptr, bool IsCtorOrDtorName = false, bool WantNontrivialTypeSourceInfo = false, bool IsClassTemplateDeductionContext = true, IdentifierInfo **CorrectedII = nullptr); TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S); bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S); void DiagnoseUnknownTypeName(IdentifierInfo *&II, SourceLocation IILoc, Scope *S, CXXScopeSpec *SS, ParsedType &SuggestedType, bool IsTemplateName = false); /// Attempt to behave like MSVC in situations where lookup of an unqualified /// type name has failed in a dependent context. In these situations, we /// automatically form a DependentTypeName that will retry lookup in a related /// scope during instantiation. ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II, SourceLocation NameLoc, bool IsTemplateTypeArg); /// Describes the result of the name lookup and resolution performed /// by \c ClassifyName(). 
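  // Editor's illustrative sketch (not in the original header): the variadic
  // RequireCompleteType/RequireLiteralType overloads above bind extra
  // diagnostic arguments through BoundTypeDiagnoser. A typical call site,
  // assuming `SemaRef`, `Loc`, `T`, and a suitable diagnostic ID (the ID below
  // is only a placeholder):
  //
  //   if (SemaRef.RequireCompleteType(Loc, T,
  //                                   diag::err_typecheck_decl_incomplete_type))
  //     return true;  // the type was incomplete; a diagnostic was emitted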
enum NameClassificationKind { NC_Unknown, NC_Error, NC_Keyword, NC_Type, NC_Expression, NC_NestedNameSpecifier, NC_TypeTemplate, NC_VarTemplate, NC_FunctionTemplate }; class NameClassification { NameClassificationKind Kind; ExprResult Expr; TemplateName Template; ParsedType Type; explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {} public: NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {} NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {} NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {} static NameClassification Error() { return NameClassification(NC_Error); } static NameClassification Unknown() { return NameClassification(NC_Unknown); } static NameClassification NestedNameSpecifier() { return NameClassification(NC_NestedNameSpecifier); } static NameClassification TypeTemplate(TemplateName Name) { NameClassification Result(NC_TypeTemplate); Result.Template = Name; return Result; } static NameClassification VarTemplate(TemplateName Name) { NameClassification Result(NC_VarTemplate); Result.Template = Name; return Result; } static NameClassification FunctionTemplate(TemplateName Name) { NameClassification Result(NC_FunctionTemplate); Result.Template = Name; return Result; } NameClassificationKind getKind() const { return Kind; } ParsedType getType() const { assert(Kind == NC_Type); return Type; } ExprResult getExpression() const { assert(Kind == NC_Expression); return Expr; } TemplateName getTemplateName() const { assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate || Kind == NC_VarTemplate); return Template; } TemplateNameKind getTemplateNameKind() const { switch (Kind) { case NC_TypeTemplate: return TNK_Type_template; case NC_FunctionTemplate: return TNK_Function_template; case NC_VarTemplate: return TNK_Var_template; default: llvm_unreachable("unsupported name classification."); } } }; /// Perform name lookup on the given name, classifying it based on /// the results of name lookup and the following token. /// /// This routine is used by the parser to resolve identifiers and help direct /// parsing. When the identifier cannot be found, this routine will attempt /// to correct the typo and classify based on the resulting name. /// /// \param S The scope in which we're performing name lookup. /// /// \param SS The nested-name-specifier that precedes the name. /// /// \param Name The identifier. If typo correction finds an alternative name, /// this pointer parameter will be updated accordingly. /// /// \param NameLoc The location of the identifier. /// /// \param NextToken The token following the identifier. Used to help /// disambiguate the name. /// /// \param IsAddressOfOperand True if this name is the operand of a unary /// address of ('&') expression, assuming it is classified as an /// expression. /// /// \param CCC The correction callback, if typo correction is desired. NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name, SourceLocation NameLoc, const Token &NextToken, bool IsAddressOfOperand, std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr); /// Describes the detailed kind of a template name. Used in diagnostics. enum class TemplateNameKindForDiagnostics { ClassTemplate, FunctionTemplate, VarTemplate, AliasTemplate, TemplateTemplateParam, DependentTemplate }; TemplateNameKindForDiagnostics getTemplateNameKindForDiagnostics(TemplateName Name); /// Determine whether it's plausible that E was intended to be a /// template-name. 
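  // Editor's illustrative sketch (not in the original header): the parser
  // typically switches over the NameClassification returned by ClassifyName,
  // declared above. `Classification` is assumed to hold the result of such a
  // call.
  //
  //   switch (Classification.getKind()) {
  //   case Sema::NC_Type: {
  //     ParsedType T = Classification.getType();   // annotate the type token
  //     break;
  //   }
  //   case Sema::NC_Expression: {
  //     ExprResult E = Classification.getExpression();
  //     break;
  //   }
  //   case Sema::NC_TypeTemplate:
  //   case Sema::NC_VarTemplate:
  //   case Sema::NC_FunctionTemplate: {
  //     TemplateName TN = Classification.getTemplateName();
  //     break;
  //   }
  //   default:
  //     break;   // NC_Unknown, NC_Error, NC_Keyword, NC_NestedNameSpecifier
  //   }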
bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) { if (!getLangOpts().CPlusPlus || E.isInvalid()) return false; Dependent = false; if (auto *DRE = dyn_cast<DeclRefExpr>(E.get())) return !DRE->hasExplicitTemplateArgs(); if (auto *ME = dyn_cast<MemberExpr>(E.get())) return !ME->hasExplicitTemplateArgs(); Dependent = true; if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get())) return !DSDRE->hasExplicitTemplateArgs(); if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get())) return !DSME->hasExplicitTemplateArgs(); // Any additional cases recognized here should also be handled by // diagnoseExprIntendedAsTemplateName. return false; } void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName, SourceLocation Less, SourceLocation Greater); Decl *ActOnDeclarator(Scope *S, Declarator &D); NamedDecl *HandleDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists); void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S); bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info); bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC, DeclarationName Name, SourceLocation Loc, bool IsTemplateId); void diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals, SourceLocation FallbackLoc, SourceLocation ConstQualLoc = SourceLocation(), SourceLocation VolatileQualLoc = SourceLocation(), SourceLocation RestrictQualLoc = SourceLocation(), SourceLocation AtomicQualLoc = SourceLocation(), SourceLocation UnalignedQualLoc = SourceLocation()); static bool adjustContextForLocalExternDecl(DeclContext *&DC); void DiagnoseFunctionSpecifiers(const DeclSpec &DS); NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D, const LookupResult &R); NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R); void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl, const LookupResult &R); void CheckShadow(Scope *S, VarDecl *D); /// Warn if 'E', which is an expression that is about to be modified, refers /// to a shadowing declaration. void CheckShadowingDeclModification(Expr *E, SourceLocation Loc); void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI); private: /// Map of current shadowing declarations to shadowed declarations. Warn if /// it looks like the user is trying to modify the shadowing declaration. 
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls; public: void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange); void handleTagNumbering(const TagDecl *Tag, Scope *TagScope); void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec, TypedefNameDecl *NewTD); void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D); NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous); NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D, LookupResult &Previous, bool &Redeclaration); NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope, ArrayRef<BindingDecl *> Bindings = None); NamedDecl * ActOnDecompositionDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists); // Returns true if the variable declaration is a redeclaration bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous); void CheckVariableDeclarationType(VarDecl *NewVD); bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit, Expr *&Init); void CheckCompleteVariableDeclaration(VarDecl *VD); void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD); void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D); NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope); bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD); bool CheckConstexprFunctionDecl(const FunctionDecl *FD); bool CheckConstexprFunctionBody(const FunctionDecl *FD, Stmt *Body); void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD); void FindHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); void NoteHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); // Returns true if the function declaration is a redeclaration bool CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD, LookupResult &Previous, bool IsMemberSpecialization); bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl); bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD, QualType NewT, QualType OldT); void CheckMain(FunctionDecl *FD, const DeclSpec &D); void CheckMSVCRTEntryPoint(FunctionDecl *FD); Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD, bool IsDefinition); Decl *ActOnParamDeclarator(Scope *S, Declarator &D); ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC, SourceLocation Loc, QualType T); ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc, SourceLocation NameLoc, IdentifierInfo *Name, QualType T, TypeSourceInfo *TSInfo, StorageClass SC); void ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc, Expr *defarg); void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc, SourceLocation ArgLoc); void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc); bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg, SourceLocation EqualLoc); void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit); void ActOnUninitializedDecl(Decl *dcl); void ActOnInitializerError(Decl *Dcl); void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc); void ActOnCXXForRangeDecl(Decl *D); StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc, 
IdentifierInfo *Ident, ParsedAttributes &Attrs, SourceLocation AttrEnd); void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc); void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc); void CheckStaticLocalForDllExport(VarDecl *VD); void FinalizeDeclaration(Decl *D); DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS, ArrayRef<Decl *> Group); DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group); /// Should be called on all declarations that might have attached /// documentation comments. void ActOnDocumentableDecl(Decl *D); void ActOnDocumentableDecls(ArrayRef<Decl *> Group); void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D, SourceLocation LocAfterDecls); void CheckForFunctionRedefinition( FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D, SkipBodyInfo *SkipBody = nullptr); void ActOnStartOfObjCMethodDef(Scope *S, Decl *D); bool isObjCMethodDecl(Decl *D) { return D && isa<ObjCMethodDecl>(D); } /// Determine whether we can delay parsing the body of a function or /// function template until it is used, assuming we don't care about emitting /// code for that function. /// /// This will be \c false if we may need the body of the function in the /// middle of parsing an expression (where it's impractical to switch to /// parsing a different function), for instance, if it's constexpr in C++11 /// or has an 'auto' return type in C++14. These cases are essentially bugs. bool canDelayFunctionBody(const Declarator &D); /// Determine whether we can skip parsing the body of a function /// definition, assuming we don't care about analyzing its body or emitting /// code for that function. /// /// This will be \c false only if we may need the body of the function in /// order to parse the rest of the program (for instance, if it is /// \c constexpr in C++11 or has an 'auto' return type in C++14). bool canSkipFunctionBody(Decl *D); void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation); Decl *ActOnSkippedFunctionBody(Decl *Decl); void ActOnFinishInlineFunctionDef(FunctionDecl *D); /// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an /// attribute for which parsing is delayed. void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs); /// Diagnose any unused parameters in the given sequence of /// ParmVarDecl pointers. void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters); /// Diagnose whether the size of parameters or return value of a /// function or obj-c method definition is pass-by-value and larger than a /// specified threshold. void DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters, QualType ReturnTy, NamedDecl *D); void DiagnoseInvalidJumps(Stmt *Body); Decl *ActOnFileScopeAsmDecl(Expr *expr, SourceLocation AsmLoc, SourceLocation RParenLoc); /// Handle a C++11 empty-declaration and attribute-declaration. 
Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList, SourceLocation SemiLoc); enum class ModuleDeclKind { Interface, ///< 'export module X;' Implementation, ///< 'module X;' Partition, ///< 'module partition X;' }; /// The parser has processed a module-declaration that begins the definition /// of a module interface or implementation. DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc, ModuleDeclKind MDK, ModuleIdPath Path); /// The parser has processed a module import declaration. /// /// \param AtLoc The location of the '@' symbol, if any. /// /// \param ImportLoc The location of the 'import' keyword. /// /// \param Path The module access path. DeclResult ActOnModuleImport(SourceLocation AtLoc, SourceLocation ImportLoc, ModuleIdPath Path); /// The parser has processed a module import translated from a /// #include or similar preprocessing directive. void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod); void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod); /// The parsed has entered a submodule. void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod); /// The parser has left a submodule. void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod); /// Create an implicit import of the given module at the given /// source location, for error recovery, if possible. /// /// This routine is typically used when an entity found by name lookup /// is actually hidden within a module that we know about but the user /// has forgotten to import. void createImplicitModuleImportForErrorRecovery(SourceLocation Loc, Module *Mod); /// Kinds of missing import. Note, the values of these enumerators correspond /// to %select values in diagnostics. enum class MissingImportKind { Declaration, Definition, DefaultArgument, ExplicitSpecialization, PartialSpecialization }; /// Diagnose that the specified declaration needs to be visible but /// isn't, and suggest a module import that would resolve the problem. void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, MissingImportKind MIK, bool Recover = true); void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, SourceLocation DeclLoc, ArrayRef<Module *> Modules, MissingImportKind MIK, bool Recover); Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc, SourceLocation LBraceLoc); Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl, SourceLocation RBraceLoc); /// We've found a use of a templated declaration that would trigger an /// implicit instantiation. Check that any relevant explicit specializations /// and partial specializations are visible, and diagnose if not. void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec); /// We've found a use of a template specialization that would select a /// partial specialization. Check that the partial specialization is visible, /// and diagnose if not. void checkPartialSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec); /// Retrieve a suitable printing policy for diagnostics. PrintingPolicy getPrintingPolicy() const { return getPrintingPolicy(Context, PP); } /// Retrieve a suitable printing policy for diagnostics. static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx, const Preprocessor &PP); /// Scope actions. 
void ActOnPopScope(SourceLocation Loc, Scope *S); void ActOnTranslationUnitScope(Scope *S); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, RecordDecl *&AnonRecord); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, MultiTemplateParamsArg TemplateParams, bool IsExplicitInstantiation, RecordDecl *&AnonRecord); Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS, AccessSpecifier AS, RecordDecl *Record, const PrintingPolicy &Policy); Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS, RecordDecl *Record); /// Common ways to introduce type names without a tag for use in diagnostics. /// Keep in sync with err_tag_reference_non_tag. enum NonTagKind { NTK_NonStruct, NTK_NonClass, NTK_NonUnion, NTK_NonEnum, NTK_Typedef, NTK_TypeAlias, NTK_Template, NTK_TypeAliasTemplate, NTK_TemplateTemplateArgument, }; /// Given a non-tag type declaration, returns an enum useful for indicating /// what kind of non-tag type this is. NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK); bool isAcceptableTagRedeclaration(const TagDecl *Previous, TagTypeKind NewTag, bool isDefinition, SourceLocation NewTagLoc, const IdentifierInfo *Name); enum TagUseKind { TUK_Reference, // Reference to a tag: 'struct foo *X;' TUK_Declaration, // Fwd decl of a tag: 'struct foo;' TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;' TUK_Friend // Friend declaration: 'friend struct foo;' }; Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, AccessSpecifier AS, SourceLocation ModulePrivateLoc, MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl, bool &IsDependent, SourceLocation ScopedEnumKWLoc, bool ScopedEnumUsesClassTag, TypeResult UnderlyingType, bool IsTypeSpecifier, bool IsTemplateParamOrArg, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc, unsigned TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, MultiTemplateParamsArg TempParamLists); TypeResult ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK, const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation TagLoc, SourceLocation NameLoc); void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart, IdentifierInfo *ClassName, SmallVectorImpl<Decl *> &Decls); Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth); FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS); MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS, const ParsedAttr &MSPropertyAttr); FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T, TypeSourceInfo *TInfo, RecordDecl *Record, SourceLocation Loc, bool Mutable, Expr *BitfieldWidth, InClassInitStyle InitStyle, SourceLocation TSSL, AccessSpecifier AS, NamedDecl *PrevDecl, Declarator *D = nullptr); bool CheckNontrivialField(FieldDecl *FD); void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM); enum TrivialABIHandling { /// The triviality of a method unaffected by "trivial_abi". TAH_IgnoreTrivialABI, /// The triviality of a method affected by "trivial_abi". 
TAH_ConsiderTrivialABI }; bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM, TrivialABIHandling TAH = TAH_IgnoreTrivialABI, bool Diagnose = false); CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD); void ActOnLastBitfield(SourceLocation DeclStart, SmallVectorImpl<Decl *> &AllIvarDecls); Decl *ActOnIvar(Scope *S, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, tok::ObjCKeywordKind visibility); // This is used for both record definitions and ObjC interface declarations. void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl, ArrayRef<Decl *> Fields, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); /// ActOnTagStartDefinition - Invoked when we have entered the /// scope of a tag's definition (e.g., for an enumeration, class, /// struct, or union). void ActOnTagStartDefinition(Scope *S, Decl *TagDecl); /// Perform ODR-like check for C/ObjC when merging tag types from modules. /// Differently from C++, actually parse the body and reject / error out /// in case of a structural mismatch. bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev, SkipBodyInfo &SkipBody); typedef void *SkippedDefinitionContext; /// Invoked when we enter a tag definition that we're skipping. SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD); Decl *ActOnObjCContainerStartDefinition(Decl *IDecl); /// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a /// C++ record definition's base-specifiers clause and are starting its /// member declarations. void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl, SourceLocation FinalLoc, bool IsFinalSpelledSealed, SourceLocation LBraceLoc); /// ActOnTagFinishDefinition - Invoked once we have finished parsing /// the definition of a tag (enumeration, class, struct, or union). void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl, SourceRange BraceRange); void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context); void ActOnObjCContainerFinishDefinition(); /// Invoked when we must temporarily exit the objective-c container /// scope for parsing/looking-up C constructs. /// /// Must be followed by a call to \see ActOnObjCReenterContainerContext void ActOnObjCTemporaryExitContainerContext(DeclContext *DC); void ActOnObjCReenterContainerContext(DeclContext *DC); /// ActOnTagDefinitionError - Invoked when there was an unrecoverable /// error parsing the definition of a tag. void ActOnTagDefinitionError(Scope *S, Decl *TagDecl); EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum, EnumConstantDecl *LastEnumConst, SourceLocation IdLoc, IdentifierInfo *Id, Expr *val); bool CheckEnumUnderlyingType(TypeSourceInfo *TI); bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped, QualType EnumUnderlyingTy, bool IsFixed, const EnumDecl *Prev); /// Determine whether the body of an anonymous enumeration should be skipped. /// \param II The name of the first enumerator. SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II, SourceLocation IILoc); Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant, SourceLocation IdLoc, IdentifierInfo *Id, const ParsedAttributesView &Attrs, SourceLocation EqualLoc, Expr *Val); void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange, Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S, const ParsedAttributesView &Attr); DeclContext *getContainingDC(DeclContext *DC); /// Set the current declaration context until it gets popped. 
  void PushDeclContext(Scope *S, DeclContext *DC);
  void PopDeclContext();

  /// EnterDeclaratorContext - Used when we must lookup names in the context
  /// of a declarator's nested name specifier.
  void EnterDeclaratorContext(Scope *S, DeclContext *DC);
  void ExitDeclaratorContext(Scope *S);

  /// Push the parameters of D, which must be a function, into scope.
  void ActOnReenterFunctionContext(Scope* S, Decl* D);
  void ActOnExitFunctionContext();

  DeclContext *getFunctionLevelDeclContext();

  /// getCurFunctionDecl - If inside of a function body, this returns a pointer
  /// to the function decl for the function being parsed. If we're currently
  /// in a 'block', this returns the containing context.
  FunctionDecl *getCurFunctionDecl();

  /// getCurMethodDecl - If inside of a method body, this returns a pointer to
  /// the method decl for the method being parsed. If we're currently
  /// in a 'block', this returns the containing context.
  ObjCMethodDecl *getCurMethodDecl();

  /// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
  /// or C function we're in, otherwise return null. If we're currently
  /// in a 'block', this returns the containing context.
  NamedDecl *getCurFunctionOrMethodDecl();

  /// Add this decl to the scope shadowed decl chains.
  void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);

  /// Make the given externally-produced declaration visible at the
  /// top level scope.
  ///
  /// \param D The externally-produced declaration to push.
  ///
  /// \param Name The name of the externally-produced declaration.
  void pushExternalDeclIntoScope(NamedDecl *D, DeclarationName Name);

  /// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
  /// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope
  /// returns true if 'D' belongs to the given declaration context.
  ///
  /// \param AllowInlineNamespace If \c true, allow the declaration to be in
  /// the enclosing namespace set of the context, rather than contained
  /// directly within it.
  bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
                     bool AllowInlineNamespace = false);

  /// Finds the scope corresponding to the given decl context, if it
  /// happens to be an enclosing scope. Otherwise return NULL.
  static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);

  /// Subroutines of ActOnDeclarator().
  TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
                                TypeSourceInfo *TInfo);
  bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);

  /// Describes the kind of merge to perform for availability
  /// attributes (including "deprecated", "unavailable", and "availability").
  enum AvailabilityMergeKind {
    /// Don't merge availability attributes at all.
    AMK_None,

    /// Merge availability attributes for a redeclaration, which requires
    /// an exact match.
    AMK_Redeclaration,

    /// Merge availability attributes for an override, which requires
    /// an exact match or a weakening of constraints.
    AMK_Override,

    /// Merge availability attributes for an implementation of
    /// a protocol requirement.
    AMK_ProtocolImplementation,
  };

  /// Describes the kind of priority given to an availability attribute.
  ///
  /// The sum of priorities determines the final priority of the attribute.
  /// The final priority determines how the attribute will be merged.
  /// An attribute with a lower priority will always remove higher priority
  /// attributes for the specified platform when it is being applied. An
  /// attribute with a higher priority will not be applied if the declaration
  /// already has an availability attribute with a lower priority for the
  /// specified platform. The final priority values are not expected to match
  /// the values in this enumeration, but instead should be treated as a plain
  /// integer value. This enumeration just names the priority weights that are
  /// used to calculate that final value.
  enum AvailabilityPriority : int {
    /// The availability attribute was specified explicitly next to the
    /// declaration.
    AP_Explicit = 0,

    /// The availability attribute was applied using '#pragma clang attribute'.
    AP_PragmaClangAttribute = 1,

    /// The availability attribute for a specific platform was inferred from
    /// an availability attribute for another platform.
    AP_InferredFromOtherPlatform = 2
  };

  /// Attribute merging methods. Return true if a new attribute was added.
  AvailabilityAttr *mergeAvailabilityAttr(
      NamedDecl *D, SourceRange Range, IdentifierInfo *Platform, bool Implicit,
      VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted,
      bool IsUnavailable, StringRef Message, bool IsStrict,
      StringRef Replacement, AvailabilityMergeKind AMK, int Priority,
      unsigned AttrSpellingListIndex);
  TypeVisibilityAttr *mergeTypeVisibilityAttr(
      Decl *D, SourceRange Range, TypeVisibilityAttr::VisibilityType Vis,
      unsigned AttrSpellingListIndex);
  VisibilityAttr *mergeVisibilityAttr(Decl *D, SourceRange Range,
                                      VisibilityAttr::VisibilityType Vis,
                                      unsigned AttrSpellingListIndex);
  UuidAttr *mergeUuidAttr(Decl *D, SourceRange Range,
                          unsigned AttrSpellingListIndex, StringRef Uuid);
  DLLImportAttr *mergeDLLImportAttr(Decl *D, SourceRange Range,
                                    unsigned AttrSpellingListIndex);
  DLLExportAttr *mergeDLLExportAttr(Decl *D, SourceRange Range,
                                    unsigned AttrSpellingListIndex);
  MSInheritanceAttr *
  mergeMSInheritanceAttr(Decl *D, SourceRange Range, bool BestCase,
                         unsigned AttrSpellingListIndex,
                         MSInheritanceAttr::Spelling SemanticSpelling);
  FormatAttr *mergeFormatAttr(Decl *D, SourceRange Range,
                              IdentifierInfo *Format, int FormatIdx,
                              int FirstArg, unsigned AttrSpellingListIndex);
  SectionAttr *mergeSectionAttr(Decl *D, SourceRange Range, StringRef Name,
                                unsigned AttrSpellingListIndex);
  CodeSegAttr *mergeCodeSegAttr(Decl *D, SourceRange Range, StringRef Name,
                                unsigned AttrSpellingListIndex);
  AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, SourceRange Range,
                                          IdentifierInfo *Ident,
                                          unsigned AttrSpellingListIndex);
  MinSizeAttr *mergeMinSizeAttr(Decl *D, SourceRange Range,
                                unsigned AttrSpellingListIndex);
  NoSpeculativeLoadHardeningAttr *mergeNoSpeculativeLoadHardeningAttr(
      Decl *D, const NoSpeculativeLoadHardeningAttr &AL);
  SpeculativeLoadHardeningAttr *mergeSpeculativeLoadHardeningAttr(
      Decl *D, const SpeculativeLoadHardeningAttr &AL);
  OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, SourceRange Range,
                                          unsigned AttrSpellingListIndex);
  InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL);
  InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D,
                                                const InternalLinkageAttr &AL);
  CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL);
  CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL);

  void mergeDeclAttributes(NamedDecl *New, Decl *Old,
                           AvailabilityMergeKind AMK = AMK_Redeclaration);
  void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
                            LookupResult &OldDecls);
  bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
                         bool MergeTypeWithOld);
  bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
                                    Scope *S, bool MergeTypeWithOld);
  void
mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old); void MergeVarDecl(VarDecl *New, LookupResult &Previous); void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld); void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old); bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn); void notePreviousDefinition(const NamedDecl *Old, SourceLocation New); bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S); // AssignmentAction - This is used by all the assignment diagnostic functions // to represent what is actually causing the operation enum AssignmentAction { AA_Assigning, AA_Passing, AA_Returning, AA_Converting, AA_Initializing, AA_Sending, AA_Casting, AA_Passing_CFAudited }; /// C++ Overloading. enum OverloadKind { /// This is a legitimate overload: the existing declarations are /// functions or function templates with different signatures. Ovl_Overload, /// This is not an overload because the signature exactly matches /// an existing declaration. Ovl_Match, /// This is not an overload because the lookup results contain a /// non-function. Ovl_NonFunction }; OverloadKind CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &OldDecls, NamedDecl *&OldDecl, bool IsForUsingDecl); bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl, bool ConsiderCudaAttrs = true); /// Checks availability of the function depending on the current /// function context.Inside an unavailable function,unavailability is ignored. /// /// \returns true if \p FD is unavailable and current context is inside /// an available function, false otherwise. bool isFunctionConsideredUnavailable(FunctionDecl *FD); ImplicitConversionSequence TryImplicitConversion(Expr *From, QualType ToType, bool SuppressUserConversions, bool AllowExplicit, bool InOverloadResolution, bool CStyle, bool AllowObjCWritebackConversion); bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType); bool IsFloatingPointPromotion(QualType FromType, QualType ToType); bool IsComplexPromotion(QualType FromType, QualType ToType); bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCWritebackConversion(QualType FromType, QualType ToType, QualType &ConvertedType); bool IsBlockPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType); bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType, const FunctionProtoType *NewType, unsigned *ArgPos = nullptr); void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag, QualType FromType, QualType ToType); void maybeExtendBlockObject(ExprResult &E); CastKind PrepareCastToObjCObjectPointer(ExprResult &E); bool CheckPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath& BasePath, bool IgnoreBaseAccess, bool Diagnose = true); bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType &ConvertedType); bool CheckMemberPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath &BasePath, bool IgnoreBaseAccess); bool IsQualificationConversion(QualType FromType, QualType ToType, bool CStyle, bool &ObjCLifetimeConversion); bool IsFunctionConversion(QualType FromType, QualType ToType, QualType &ResultTy); bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType); bool 
isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg); ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity, const VarDecl *NRVOCandidate, QualType ResultType, Expr *Value, bool AllowNRVO = true); bool CanPerformCopyInitialization(const InitializedEntity &Entity, ExprResult Init); ExprResult PerformCopyInitialization(const InitializedEntity &Entity, SourceLocation EqualLoc, ExprResult Init, bool TopLevelOfInitList = false, bool AllowExplicit = false); ExprResult PerformObjectArgumentInitialization(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, CXXMethodDecl *Method); /// Check that the lifetime of the initializer (and its subobjects) is /// sufficient for initializing the entity, and perform lifetime extension /// (when permitted) if not. void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init); ExprResult PerformContextuallyConvertToBool(Expr *From); ExprResult PerformContextuallyConvertToObjCPointer(Expr *From); /// Contexts in which a converted constant expression is required. enum CCEKind { CCEK_CaseValue, ///< Expression in a case label. CCEK_Enumerator, ///< Enumerator value with fixed underlying type. CCEK_TemplateArg, ///< Value of a non-type template parameter. CCEK_NewExpr, ///< Constant expression in a noptr-new-declarator. CCEK_ConstexprIf ///< Condition in a constexpr if statement. }; ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, llvm::APSInt &Value, CCEKind CCE); ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, APValue &Value, CCEKind CCE); /// Abstract base class used to perform a contextual implicit /// conversion from an expression to any type passing a filter. class ContextualImplicitConverter { public: bool Suppress; bool SuppressConversion; ContextualImplicitConverter(bool Suppress = false, bool SuppressConversion = false) : Suppress(Suppress), SuppressConversion(SuppressConversion) {} /// Determine whether the specified type is a valid destination type /// for this conversion. virtual bool match(QualType T) = 0; /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the expression has incomplete class type. virtual SemaDiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the only matching conversion function /// is explicit. virtual SemaDiagnosticBuilder diagnoseExplicitConv( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; /// Emits a note for the explicit conversion function. virtual SemaDiagnosticBuilder noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when there are multiple possible conversion /// functions. virtual SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a note for one of the candidate conversions. virtual SemaDiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when we picked a conversion function /// (for cases when we are not allowed to pick a conversion function). 
virtual SemaDiagnosticBuilder diagnoseConversion( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; virtual ~ContextualImplicitConverter() {} }; class ICEConvertDiagnoser : public ContextualImplicitConverter { bool AllowScopedEnumerations; public: ICEConvertDiagnoser(bool AllowScopedEnumerations, bool Suppress, bool SuppressConversion) : ContextualImplicitConverter(Suppress, SuppressConversion), AllowScopedEnumerations(AllowScopedEnumerations) {} /// Match an integral or (possibly scoped) enumeration type. bool match(QualType T) override; SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override { return diagnoseNotInt(S, Loc, T); } /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0; }; /// Perform a contextual implicit conversion. ExprResult PerformContextualImplicitConversion( SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter); enum ObjCSubscriptKind { OS_Array, OS_Dictionary, OS_Error }; ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE); // Note that LK_String is intentionally after the other literals, as // this is used for diagnostics logic. enum ObjCLiteralKind { LK_Array, LK_Dictionary, LK_Numeric, LK_Boxed, LK_String, LK_Block, LK_None }; ObjCLiteralKind CheckLiteralKind(Expr *FromE); ExprResult PerformObjectMemberConversion(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, NamedDecl *Member); // Members have to be NamespaceDecl* or TranslationUnitDecl*. // TODO: make this is a typesafe union. typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet; typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet; using ADLCallKind = CallExpr::ADLCallKind; void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = false, ADLCallKind IsADLCandidate = ADLCallKind::NotADL, ConversionSequenceList EarlyConversions = None); void AddFunctionCandidates(const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, bool SuppressUserConversions = false, bool PartialOverloading = false, bool FirstArgumentIsBase = false); void AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversion = false); void AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, ConversionSequenceList EarlyConversions = None); void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false); void AddTemplateOverloadCandidate( FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, 
OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, ADLCallKind IsADLCandidate = ADLCallKind::NotADL); bool CheckNonDependentConversions(FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, ConversionSequenceList &Conversions, bool SuppressUserConversions, CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(), Expr::Classification ObjectClassification = {}); void AddConversionCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet& CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowResultConversion = true); void AddTemplateConversionCandidate(FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowResultConversion = true); void AddSurrogateCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, const FunctionProtoType *Proto, Expr *Object, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddMemberOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, SourceRange OpRange = SourceRange()); void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool IsAssignmentOperator = false, unsigned NumContextualBoolArguments = 0); void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddArgumentDependentLookupCandidates(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, TemplateArgumentListInfo *ExplicitTemplateArgs, OverloadCandidateSet& CandidateSet, bool PartialOverloading = false); // Emit as a 'note' the specific overload candidate void NoteOverloadCandidate(NamedDecl *Found, FunctionDecl *Fn, QualType DestType = QualType(), bool TakingAddress = false); // Emit as a series of 'note's all template and non-templates identified by // the expression Expr void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(), bool TakingAddress = false); /// Check the enable_if expressions on the given function. Returns the first /// failing attribute, or NULL if they were all successful. EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args, bool MissingImplicitThis = false); /// Find the failed Boolean condition within a given Boolean /// constant expression, and describe it with a string. std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// non-ArgDependent DiagnoseIfAttrs. /// /// Argument-dependent diagnose_if attributes should be checked each time a /// function is used as a direct callee of a function call. /// /// Returns true if any errors were emitted. bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function, const Expr *ThisArg, ArrayRef<const Expr *> Args, SourceLocation Loc); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// ArgDependent DiagnoseIfAttrs. /// /// Argument-independent diagnose_if attributes should be checked on every use /// of a function. /// /// Returns true if any errors were emitted. 
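  // Editor's illustrative sketch (not in the original header): the AddOverload*
  // entry points above are normally driven by building a candidate set and then
  // asking it for the best viable function. `SemaRef`, `Fns`, `Args`, and `Loc`
  // are assumed to exist in the caller; `Fns` is assumed to hold
  // FunctionDecl*/DeclAccessPair pairs.
  //
  //   OverloadCandidateSet CandidateSet(Loc, OverloadCandidateSet::CSK_Normal);
  //   for (const auto &FnPair : Fns)
  //     SemaRef.AddOverloadCandidate(FnPair.first, FnPair.second, Args,
  //                                  CandidateSet);
  //   OverloadCandidateSet::iterator Best;
  //   if (CandidateSet.BestViableFunction(SemaRef, Loc, Best) == OR_Success) {
  //     FunctionDecl *FD = Best->Function;  // the selected overload
  //   }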
bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND, SourceLocation Loc); /// Returns whether the given function's address can be taken or not, /// optionally emitting a diagnostic if the address can't be taken. /// /// Returns false if taking the address of the function is illegal. bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function, bool Complain = false, SourceLocation Loc = SourceLocation()); // [PossiblyAFunctionType] --> [Return] // NonFunctionType --> NonFunctionType // R (A) --> R(A) // R (*)(A) --> R (A) // R (&)(A) --> R (A) // R (S::*)(A) --> R (A) QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType); FunctionDecl * ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetType, bool Complain, DeclAccessPair &Found, bool *pHadMultipleCandidates = nullptr); FunctionDecl * resolveAddressOfOnlyViableOverloadCandidate(Expr *E, DeclAccessPair &FoundResult); bool resolveAndFixAddressOfOnlyViableOverloadCandidate( ExprResult &SrcExpr, bool DoFunctionPointerConversion = false); FunctionDecl * ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl, bool Complain = false, DeclAccessPair *Found = nullptr); bool ResolveAndFixSingleFunctionTemplateSpecialization( ExprResult &SrcExpr, bool DoFunctionPointerConverion = false, bool Complain = false, SourceRange OpRangeForComplaining = SourceRange(), QualType DestTypeForComplaining = QualType(), unsigned DiagIDForComplaining = 0); Expr *FixOverloadedFunctionReference(Expr *E, DeclAccessPair FoundDecl, FunctionDecl *Fn); ExprResult FixOverloadedFunctionReference(ExprResult, DeclAccessPair FoundDecl, FunctionDecl *Fn); void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool PartialOverloading = false); // An enum used to represent the different possible results of building a // range-based for loop. enum ForRangeStatus { FRS_Success, FRS_NoViableFunction, FRS_DiagnosticIssued }; ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc, SourceLocation RangeLoc, const DeclarationNameInfo &NameInfo, LookupResult &MemberLookup, OverloadCandidateSet *CandidateSet, Expr *Range, ExprResult *CallExpr); ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc, Expr *ExecConfig, bool AllowTypoCorrection=true, bool CalleesAddressIsTaken=false); bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, MultiExprArg Args, SourceLocation RParenLoc, OverloadCandidateSet *CandidateSet, ExprResult *Result); ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *input, bool RequiresADL = true); ExprResult CreateOverloadedBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS, bool RequiresADL = true); ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc, SourceLocation RLoc, Expr *Base,Expr *Idx); ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool *NoArrowOperatorFound = nullptr); /// CheckCallReturnType - Checks that a call expression's return type is /// complete. Returns true on failure. 
The location passed in is the location /// that best represents the call. bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc, CallExpr *CE, FunctionDecl *FD); /// Helpers for dealing with blocks and functions. bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters, bool CheckParameterNames); void CheckCXXDefaultArguments(FunctionDecl *FD); void CheckExtraCXXDefaultArguments(Declarator &D); Scope *getNonFieldDeclScope(Scope *S); /// \name Name lookup /// /// These routines provide name lookup that is used during semantic /// analysis to resolve the various kinds of names (identifiers, /// overloaded operator names, constructor names, etc.) into zero or /// more declarations within a particular scope. The major entry /// points are LookupName, which performs unqualified name lookup, /// and LookupQualifiedName, which performs qualified name lookup. /// /// All name lookup is performed based on some specific criteria, /// which specify what names will be visible to name lookup and how /// far name lookup should work. These criteria are important both /// for capturing language semantics (certain lookups will ignore /// certain names, for example) and for performance, since name /// lookup is often a bottleneck in the compilation of C++. Name /// lookup criteria is specified via the LookupCriteria enumeration. /// /// The results of name lookup can vary based on the kind of name /// lookup performed, the current language, and the translation /// unit. In C, for example, name lookup will either return nothing /// (no entity found) or a single declaration. In C++, name lookup /// can additionally refer to a set of overloaded functions or /// result in an ambiguity. All of the possible results of name /// lookup are captured by the LookupResult class, which provides /// the ability to distinguish among them. //@{ /// Describes the kind of name lookup to perform. enum LookupNameKind { /// Ordinary name lookup, which finds ordinary names (functions, /// variables, typedefs, etc.) in C and most kinds of names /// (functions, variables, members, types, etc.) in C++. LookupOrdinaryName = 0, /// Tag name lookup, which finds the names of enums, classes, /// structs, and unions. LookupTagName, /// Label name lookup. LookupLabel, /// Member name lookup, which finds the names of /// class/struct/union members. LookupMemberName, /// Look up of an operator name (e.g., operator+) for use with /// operator overloading. This lookup is similar to ordinary name /// lookup, but will ignore any declarations that are class members. LookupOperatorName, /// Look up of a name that precedes the '::' scope resolution /// operator in C++. This lookup completely ignores operator, object, /// function, and enumerator names (C++ [basic.lookup.qual]p1). LookupNestedNameSpecifierName, /// Look up a namespace name within a C++ using directive or /// namespace alias definition, ignoring non-namespace names (C++ /// [basic.lookup.udir]p1). LookupNamespaceName, /// Look up all declarations in a scope with the given name, /// including resolved using declarations. This is appropriate /// for checking redeclarations for a using declaration. LookupUsingDeclName, /// Look up an ordinary name that is going to be redeclared as a /// name with linkage. This lookup ignores any declarations that /// are outside of the current scope unless they have linkage. See /// C99 6.2.2p4-5 and C++ [basic.link]p6. LookupRedeclarationWithLinkage, /// Look up a friend of a local class. 
This lookup does not look /// outside the innermost non-class scope. See C++11 [class.friend]p11. LookupLocalFriendName, /// Look up the name of an Objective-C protocol. LookupObjCProtocolName, /// Look up implicit 'self' parameter of an objective-c method. LookupObjCImplicitSelfParam, /// Look up the name of an OpenMP user-defined reduction operation. LookupOMPReductionName, /// Look up the name of an OpenMP user-defined mapper. LookupOMPMapperName, /// Look up any declaration with any name. LookupAnyName }; /// Specifies whether (or how) name lookup is being performed for a /// redeclaration (vs. a reference). enum RedeclarationKind { /// The lookup is a reference to this name that is not for the /// purpose of redeclaring the name. NotForRedeclaration = 0, /// The lookup results will be used for redeclaration of a name, /// if an entity by that name already exists and is visible. ForVisibleRedeclaration, /// The lookup results will be used for redeclaration of a name /// with external linkage; non-visible lookup results with external linkage /// may also be found. ForExternalRedeclaration }; RedeclarationKind forRedeclarationInCurContext() { // A declaration with an owning module for linkage can never link against // anything that is not visible. We don't need to check linkage here; if // the context has internal linkage, redeclaration lookup won't find things // from other TUs, and we can't safely compute linkage yet in general. if (cast<Decl>(CurContext) ->getOwningModuleForLinkage(/*IgnoreLinkage*/true)) return ForVisibleRedeclaration; return ForExternalRedeclaration; } /// The possible outcomes of name lookup for a literal operator. enum LiteralOperatorLookupResult { /// The lookup resulted in an error. LOLR_Error, /// The lookup found no match but no diagnostic was issued. LOLR_ErrorNoDiagnostic, /// The lookup found a single 'cooked' literal operator, which /// expects a normal literal to be built and passed to it. LOLR_Cooked, /// The lookup found a single 'raw' literal operator, which expects /// a string literal containing the spelling of the literal token. LOLR_Raw, /// The lookup found an overload set of literal operator templates, /// which expect the characters of the spelling of the literal token to be /// passed as a non-type template argument pack. LOLR_Template, /// The lookup found an overload set of literal operator templates, /// which expect the character type and characters of the spelling of the /// string literal token to be passed as template arguments. LOLR_StringTemplate }; SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D, CXXSpecialMember SM, bool ConstArg, bool VolatileArg, bool RValueThis, bool ConstThis, bool VolatileThis); typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator; typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)> TypoRecoveryCallback; private: bool CppLookupName(LookupResult &R, Scope *S); struct TypoExprState { std::unique_ptr<TypoCorrectionConsumer> Consumer; TypoDiagnosticGenerator DiagHandler; TypoRecoveryCallback RecoveryHandler; TypoExprState(); TypoExprState(TypoExprState &&other) noexcept; TypoExprState &operator=(TypoExprState &&other) noexcept; }; /// The set of unhandled TypoExprs and their associated state. llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos; /// Creates a new TypoExpr AST node. 
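/// As an illustrative sketch only (hypothetical identifiers), this is the
/// kind of source for which a delayed TypoExpr stands in until a correction
/// such as 'some_value' is chosen or the typo is diagnosed:
///   int some_value();
///   int x = som_value();   // unknown name; represented by a TypoExpr for now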
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC); // The set of known/encountered (unique, canonicalized) NamespaceDecls. // // The boolean value will be true to indicate that the namespace was loaded // from an AST/PCH file, or false otherwise. llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces; /// Whether we have already loaded known namespaces from an external /// source. bool LoadedExternalKnownNamespaces; /// Helper for CorrectTypo and CorrectTypoDelayed used to create and /// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction /// should be skipped entirely. std::unique_ptr<TypoCorrectionConsumer> makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, std::unique_ptr<CorrectionCandidateCallback> CCC, DeclContext *MemberContext, bool EnteringContext, const ObjCObjectPointerType *OPT, bool ErrorRecovery); public: const TypoExprState &getTypoExprState(TypoExpr *TE) const; /// Clears the state of the given TypoExpr. void clearDelayedTypo(TypoExpr *TE); /// Look up a name, looking for a single declaration. Return /// null if the results were absent, ambiguous, or overloaded. /// /// It is preferable to use the elaborated form and explicitly handle /// ambiguity and overloading. NamedDecl *LookupSingleName(Scope *S, DeclarationName Name, SourceLocation Loc, LookupNameKind NameKind, RedeclarationKind Redecl = NotForRedeclaration); bool LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, bool InUnqualifiedLookup = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, CXXScopeSpec &SS); bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS, bool AllowBuiltinCreation = false, bool EnteringContext = false); ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc, RedeclarationKind Redecl = NotForRedeclaration); bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class); void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S, QualType T1, QualType T2, UnresolvedSetImpl &Functions); LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc, SourceLocation GnuLabelLoc = SourceLocation()); DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class); CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class); CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class); bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id); LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef<QualType> ArgTys, bool AllowRaw, bool AllowTemplate, bool AllowStringTemplate, bool DiagnoseMissing); bool isKnownName(StringRef name); void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, ADLResult &Functions); void LookupVisibleDecls(Scope *S, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool LoadExternal = true); void LookupVisibleDecls(DeclContext *Ctx,
LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool IncludeDependentBases = false, bool LoadExternal = true); enum CorrectTypoKind { CTK_NonError, // CorrectTypo used in a non error recovery situation. CTK_ErrorRecovery // CorrectTypo used in normal error recovery. }; TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, std::unique_ptr<CorrectionCandidateCallback> CCC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr, bool RecordFailure = true); TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, std::unique_ptr<CorrectionCandidateCallback> CCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr); /// Process any TypoExprs in the given Expr and its children, /// generating diagnostics as appropriate and returning a new Expr if there /// were typos that were all successfully corrected and ExprError if one or /// more typos could not be corrected. /// /// \param E The Expr to check for TypoExprs. /// /// \param InitDecl A VarDecl to avoid because the Expr being corrected is its /// initializer. /// /// \param Filter A function applied to a newly rebuilt Expr to determine if /// it is an acceptable/usable result from a single combination of typo /// corrections. As long as the filter returns ExprError, different /// combinations of corrections will be tried until all are exhausted. ExprResult CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }); ExprResult CorrectDelayedTyposInExpr(Expr *E, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(E, nullptr, Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }) { return ER.isInvalid() ? 
ER : CorrectDelayedTyposInExpr(ER.get(), Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(ER, nullptr, Filter); } void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, bool ErrorRecovery = true); void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, const PartialDiagnostic &PrevNote, bool ErrorRecovery = true); void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F); void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc, ArrayRef<Expr *> Args, AssociatedNamespaceSet &AssociatedNamespaces, AssociatedClassSet &AssociatedClasses); void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S, bool ConsiderLinkage, bool AllowInlineNamespace); bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old); void DiagnoseAmbiguousLookup(LookupResult &Result); //@} ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id, SourceLocation IdLoc, bool TypoCorrection = false); NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, Scope *S, bool ForRedeclaration, SourceLocation Loc); NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II, Scope *S); void AddKnownFunctionAttributes(FunctionDecl *FD); // More parsing and symbol table subroutines. void ProcessPragmaWeak(Scope *S, Decl *D); // Decl attributes - this routine is the top level dispatcher. void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD); // Helper for delayed processing of attributes. void ProcessDeclAttributeDelayed(Decl *D, const ParsedAttributesView &AttrList); void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL, bool IncludeCXX11Attributes = true); bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl, const ParsedAttributesView &AttrList); void checkUnusedDeclAttributes(Declarator &D); /// Determine if type T is a valid subject for a nonnull and similar /// attributes. By default, we look through references (the behavior used by /// nonnull), but if the second parameter is true, then we treat a reference /// type as valid. bool isValidPointerAttrType(QualType T, bool RefOkay = false); bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value); bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC, const FunctionDecl *FD = nullptr); bool CheckAttrTarget(const ParsedAttr &CurrAttr); bool CheckAttrNoArgs(const ParsedAttr &CurrAttr); bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum, StringRef &Str, SourceLocation *ArgLocation = nullptr); bool checkSectionName(SourceLocation LiteralLoc, StringRef Str); bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str); bool checkMSInheritanceAttrOnDefinition( CXXRecordDecl *RD, SourceRange Range, bool BestCase, MSInheritanceAttr::Spelling SemanticSpelling); void CheckAlignasUnderalignment(Decl *D); /// Adjust the calling convention of a method to be the ABI default if it /// wasn't specified explicitly. This handles method types formed from /// function type typedefs and typename template arguments. void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor, SourceLocation Loc); // Check if there is an explicit attribute, but only look through parens. // The intent is to look for an attribute on the current declarator, but not // one that came from a typedef. 
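// Illustrative sketch (hypothetical declarations, assuming MS-style calling
// conventions are enabled): a convention written on the declarator itself
// versus one that is only carried by a typedef.
//   void __stdcall f();                 // explicit on the declarator
//   typedef void (__stdcall *FnPtr)();  // convention comes from the typedef
//   FnPtr p = &f;                       // 'p' itself has no explicit attribute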
bool hasExplicitCallingConv(QualType &T); /// Get the outermost AttributedType node that sets a calling convention. /// Valid types should not have multiple attributes with different CCs. const AttributedType *getCallingConvAttributedType(QualType T) const; /// Stmt attributes - this routine is the top level dispatcher. StmtResult ProcessStmtAttributes(Stmt *Stmt, const ParsedAttributesView &Attrs, SourceRange Range); void WarnConflictingTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); void CheckConflictingOverridingMethod(ObjCMethodDecl *Method, ObjCMethodDecl *Overridden, bool IsProtocolMethodDecl); /// WarnExactTypedMethods - This routine issues a warning if a method /// implementation declaration matches exactly that of its declaration. void WarnExactTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); typedef llvm::SmallPtrSet<Selector, 8> SelectorSet; /// CheckImplementationIvars - This routine checks if the instance variables /// listed in the implementation match those listed in the interface. void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl, ObjCIvarDecl **Fields, unsigned nIvars, SourceLocation Loc); /// ImplMethodsVsClassMethods - This is the main routine to warn if any method /// remains unimplemented in the class or category \@implementation. void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool IncompleteImpl = false); /// DiagnoseUnimplementedProperties - This routine warns on those properties /// which must be implemented by this implementation. void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl *CDecl, bool SynthesizeProperties); /// Diagnose any null-resettable synthesized setters. void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl); /// DefaultSynthesizeProperties - This routine default synthesizes all /// properties which must be synthesized in the class's \@implementation. void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl, ObjCInterfaceDecl *IDecl, SourceLocation AtEnd); void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd); /// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is /// an ivar synthesized for 'Method' and 'Method' is a property accessor /// declared in class 'IFace'. bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace, ObjCMethodDecl *Method, ObjCIvarDecl *IV); /// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the ivar /// which backs the property is not used in the property's accessor. void DiagnoseUnusedBackingIvarInAccessor(Scope *S, const ObjCImplementationDecl *ImplD); /// GetIvarBackingPropertyAccessor - If the method is a property setter/getter /// and its property has a backing ivar, returns this ivar; otherwise, returns /// NULL. It also returns the ivar's property on success. ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method, const ObjCPropertyDecl *&PDecl) const; /// Called by ActOnProperty to handle \@property declarations in /// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, SourceLocation GetterNameLoc, Selector SetterSel, SourceLocation SetterNameLoc, const bool isReadWrite, unsigned &Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind); /// Called by ActOnProperty and HandlePropertyInClassExtension to /// handle creating the ObjCPropertyDecl for a category or \@interface. ObjCPropertyDecl *CreatePropertyDecl(Scope *S, ObjCContainerDecl *CDecl, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, SourceLocation GetterNameLoc, Selector SetterSel, SourceLocation SetterNameLoc, const bool isReadWrite, const unsigned Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); /// AtomicPropertySetterGetterRules - This routine enforces the rule (via /// warning) when an atomic property has one but not the other user-declared /// setter or getter. void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl, ObjCInterfaceDecl* IDecl); void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D); void DiagnoseMissingDesignatedInitOverrides( const ObjCImplementationDecl *ImplD, const ObjCInterfaceDecl *IFD); void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID); enum MethodMatchStrategy { MMS_loose, MMS_strict }; /// MatchTwoMethodDeclarations - Checks if two methods' types match and returns /// true or false accordingly. bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method, const ObjCMethodDecl *PrevMethod, MethodMatchStrategy strategy = MMS_strict); /// MatchAllMethodDeclarations - Check methods declared in an interface or /// protocol against those declared in their implementations. void MatchAllMethodDeclarations(const SelectorSet &InsMap, const SelectorSet &ClsMap, SelectorSet &InsMapSeen, SelectorSet &ClsMapSeen, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool &IncompleteImpl, bool ImmediateClass, bool WarnCategoryMethodImpl=false); /// CheckCategoryVsClassMethodMatches - Checks that methods implemented in /// a category match those implemented in its primary class and /// warns each time an exact match is found. void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP); /// Add the given method to the list of globally-known methods. void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method); private: /// AddMethodToGlobalPool - Add an instance or factory method to the global /// pool. See description of AddInstanceMethodToGlobalPool. void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance); /// LookupMethodInGlobalPool - Returns the instance or factory method and /// optionally warns if there are multiple signatures. ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass, bool instance); public: /// - Returns instance or factory methods in the global method pool for the /// given selector. It checks the desired kind first; if none is found and the /// parameter checkTheOther is set, it then checks the other kind. If no such /// method or only one method is found, the function returns false; otherwise, /// it returns true.
bool CollectMultipleMethodsInGlobalPool(Selector Sel, SmallVectorImpl<ObjCMethodDecl*>& Methods, bool InstanceFirst, bool CheckTheOther, const ObjCObjectType *TypeBound = nullptr); bool AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod, SourceRange R, bool receiverIdOrClass, SmallVectorImpl<ObjCMethodDecl*>& Methods); void DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods, Selector Sel, SourceRange R, bool receiverIdOrClass); private: /// - Returns the method which best matches the given argument list, or /// nullptr if none could be found. ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args, bool IsInstance, SmallVectorImpl<ObjCMethodDecl*>& Methods); /// Record the typo correction failure and return an empty correction. TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc, bool RecordFailure = true) { if (RecordFailure) TypoCorrectionFailures[Typo].insert(TypoLoc); return TypoCorrection(); } public: /// AddInstanceMethodToGlobalPool - All instance methods in a translation /// unit are added to a global pool. This allows us to efficiently associate /// a selector with a method declaration for purposes of typechecking /// messages sent to "id" (where the class of the object is unknown). void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) { AddMethodToGlobalPool(Method, impl, /*instance*/true); } /// AddFactoryMethodToGlobalPool - Same as above, but for factory methods. void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) { AddMethodToGlobalPool(Method, impl, /*instance*/false); } /// AddAnyMethodToGlobalPool - Add any method, instance or factory, to the /// global pool. void AddAnyMethodToGlobalPool(Decl *D); /// LookupInstanceMethodInGlobalPool - Returns the method and warns if /// there are multiple signatures. ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass=false) { return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, /*instance*/true); } /// LookupFactoryMethodInGlobalPool - Returns the method and warns if /// there are multiple signatures. ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass=false) { return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, /*instance*/false); } const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel, QualType ObjectType=QualType()); /// LookupImplementedMethodInGlobalPool - Returns the method which has an /// implementation. ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel); /// CollectIvarsToConstructOrDestruct - Collect those ivars which require /// initialization. void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI, SmallVectorImpl<ObjCIvarDecl*> &Ivars); //===--------------------------------------------------------------------===// // Statement Parsing Callbacks: SemaStmt.cpp. public: class FullExprArg { public: FullExprArg() : E(nullptr) { } FullExprArg(Sema &actions) : E(nullptr) { } ExprResult release() { return E; } Expr *get() const { return E; } Expr *operator->() { return E; } private: // FIXME: No need to make the entire Sema class a friend when it's just // Sema::MakeFullExpr that needs access to the constructor below. friend class Sema; explicit FullExprArg(Expr *expr) : E(expr) {} Expr *E; }; FullExprArg MakeFullExpr(Expr *Arg) { return MakeFullExpr(Arg, Arg ?
Arg->getExprLoc() : SourceLocation()); } FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) { return FullExprArg( ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get()); } FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) { ExprResult FE = ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(), /*DiscardedValue*/ true); return FullExprArg(FE.get()); } StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true); StmtResult ActOnExprStmtError(); StmtResult ActOnNullStmt(SourceLocation SemiLoc, bool HasLeadingEmptyMacro = false); void ActOnStartOfCompoundStmt(bool IsStmtExpr); void ActOnFinishOfCompoundStmt(); StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R, ArrayRef<Stmt *> Elts, bool isStmtExpr); /// An RAII object to enter the scope of a compound statement. class CompoundScopeRAII { public: CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) { S.ActOnStartOfCompoundStmt(IsStmtExpr); } ~CompoundScopeRAII() { S.ActOnFinishOfCompoundStmt(); } private: Sema &S; }; /// An RAII helper that pops a function scope on exit. struct FunctionScopeRAII { Sema &S; bool Active; FunctionScopeRAII(Sema &S) : S(S), Active(true) {} ~FunctionScopeRAII() { if (Active) S.PopFunctionScopeInfo(); } void disable() { Active = false; } }; StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl, SourceLocation StartLoc, SourceLocation EndLoc); void ActOnForEachDeclStmt(DeclGroupPtrTy Decl); StmtResult ActOnForEachLValueExpr(Expr *E); ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val); StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS, SourceLocation DotDotDotLoc, ExprResult RHS, SourceLocation ColonLoc); void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt); StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc, SourceLocation ColonLoc, Stmt *SubStmt, Scope *CurScope); StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl, SourceLocation ColonLoc, Stmt *SubStmt); StmtResult ActOnAttributedStmt(SourceLocation AttrLoc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt); class ConditionResult; StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr, Stmt *InitStmt, ConditionResult Cond, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr, Stmt *InitStmt, ConditionResult Cond, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc, Stmt *InitStmt, ConditionResult Cond); StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch, Stmt *Body); StmtResult ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond, Stmt *Body); StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body, SourceLocation WhileLoc, SourceLocation CondLParen, Expr *Cond, SourceLocation CondRParen); StmtResult ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc, Stmt *First, ConditionResult Second, FullExprArg Third, SourceLocation RParenLoc, Stmt *Body); ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc, Expr *collection); StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc, Stmt *First, Expr *collection, SourceLocation RParenLoc); StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body); enum BuildForRangeKind { /// Initial building of a for-range statement. BFRK_Build, /// Instantiation or recovery rebuild of a for-range statement. Don't /// attempt any typo-correction. BFRK_Rebuild, /// Determining whether a for-range statement could be built.
Avoid any /// unnecessary or irreversible actions. BFRK_Check }; StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, Stmt *LoopVar, SourceLocation ColonLoc, Expr *Collection, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, SourceLocation ColonLoc, Stmt *RangeDecl, Stmt *Begin, Stmt *End, Expr *Cond, Expr *Inc, Stmt *LoopVarDecl, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body); StmtResult ActOnGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc, LabelDecl *TheDecl); StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc, Expr *DestExp); StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope); StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope); void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, unsigned NumParams); typedef std::pair<StringRef, QualType> CapturedParamNameType; void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, ArrayRef<CapturedParamNameType> Params); StmtResult ActOnCapturedRegionEnd(Stmt *S); void ActOnCapturedRegionError(); RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc, unsigned NumParams); enum CopyElisionSemanticsKind { CES_Strict = 0, CES_AllowParameters = 1, CES_AllowDifferentTypes = 2, CES_AllowExceptionVariables = 4, CES_FormerDefault = (CES_AllowParameters), CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes), CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes | CES_AllowExceptionVariables), }; VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E, CopyElisionSemanticsKind CESK); bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD, CopyElisionSemanticsKind CESK); StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, Scope *CurScope); StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple, bool IsVolatile, unsigned NumOutputs, unsigned NumInputs, IdentifierInfo **Names, MultiExprArg Constraints, MultiExprArg Exprs, Expr *AsmString, MultiExprArg Clobbers, SourceLocation RParenLoc); void FillInlineAsmIdentifierInfo(Expr *Res, llvm::InlineAsmIdentifierInfo &Info); ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool IsUnevaluatedContext); bool LookupInlineAsmField(StringRef Base, StringRef Member, unsigned &Offset, SourceLocation AsmLoc); ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member, SourceLocation AsmLoc); StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc, ArrayRef<Token> AsmToks, StringRef AsmString, unsigned NumOutputs, unsigned NumInputs, ArrayRef<StringRef> Constraints, ArrayRef<StringRef> Clobbers, ArrayRef<Expr*> Exprs, SourceLocation EndLoc); LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName, SourceLocation Location, bool AlwaysCreate); VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, bool Invalid = false); Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D); StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen, 
Decl *Parm, Stmt *Body); StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body); StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try, MultiStmtArg Catch, Stmt *Finally); StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw); StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw, Scope *CurScope); ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand); StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SynchExpr, Stmt *SynchBody); StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body); VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id); Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D); StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl, Stmt *HandlerBlock); StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock, ArrayRef<Stmt *> Handlers); StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ? SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); StmtResult ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); void ActOnStartSEHFinallyBlock(); void ActOnAbortSEHFinallyBlock(); StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block); StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope); void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock); bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const; /// If it's a file scoped decl that must warn if not used, keep track /// of it. void MarkUnusedFileScopedDecl(const DeclaratorDecl *D); /// DiagnoseUnusedExprResult - If the statement passed in is an expression /// whose result is unused, warn. void DiagnoseUnusedExprResult(const Stmt *S); void DiagnoseUnusedNestedTypedefs(const RecordDecl *D); void DiagnoseUnusedDecl(const NamedDecl *ND); /// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null /// statement as a \p Body, and it is located on the same line. /// /// This helps prevent bugs due to typos, such as: /// if (condition); /// do_stuff(); void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body, unsigned DiagID); /// Warn if a for/while loop statement \p S, which is followed by /// \p PossibleBody, has a suspicious null statement as a body. void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody); /// Warn if a value is moved to itself. void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, SourceLocation OpLoc); /// Warn if we're implicitly casting from a _Nullable pointer type to a /// _Nonnull one. void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType, SourceLocation Loc); /// Warn when implicitly casting 0 to nullptr. 
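/// Illustrative example (hypothetical declarations) of the conversion this
/// warns about:
///   void take(int *);
///   take(0);         // literal 0 implicitly converted to a null pointer
///   take(nullptr);   // preferred spelling; no warning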
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E); ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) { return DelayedDiagnostics.push(pool); } void PopParsingDeclaration(ParsingDeclState state, Decl *decl); typedef ProcessingContextState ParsingClassState; ParsingClassState PushParsingClass() { return DelayedDiagnostics.pushUndelayed(); } void PopParsingClass(ParsingClassState state) { DelayedDiagnostics.popUndelayed(state); } void redelayDiagnostics(sema::DelayedDiagnosticPool &pool); void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass, bool ObjCPropertyAccess, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReceiver = nullptr); bool makeUnavailableInSystemHeader(SourceLocation loc, UnavailableAttr::ImplicitReason reason); /// Issue any -Wunguarded-availability warnings in \c FD void DiagnoseUnguardedAvailabilityViolations(Decl *FD); //===--------------------------------------------------------------------===// // Expression Parsing Callbacks: SemaExpr.cpp. bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid); bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass = nullptr, bool ObjCPropertyAccess = false, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReciever = nullptr); void NoteDeletedFunction(FunctionDecl *FD); void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD); std::string getDeletedOrUnavailableSuffix(const FunctionDecl *FD); bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD, ObjCMethodDecl *Getter, SourceLocation Loc); void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc, ArrayRef<Expr *> Args); void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl }; void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); void PopExpressionEvaluationContext(); void DiscardCleanupsInEvaluationContext(); ExprResult TransformToPotentiallyEvaluated(Expr *E); ExprResult HandleExprEvaluationContextForTypeof(Expr *E); ExprResult ActOnConstantExpression(ExprResult Res); // Functions for marking a declaration referenced. These functions also // contain the relevant logic for marking if a reference to a function or // variable is an odr-use (in the C++11 sense). There are separate variants // for expressions referring to a decl; these exist because odr-use marking // needs to be delayed for some constant variables when we build one of the // named expressions. // // MightBeOdrUse indicates whether the use could possibly be an odr-use, and // should usually be true. This only needs to be set to false if the lack of // odr-use cannot be determined from the current context (for instance, // because the name denotes a virtual function and was written without an // explicit nested-name-specifier). 
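// As an illustrative sketch (hypothetical declarations) of the odr-use
// distinction these functions track:
//   constexpr int N = 4;
//   int arr[N];          // constant evaluation only; N need not be odr-used
//   const int *p = &N;   // taking the address odr-uses N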
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse); void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func, bool MightBeOdrUse = true); void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var); void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr); void MarkMemberReferenced(MemberExpr *E); void UpdateMarkingForLValueToRValue(Expr *E); void CleanupVarDeclMarking(); enum TryCaptureKind { TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef }; /// Try to capture the given variable. /// /// \param Var The variable to capture. /// /// \param Loc The location at which the capture occurs. /// /// \param Kind The kind of capture, which may be implicit (for either a /// block or a lambda), or explicit by-value or by-reference (for a lambda). /// /// \param EllipsisLoc The location of the ellipsis, if one is provided in /// an explicit lambda capture. /// /// \param BuildAndDiagnose Whether we are actually supposed to add the /// captures or diagnose errors. If false, this routine merely check whether /// the capture can occur without performing the capture itself or complaining /// if the variable cannot be captured. /// /// \param CaptureType Will be set to the type of the field used to capture /// this variable in the innermost block or lambda. Only valid when the /// variable can be captured. /// /// \param DeclRefType Will be set to the type of a reference to the capture /// from within the current scope. Only valid when the variable can be /// captured. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// variables that may or may not be used in certain specializations of /// a nested generic lambda. /// /// \returns true if an error occurred (i.e., the variable cannot be /// captured) and false if the capture succeeded. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind, SourceLocation EllipsisLoc, bool BuildAndDiagnose, QualType &CaptureType, QualType &DeclRefType, const unsigned *const FunctionScopeIndexToStopAt); /// Try to capture the given variable. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind = TryCapture_Implicit, SourceLocation EllipsisLoc = SourceLocation()); /// Checks if the variable must be captured. bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc); /// Given a variable, determine the type that a reference to that /// variable will have in the given scope. QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc); /// Mark all of the declarations referenced within a particular AST node as /// referenced. Used when template instantiation instantiates a non-dependent /// type -- entities referenced by the type are now referenced. void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T); void MarkDeclarationsReferencedInExpr(Expr *E, bool SkipLocalVariables = false); /// Try to recover by turning the given expression into a /// call. Returns true if recovery was attempted or an error was /// emitted; this may also leave the ExprResult invalid. bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD, bool ForceComplain = false, bool (*IsPlausibleResult)(QualType) = nullptr); /// Figure out if an expression could be turned into a call. 
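/// Illustrative sketch (hypothetical function) of the recovery attempted via
/// tryToRecoverWithCall/tryExprAsCall:
///   int f();
///   if (f) { }   // function named without '()'; recovery considers 'if (f())'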
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy, UnresolvedSetImpl &NonTemplateOverloads); /// Conditionally issue a diagnostic based on the current /// evaluation context. /// /// \param Statement If Statement is non-null, delay reporting the /// diagnostic until the function body is parsed, and then do a basic /// reachability analysis to determine if the statement is reachable. /// If it is unreachable, the diagnostic will not be emitted. bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement, const PartialDiagnostic &PD); // Primary Expressions. SourceRange getExprRange(Expr *E) const; ExprResult ActOnIdExpression( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand, std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr, bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr); void DecomposeUnqualifiedId(const UnqualifiedId &Id, TemplateArgumentListInfo &Buffer, DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *&TemplateArgs); bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R, std::unique_ptr<CorrectionCandidateCallback> CCC, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr); ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S, IdentifierInfo *II, bool AllowBuiltinCreation=false); ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, bool isAddressOfOperand, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, SourceLocation Loc, const CXXScopeSpec *SS = nullptr); ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, const CXXScopeSpec *SS = nullptr, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr); ExprResult BuildAnonymousStructUnionMemberReference( const CXXScopeSpec &SS, SourceLocation nameLoc, IndirectFieldDecl *indirectField, DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none), Expr *baseObjectExpr = nullptr, SourceLocation opLoc = SourceLocation()); ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S); ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool IsDefiniteInstance, const Scope *S); bool UseArgumentDependentLookup(const CXXScopeSpec &SS, const LookupResult &R, bool HasTrailingLParen); ExprResult BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, bool IsAddressOfOperand, const Scope *S, TypeSourceInfo **RecoveryTSI = nullptr); ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R, bool NeedsADL, bool AcceptInvalidDecl = false); ExprResult BuildDeclarationNameExpr( const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr, bool AcceptInvalidDecl = false); ExprResult BuildLiteralOperatorCall(LookupResult &R, DeclarationNameInfo &SuffixInfo, ArrayRef<Expr *> 
Args, SourceLocation LitEndLoc, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); ExprResult BuildPredefinedExpr(SourceLocation Loc, PredefinedExpr::IdentKind IK); ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind); ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val); bool CheckLoopHintExpr(Expr *E, SourceLocation Loc); ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnCharacterConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E); ExprResult ActOnParenListExpr(SourceLocation L, SourceLocation R, MultiExprArg Val); /// ActOnStringLiteral - The specified tokens were lexed as pasted string /// fragments (e.g. "foo" "bar" L"baz"). ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope = nullptr); ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<ParsedType> ArgTypes, ArrayRef<Expr *> ArgExprs); ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<TypeSourceInfo *> Types, ArrayRef<Expr *> Exprs); // Binary/Unary Operators. 'Tok' is the token for the operator. ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *InputExpr); ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *Input); ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op, Expr *Input); bool isQualifiedMemberAccess(Expr *E); QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc); ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, SourceRange R); ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, bool IsType, void *TyOrEx, SourceRange ArgRange); ExprResult CheckPlaceholderExpr(Expr *E); bool CheckVecStepExpr(Expr *E); bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind); bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc, SourceRange ExprRange, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnSizeofParameterPackExpr(Scope *S, SourceLocation OpLoc, IdentifierInfo &Name, SourceLocation NameLoc, SourceLocation RParenLoc); ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Kind, Expr *Input); ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc, Expr *LowerBound, SourceLocation ColonLoc, Expr *Length, SourceLocation RBLoc); // This struct is for use by ActOnMemberAccess to allow // BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after // changing the access operator from a '.' to a '->' (to see if that is the // change needed to fix an error about an unknown member, e.g. when the class // defines a custom operator->). 
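// Illustrative sketch (hypothetical types) of the '.' to '->' retry this
// enables:
//   struct S { int x; };
//   struct Ptr { S *operator->(); };
//   void use(Ptr p) { p.x = 0; }   // no member 'x' in Ptr; retried as 'p->x'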
struct ActOnMemberAccessExtraArgs { Scope *S; UnqualifiedId &Id; Decl *ObjCImpDecl; }; ExprResult BuildMemberReferenceExpr( Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, bool SuppressQualifierCheck = false, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, FieldDecl *Field, DeclAccessPair FoundDecl, const DeclarationNameInfo &MemberNameInfo); ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow); bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType, const CXXScopeSpec &SS, const LookupResult &R); ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Member, Decl *ObjCImpDecl); void ActOnDefaultCtorInitializers(Decl *CDtorDecl); bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<Expr *> Args, SourceLocation RParenLoc, bool ExecConfig = false); void CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param, const Expr *ArgExpr); /// ActOnCallExpr - Handle a call to Fn with the specified array of arguments. /// This provides the location of the left/right parens and a list of comma /// locations. ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr, bool IsExecConfig = false); ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc, ArrayRef<Expr *> Arg, SourceLocation RParenLoc, Expr *Config = nullptr, bool IsExecConfig = false, ADLCallKind UsesADL = ADLCallKind::NotADL); ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc, MultiExprArg ExecConfig, SourceLocation GGGLoc); ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc, Declarator &D, ParsedType &Ty, SourceLocation RParenLoc, Expr *CastExpr); ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc, TypeSourceInfo *Ty, SourceLocation RParenLoc, Expr *Op); CastKind PrepareScalarCast(ExprResult &src, QualType destType); /// Build an altivec or OpenCL literal. 
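/// Illustrative examples (sketch) of the literal forms this accepts:
///   float4 v = (float4)(1.0f, 2.0f, 3.0f, 4.0f);   // OpenCL vector literal
///   vector int w = (vector int)(1, 2, 3, 4);       // AltiVec vector literal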
ExprResult BuildVectorLiteral(SourceLocation LParenLoc, SourceLocation RParenLoc, Expr *E, TypeSourceInfo *TInfo); ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME); ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc, Expr *InitExpr); ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo, SourceLocation RParenLoc, Expr *LiteralExpr); ExprResult ActOnInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList, SourceLocation RBraceLoc); ExprResult ActOnDesignatedInitializer(Designation &Desig, SourceLocation Loc, bool GNUSyntax, ExprResult Init); private: static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind); public: ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc, tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr); ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc); /// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null /// in the case of a the GNU conditional expr extension. ExprResult ActOnConditionalOp(SourceLocation QuestionLoc, SourceLocation ColonLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr); /// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo". ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc, LabelDecl *TheDecl); void ActOnStartStmtExpr(); ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt, SourceLocation RPLoc); // "({..})" // Handle the final expression in a statement expression. ExprResult ActOnStmtExprResult(ExprResult E); void ActOnStmtExprError(); // __builtin_offsetof(type, identifier(.identifier|[expr])*) struct OffsetOfComponent { SourceLocation LocStart, LocEnd; bool isBrackets; // true if [expr], false if .ident union { IdentifierInfo *IdentInfo; Expr *E; } U; }; /// __builtin_offsetof(type, a.b[123][456].c) ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc, TypeSourceInfo *TInfo, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); ExprResult ActOnBuiltinOffsetOf(Scope *S, SourceLocation BuiltinLoc, SourceLocation TypeLoc, ParsedType ParsedArgTy, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); // __builtin_choose_expr(constExpr, expr1, expr2) ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr, SourceLocation RPLoc); // __builtin_va_arg(expr, type) ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty, SourceLocation RPLoc); ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E, TypeSourceInfo *TInfo, SourceLocation RPLoc); // __null ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc); bool CheckCaseExpression(Expr *E); /// Describes the result of an "if-exists" condition check. enum IfExistsResult { /// The symbol exists. IER_Exists, /// The symbol does not exist. IER_DoesNotExist, /// The name is a dependent name, so the results will differ /// from one instantiation to the next. IER_Dependent, /// An error occurred. 
IER_Error }; IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS, const DeclarationNameInfo &TargetNameInfo); IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name); StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, NestedNameSpecifierLoc QualifierLoc, DeclarationNameInfo NameInfo, Stmt *Nested); StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name, Stmt *Nested); //===------------------------- "Block" Extension ------------------------===// /// ActOnBlockStart - This callback is invoked when a block literal is /// started. void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockArguments - This callback allows processing of block arguments. /// If there are no arguments, this is still invoked. void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo, Scope *CurScope); /// ActOnBlockError - If there is an error parsing a block, this callback /// is invoked to pop the information about the block from the action impl. void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockStmtExpr - This is called when the body of a block statement /// literal was successfully completed. ^(int x){...} ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body, Scope *CurScope); //===---------------------------- Clang Extensions ----------------------===// /// __builtin_convertvector(...) ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- OpenCL Features -----------------------===// /// __builtin_astype(...) ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- C++ Features --------------------------===// // Act on C++ namespaces Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc, SourceLocation NamespaceLoc, SourceLocation IdentLoc, IdentifierInfo *Ident, SourceLocation LBrace, const ParsedAttributesView &AttrList, UsingDirectiveDecl *&UsingDecl); void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace); NamespaceDecl *getStdNamespace() const; NamespaceDecl *getOrCreateStdNamespace(); NamespaceDecl *lookupStdExperimentalNamespace(); CXXRecordDecl *getStdBadAlloc() const; EnumDecl *getStdAlignValT() const; private: // A cache representing if we've fully checked the various comparison category // types stored in ASTContext. The bit-index corresponds to the integer value // of a ComparisonCategoryType enumerator. llvm::SmallBitVector FullyCheckedComparisonCategories; ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl, CXXScopeSpec &SS, ParsedType TemplateTypeTy, IdentifierInfo *MemberOrBase); public: /// Lookup the specified comparison category types in the standard /// library, and check the VarDecls possibly returned by the operator<=> /// builtins for that type. /// /// \return The type of the comparison category type corresponding to the /// specified Kind, or a null type if an error occurs. QualType CheckComparisonCategoryType(ComparisonCategoryType Kind, SourceLocation Loc); /// Tests whether Ty is an instance of std::initializer_list and, if /// it is and Element is not NULL, assigns the element type to Element.
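/// For example (illustrative sketch, assuming <initializer_list> is available):
///   void f(std::initializer_list<int>);
///   f({1, 2, 3});          // argument is a std::initializer_list<int>
///   auto xs = {1, 2, 3};   // deduced as std::initializer_list<int>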
bool isStdInitializerList(QualType Ty, QualType *Element); /// Looks for the std::initializer_list template and instantiates it /// with Element, or emits an error if it's not found. /// /// \returns The instantiated template, or null on error. QualType BuildStdInitializerList(QualType Element, SourceLocation Loc); /// Determine whether Ctor is an initializer-list constructor, as /// defined in [dcl.init.list]p2. bool isInitListConstructor(const FunctionDecl *Ctor); Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc, SourceLocation NamespcLoc, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *NamespcName, const ParsedAttributesView &AttrList); void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir); Decl *ActOnNamespaceAliasDef(Scope *CurScope, SourceLocation NamespaceLoc, SourceLocation AliasLoc, IdentifierInfo *Alias, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *Ident); void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow); bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target, const LookupResult &PreviousDecls, UsingShadowDecl *&PrevShadow); UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD, NamedDecl *Target, UsingShadowDecl *PrevDecl); bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc, bool HasTypenameKeyword, const CXXScopeSpec &SS, SourceLocation NameLoc, const LookupResult &Previous); bool CheckUsingDeclQualifier(SourceLocation UsingLoc, bool HasTypename, const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, SourceLocation NameLoc); NamedDecl *BuildUsingDeclaration( Scope *S, AccessSpecifier AS, SourceLocation UsingLoc, bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS, DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc, const ParsedAttributesView &AttrList, bool IsInstantiation); NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom, ArrayRef<NamedDecl *> Expansions); bool CheckInheritingConstructorUsingDecl(UsingDecl *UD); /// Given a derived-class using shadow declaration for a constructor and the /// corresponding base class constructor, find or create the implicit /// synthesized derived class constructor to use for this initialization. CXXConstructorDecl * findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor, ConstructorUsingShadowDecl *DerivedShadow); Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS, SourceLocation UsingLoc, SourceLocation TypenameLoc, CXXScopeSpec &SS, UnqualifiedId &Name, SourceLocation EllipsisLoc, const ParsedAttributesView &AttrList); Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS, MultiTemplateParamsArg TemplateParams, SourceLocation UsingLoc, UnqualifiedId &Name, const ParsedAttributesView &AttrList, TypeResult Type, Decl *DeclFromDeclSpec); /// BuildCXXConstructExpr - Creates a complete call to a constructor, /// including handling of its default argument expressions. /// /// \param ConstructKind - a CXXConstructExpr::ConstructionKind ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); /// Build a CXXConstructExpr whose constructor has already been resolved if /// it denotes an inherited constructor.
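/// Illustrative sketch (hypothetical types) of an inherited constructor:
///   struct Base { Base(int); };
///   struct Derived : Base { using Base::Base; };
///   Derived d(42);   // constructed through the constructor inherited from Base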
ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); // FIXME: Can we remove this and have the above BuildCXXConstructExpr check if // the constructor can be elidable? ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field); /// Instantiate or parse a C++ default argument expression as necessary. /// Return true on error. bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating /// the default expr if needed. ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// FinalizeVarWithDestructor - Prepare for calling destructor on the /// constructed variable. void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType); /// Helper class that collects exception specifications for /// implicitly-declared special member functions. class ImplicitExceptionSpecification { // Pointer to allow copying Sema *Self; // We order exception specifications thus: // noexcept is the most restrictive, but is only used in C++11. // throw() comes next. // Then a throw(collected exceptions) // Finally no specification, which is expressed as noexcept(false). // throw(...) is used instead if any called function uses it. ExceptionSpecificationType ComputedEST; llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen; SmallVector<QualType, 4> Exceptions; void ClearExceptions() { ExceptionsSeen.clear(); Exceptions.clear(); } public: explicit ImplicitExceptionSpecification(Sema &Self) : Self(&Self), ComputedEST(EST_BasicNoexcept) { if (!Self.getLangOpts().CPlusPlus11) ComputedEST = EST_DynamicNone; } /// Get the computed exception specification type. ExceptionSpecificationType getExceptionSpecType() const { assert(!isComputedNoexcept(ComputedEST) && "noexcept(expr) should not be a possible result"); return ComputedEST; } /// The number of exceptions in the exception specification. unsigned size() const { return Exceptions.size(); } /// The set of exceptions in the exception specification. const QualType *data() const { return Exceptions.data(); } /// Integrate another called method into the collected data. void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method); /// Integrate an invoked expression into the collected data. void CalledExpr(Expr *E); /// Overwrite an EPI's exception specification with this /// computed exception specification. 
    FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
      FunctionProtoType::ExceptionSpecInfo ESI;
      ESI.Type = getExceptionSpecType();
      if (ESI.Type == EST_Dynamic) {
        ESI.Exceptions = Exceptions;
      } else if (ESI.Type == EST_None) {
        /// C++11 [except.spec]p14:
        /// The exception-specification is noexcept(false) if the set of
        /// potential exceptions of the special member function contains "any"
        ESI.Type = EST_NoexceptFalse;
        ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(), tok::kw_false).get();
      }
      return ESI;
    }
  };

  /// Determine what sort of exception specification a defaulted
  /// default constructor of a class will have.
  ImplicitExceptionSpecification ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD);

  /// Determine what sort of exception specification a defaulted
  /// copy constructor of a class will have, and whether the parameter
  /// will be const.
  ImplicitExceptionSpecification ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);

  /// Determine what sort of exception specification a defaulted
  /// copy assignment operator of a class will have, and whether the
  /// parameter will be const.
  ImplicitExceptionSpecification ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);

  /// Determine what sort of exception specification a defaulted move
  /// constructor of a class will have.
  ImplicitExceptionSpecification ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);

  /// Determine what sort of exception specification a defaulted move
  /// assignment operator of a class will have.
  ImplicitExceptionSpecification ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);

  /// Determine what sort of exception specification a defaulted
  /// destructor of a class will have.
  ImplicitExceptionSpecification ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD);

  /// Determine what sort of exception specification an inheriting
  /// constructor of a class will have.
  ImplicitExceptionSpecification ComputeInheritingCtorExceptionSpec(SourceLocation Loc, CXXConstructorDecl *CD);

  /// Evaluate the implicit exception specification for a defaulted
  /// special member function.
  void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD);

  /// Check the given noexcept-specifier, convert its expression, and compute
  /// the appropriate ExceptionSpecificationType.
  ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr, ExceptionSpecificationType &EST);

  /// Check the given exception-specification and update the
  /// exception specification information with the results.
  void checkExceptionSpecification(bool IsTopLevel, ExceptionSpecificationType EST, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr, SmallVectorImpl<QualType> &Exceptions, FunctionProtoType::ExceptionSpecInfo &ESI);

  /// Determine if we're in a case where we need to (incorrectly) eagerly
  /// parse an exception specification to work around a libstdc++ bug.
  bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);

  /// Add an exception-specification to the given member function
  /// (or member function template). The exception-specification was parsed
  /// after the method itself was declared.
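  // Editor-added illustration (a standalone snippet, not part of this class):
  // the language rule the ImplicitExceptionSpecification machinery above
  // implements. A defaulted special member is noexcept unless something it must
  // invoke may throw, in which case the computed specification is noexcept(false).
  //
  // namespace implicit_spec_example {
  // struct MayThrow { MayThrow() noexcept(false) {} };
  // struct Quiet { };              // implicit Quiet::Quiet() is noexcept
  // struct Noisy { MayThrow m; };  // implicit Noisy::Noisy() is noexcept(false)
  // static_assert(noexcept(Quiet()), "defaulted ctor invokes nothing that can throw");
  // static_assert(!noexcept(Noisy()), "defaulted ctor must invoke MayThrow()");
  // } // namespace implicit_spec_example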
void actOnDelayedExceptionSpecification(Decl *Method, ExceptionSpecificationType EST, SourceRange SpecificationRange, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr); class InheritedConstructorInfo; /// Determine if a special member function should have a deleted /// definition when it is defaulted. bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM, InheritedConstructorInfo *ICI = nullptr, bool Diagnose = false); /// Declare the implicit default constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// default constructor will be added. /// /// \returns The implicitly-declared default constructor. CXXConstructorDecl *DeclareImplicitDefaultConstructor( CXXRecordDecl *ClassDecl); /// DefineImplicitDefaultConstructor - Checks for feasibility of /// defining this constructor as the default constructor. void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit destructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// destructor will be added. /// /// \returns The implicitly-declared destructor. CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl); /// DefineImplicitDestructor - Checks for feasibility of /// defining this destructor as the default destructor. void DefineImplicitDestructor(SourceLocation CurrentLocation, CXXDestructorDecl *Destructor); /// Build an exception spec for destructors that don't have one. /// /// C++11 says that user-defined destructors with no exception spec get one /// that looks as if the destructor was implicitly declared. void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor); /// Define the specified inheriting constructor. void DefineInheritingConstructor(SourceLocation UseLoc, CXXConstructorDecl *Constructor); /// Declare the implicit copy constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy constructor will be added. /// /// \returns The implicitly-declared copy constructor. CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitCopyConstructor - Checks for feasibility of /// defining this constructor as the copy constructor. void DefineImplicitCopyConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit move constructor for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move constructor will be added. /// /// \returns The implicitly-declared move constructor, or NULL if it wasn't /// declared. CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitMoveConstructor - Checks for feasibility of /// defining this constructor as the move constructor. void DefineImplicitMoveConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit copy assignment operator for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy assignment operator will be added. /// /// \returns The implicitly-declared copy assignment operator. CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared copy assignment operator. 
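  // Editor-added illustration (a standalone snippet, not part of this class):
  // the kind of class the Declare/DefineImplicit* entry points and
  // ShouldDeleteSpecialMember above reason about. Plain ISO C++; names invented.
  //
  // namespace implicit_members_example {
  // struct MoveOnly {
  //   MoveOnly() = default;
  //   MoveOnly(MoveOnly &&) = default; // user-declared move constructor...
  //   // ...so the copy constructor/copy assignment are implicitly deleted and
  //   // no implicit move assignment operator is declared.
  // };
  // inline MoveOnly make() { return MoveOnly(); }   // OK: uses the move ctor
  // // MoveOnly copy(const MoveOnly &m) { return m; } // error: copy ctor deleted
  // } // namespace implicit_members_example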
  void DefineImplicitCopyAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl);

  /// Declare the implicit move assignment operator for the given class.
  ///
  /// \param ClassDecl The class declaration into which the implicit
  /// move assignment operator will be added.
  ///
  /// \returns The implicitly-declared move assignment operator, or NULL if it
  /// wasn't declared.
  CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);

  /// Defines an implicitly-declared move assignment operator.
  void DefineImplicitMoveAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl);

  /// Force the declaration of any implicitly-declared members of this
  /// class.
  void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);

  /// Check a completed declaration of an implicit special member.
  void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);

  /// Determine whether the given function is an implicitly-deleted
  /// special member function.
  bool isImplicitlyDeleted(FunctionDecl *FD);

  /// Check whether 'this' shows up in the type of a static member
  /// function after the (naturally empty) cv-qualifier-seq would be.
  ///
  /// \returns true if an error occurred.
  bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);

  /// Whether 'this' shows up in the exception specification of a static
  /// member function.
  bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);

  /// Check whether 'this' shows up in the attributes of the given
  /// static member function.
  ///
  /// \returns true if an error occurred.
  bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);

  /// MaybeBindToTemporary - If the passed in expression has a record type with
  /// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise
  /// it simply returns the passed in expression.
  ExprResult MaybeBindToTemporary(Expr *E);

  bool CompleteConstructorCall(CXXConstructorDecl *Constructor, MultiExprArg ArgsPtr, SourceLocation Loc, SmallVectorImpl<Expr*> &ConvertedArgs, bool AllowExplicit = false, bool IsListInitialization = false);

  ParsedType getInheritingConstructorName(CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo &Name);
  ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, bool EnteringContext);
  ParsedType getDestructorName(SourceLocation TildeLoc, IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, ParsedType ObjectType, bool EnteringContext);
  ParsedType getDestructorTypeForDecltype(const DeclSpec &DS, ParsedType ObjectType);

  // Checks that reinterpret casts don't have undefined behavior.
  void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType, bool IsDereference, SourceRange Range);

  /// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's.
  ExprResult ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, SourceLocation LAngleBracketLoc, Declarator &D, SourceLocation RAngleBracketLoc, SourceLocation LParenLoc, Expr *E, SourceLocation RParenLoc);

  ExprResult BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, TypeSourceInfo *Ty, Expr *E, SourceRange AngleBrackets, SourceRange Parens);

  ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc);
  ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc);

  /// ActOnCXXTypeid - Parse typeid( something ).
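  // Editor-added illustration (a standalone snippet, not part of this class):
  // named casts of the kind routed through ActOnCXXNamedCast/BuildCXXNamedCast
  // and a typeid expression as parsed by the declaration that follows. Assumes
  // only the standard <typeinfo> header.
  //
  // #include <typeinfo>
  // namespace named_cast_example {
  // struct Shape { virtual ~Shape() { } };
  // struct Circle : Shape { };
  // inline const char *classify(Shape &s) {
  //   if (dynamic_cast<Circle *>(&s))  // checked downcast
  //     return "circle";
  //   return typeid(s).name();         // typeid on a polymorphic glvalue
  // }
  // } // namespace named_cast_example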
ExprResult ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXUuidof - Parse __uuidof( something ). ExprResult ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); /// Handle a C++1z fold-expression: ( expr op ... op expr ). ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, tok::TokenKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc); ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, BinaryOperatorKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc); ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc, BinaryOperatorKind Operator); //// ActOnCXXThis - Parse 'this' pointer. ExprResult ActOnCXXThis(SourceLocation loc); /// Try to retrieve the type of the 'this' pointer. /// /// \returns The type of 'this', if possible. Otherwise, returns a NULL type. QualType getCurrentThisType(); /// When non-NULL, the C++ 'this' expression is allowed despite the /// current context not being a non-static member function. In such cases, /// this provides the type used for 'this'. QualType CXXThisTypeOverride; /// RAII object used to temporarily allow the C++ 'this' expression /// to be used, with the given qualifiers on the current class type. class CXXThisScopeRAII { Sema &S; QualType OldCXXThisTypeOverride; bool Enabled; public: /// Introduce a new scope where 'this' may be allowed (when enabled), /// using the given declaration (which is either a class template or a /// class) along with the given qualifiers. /// along with the qualifiers placed on '*this'. CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals, bool Enabled = true); ~CXXThisScopeRAII(); }; /// Make sure the value of 'this' is actually available in the current /// context, if it is a potentially evaluated context. /// /// \param Loc The location at which the capture of 'this' occurs. /// /// \param Explicit Whether 'this' is explicitly captured in a lambda /// capture list. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// 'this' that may or may not be used in certain specializations of /// a nested generic lambda (depending on whether the name resolves to /// a non-static member function or a static function). /// \return returns 'true' if failed, 'false' if success. bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false, bool BuildAndDiagnose = true, const unsigned *const FunctionScopeIndexToStopAt = nullptr, bool ByCopy = false); /// Determine whether the given type is the type of *this that is used /// outside of the body of a member function for a type that is currently /// being defined. bool isThisOutsideMemberFunctionBody(QualType BaseType); /// ActOnCXXBoolLiteral - Parse {true,false} literals. ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); /// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals. 
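  // Editor-added illustration (a standalone snippet, not part of this class):
  // C++17 fold-expressions of the kind handled by ActOnCXXFoldExpr and
  // BuildCXXFoldExpr above; an empty pack folded with '&&' goes through
  // BuildEmptyCXXFoldExpr.
  //
  // namespace fold_expr_example {
  // template <typename... Ts> constexpr auto sum(Ts... ts) { return (ts + ... + 0); }
  // template <typename... Bs> constexpr bool all(Bs... bs) { return (bs && ...); }
  // static_assert(sum(1, 2, 3) == 6, "binary right fold with an init operand");
  // static_assert(all(), "empty '&&' fold is defined to be true");
  // } // namespace fold_expr_example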
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); ExprResult ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs, SourceLocation AtLoc, SourceLocation RParen); /// ActOnCXXNullPtrLiteral - Parse 'nullptr'. ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc); //// ActOnCXXThrow - Parse throw expressions. ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr); ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex, bool IsThrownVarInScope); bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E); /// ActOnCXXTypeConstructExpr - Parse construction of a specified type. /// Can be interpreted either as function-style casting ("int(x)") /// or class type construction ("ClassType(x,y,z)") /// or creation of a value-initialized type ("int()"). ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep, SourceLocation LParenOrBraceLoc, MultiExprArg Exprs, SourceLocation RParenOrBraceLoc, bool ListInitialization); ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc, bool ListInitialization); /// ActOnCXXNew - Parsed a C++ 'new' expression. ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, Declarator &D, Expr *Initializer); ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, QualType AllocType, TypeSourceInfo *AllocTypeInfo, Expr *ArraySize, SourceRange DirectInitRange, Expr *Initializer); /// Determine whether \p FD is an aligned allocation or deallocation /// function that is unavailable. bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const; /// Produce diagnostics if \p FD is an aligned allocation or deallocation /// function that is unavailable. void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD, SourceLocation Loc); bool CheckAllocatedType(QualType AllocType, SourceLocation Loc, SourceRange R); /// The scope in which to find allocation functions. enum AllocationFunctionScope { /// Only look for allocation functions in the global scope. AFS_Global, /// Only look for allocation functions in the scope of the /// allocated class. AFS_Class, /// Look for allocation functions in both the global scope /// and in the scope of the allocated class. AFS_Both }; /// Finds the overloads of operator new and delete that are appropriate /// for the allocation. 
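  // Editor-added illustration (a standalone snippet, not part of this class):
  // the forms distinguished by ActOnCXXTypeConstructExpr above, plus new and
  // delete expressions handled by ActOnCXXNew and ActOnCXXDelete. Plain ISO C++.
  //
  // namespace construct_expr_example {
  // struct Point { int x, y; };
  // inline void forms() {
  //   int a = int(3.5);            // function-style cast
  //   int b = int();               // value-initialization ("int()") -> 0
  //   Point p = Point{1, 2};       // list-initialization of a class type
  //   Point *q = new Point{a, b};  // new-expression
  //   delete q;                    // delete-expression
  //   (void)p;
  // }
  // } // namespace construct_expr_example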
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range, AllocationFunctionScope NewScope, AllocationFunctionScope DeleteScope, QualType AllocType, bool IsArray, bool &PassAlignment, MultiExprArg PlaceArgs, FunctionDecl *&OperatorNew, FunctionDecl *&OperatorDelete, bool Diagnose = true); void DeclareGlobalNewDelete(); void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return, ArrayRef<QualType> Params); bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD, DeclarationName Name, FunctionDecl* &Operator, bool Diagnose = true); FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc, bool CanProvideSize, bool Overaligned, DeclarationName Name); FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc, CXXRecordDecl *RD); /// ActOnCXXDelete - Parsed a C++ 'delete' expression ExprResult ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal, bool ArrayForm, Expr *Operand); void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc, bool IsDelete, bool CallCanBeVirtual, bool WarnOnNonAbstractTypes, SourceLocation DtorLoc); ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen, Expr *Operand, SourceLocation RParen); ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand, SourceLocation RParen); /// Parsed one of the type trait support pseudo-functions. ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<ParsedType> Args, SourceLocation RParenLoc); ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<TypeSourceInfo *> Args, SourceLocation RParenLoc); /// ActOnArrayTypeTrait - Parsed one of the binary type trait support /// pseudo-functions. ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, ParsedType LhsTy, Expr *DimExpr, SourceLocation RParen); ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, TypeSourceInfo *TSInfo, Expr *DimExpr, SourceLocation RParen); /// ActOnExpressionTrait - Parsed one of the unary type trait support /// pseudo-functions. ExprResult ActOnExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult BuildExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, ParsedType &ObjectType, bool &MayBePseudoDestructor); ExprResult BuildPseudoDestructorExpr(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, const CXXScopeSpec &SS, TypeSourceInfo *ScopeType, SourceLocation CCLoc, SourceLocation TildeLoc, PseudoDestructorTypeStorage DestroyedType); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, UnqualifiedId &FirstTypeName, SourceLocation CCLoc, SourceLocation TildeLoc, UnqualifiedId &SecondTypeName); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, SourceLocation TildeLoc, const DeclSpec& DS); /// MaybeCreateExprWithCleanups - If the current full-expression /// requires any cleanups, surround it with a ExprWithCleanups node. /// Otherwise, just returns the passed-in expression. 
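  // Editor-added illustration (a standalone snippet, not part of this class):
  // a member allocation/deallocation pair of the kind FindAllocationFunctions
  // looks up in AFS_Class scope; the matching member 'operator delete' is what
  // FindDeallocationFunction resolves for the delete-expression. Assumes only
  // the standard <cstddef> and <cstdlib> headers; error handling is omitted.
  //
  // #include <cstddef>
  // #include <cstdlib>
  // namespace class_alloc_example {
  // struct Pooled {
  //   static void *operator new(std::size_t n) { return std::malloc(n); }
  //   static void operator delete(void *p) noexcept { std::free(p); }
  // };
  // inline void roundtrip() {
  //   Pooled *p = new Pooled(); // uses Pooled::operator new
  //   delete p;                 // uses Pooled::operator delete
  // }
  // } // namespace class_alloc_example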
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr); Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt); ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr); MaterializeTemporaryExpr * CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary, bool BoundToLvalueReference); ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) { return ActOnFinishFullExpr( Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue); } ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC, bool DiscardedValue, bool IsConstexpr = false); StmtResult ActOnFinishFullStmt(Stmt *Stmt); // Marks SS invalid if it represents an incomplete type. bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC); DeclContext *computeDeclContext(QualType T); DeclContext *computeDeclContext(const CXXScopeSpec &SS, bool EnteringContext = false); bool isDependentScopeSpecifier(const CXXScopeSpec &SS); CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS); /// The parser has parsed a global nested-name-specifier '::'. /// /// \param CCLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS); /// The parser has parsed a '__super' nested-name-specifier. /// /// \param SuperLoc The location of the '__super' keyword. /// /// \param ColonColonLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc, SourceLocation ColonColonLoc, CXXScopeSpec &SS); bool isAcceptableNestedNameSpecifier(const NamedDecl *SD, bool *CanCorrect = nullptr); NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS); /// Keeps information about an identifier in a nested-name-spec. /// struct NestedNameSpecInfo { /// The type of the object, if we're parsing nested-name-specifier in /// a member access expression. ParsedType ObjectType; /// The identifier preceding the '::'. IdentifierInfo *Identifier; /// The location of the identifier. SourceLocation IdentifierLoc; /// The location of the '::'. SourceLocation CCLoc; /// Creates info object for the most typical case. NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType()) : ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, QualType ObjectType) : ObjectType(ParsedType::make(ObjectType)), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } }; bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo); bool BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, NamedDecl *ScopeLookupResult, bool ErrorRecoveryLookup, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); /// The parser has parsed a nested-name-specifier 'identifier::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param IdInfo Parser information about an identifier in the /// nested-name-spec. /// /// \param EnteringContext Whether we're entering the context nominated by /// this nested-name-specifier. 
/// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param ErrorRecoveryLookup If true, then this method is called to improve /// error recovery. In this case do not emit error message. /// /// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':' /// are allowed. The bool value pointed by this parameter is set to 'true' /// if the identifier is treated as if it was followed by ':', not '::'. /// /// \param OnlyNamespace If true, only considers namespaces in lookup. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, bool ErrorRecoveryLookup = false, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); ExprResult ActOnDecltypeExpression(Expr *E); bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS, const DeclSpec &DS, SourceLocation ColonColonLoc); bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo, bool EnteringContext); /// The parser has parsed a nested-name-specifier /// 'template[opt] template-name < template-args >::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param TemplateKWLoc the location of the 'template' keyword, if any. /// \param TemplateName the template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). /// \param CCLoc The location of the '::'. /// /// \param EnteringContext Whether we're entering the context of the /// nested-name-specifier. /// /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, SourceLocation CCLoc, bool EnteringContext); /// Given a C++ nested-name-specifier, produce an annotation value /// that the parser can use later to reconstruct the given /// nested-name-specifier. /// /// \param SS A nested-name-specifier. /// /// \returns A pointer containing all of the information in the /// nested-name-specifier \p SS. void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS); /// Given an annotation pointer for a nested-name-specifier, restore /// the nested-name-specifier structure. /// /// \param Annotation The annotation pointer, produced by /// \c SaveNestedNameSpecifierAnnotation(). /// /// \param AnnotationRange The source range corresponding to the annotation. /// /// \param SS The nested-name-specifier that will be updated with the contents /// of the annotation pointer. 
void RestoreNestedNameSpecifierAnnotation(void *Annotation, SourceRange AnnotationRange, CXXScopeSpec &SS); bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global /// scope or nested-name-specifier) is parsed, part of a declarator-id. /// After this method is called, according to [C++ 3.4.3p3], names should be /// looked up in the declarator-id's scope, until the declarator is parsed and /// ActOnCXXExitDeclaratorScope is called. /// The 'SS' should be a non-empty valid CXXScopeSpec. bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS); /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously /// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well. /// Used to indicate that names should revert to being looked up in the /// defining scope. void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an /// initializer for the declaration 'Dcl'. /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a /// static data member of class X, names should be looked up in the scope of /// class X. void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl); /// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an /// initializer for the declaration 'Dcl'. void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl); /// Create a new lambda closure type. CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange, TypeSourceInfo *Info, bool KnownDependent, LambdaCaptureDefault CaptureDefault); /// Start the definition of a lambda expression. CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange, TypeSourceInfo *MethodType, SourceLocation EndLoc, ArrayRef<ParmVarDecl *> Params, bool IsConstexprSpecified); /// Endow the lambda scope info with the relevant properties. void buildLambdaScope(sema::LambdaScopeInfo *LSI, CXXMethodDecl *CallOperator, SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault, SourceLocation CaptureDefaultLoc, bool ExplicitParams, bool ExplicitResultType, bool Mutable); /// Perform initialization analysis of the init-capture and perform /// any implicit conversions such as an lvalue-to-rvalue conversion if /// not being used to initialize a reference. ParsedType actOnLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) { return ParsedType::make(buildLambdaInitCaptureInitialization( Loc, ByRef, Id, InitKind != LambdaCaptureInitKind::CopyInit, Init)); } QualType buildLambdaInitCaptureInitialization(SourceLocation Loc, bool ByRef, IdentifierInfo *Id, bool DirectInit, Expr *&Init); /// Create a dummy variable within the declcontext of the lambda's /// call operator, for name lookup purposes for a lambda init capture. /// /// CodeGen handles emission of lambda captures, ignoring these dummy /// variables appropriately. VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc, QualType InitCaptureType, IdentifierInfo *Id, unsigned InitStyle, Expr *Init); /// Build the implicit field for an init-capture. FieldDecl *buildInitCaptureField(sema::LambdaScopeInfo *LSI, VarDecl *Var); /// Note that we have finished the explicit captures for the /// given lambda. 
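  // Editor-added illustration (a standalone snippet, not part of this class):
  // a C++14 init-capture of the kind processed by
  // actOnLambdaInitCaptureInitialization, buildLambdaInitCaptureInitialization
  // and createLambdaInitCaptureVarDecl above. Assumes only the standard
  // <memory> and <utility> headers.
  //
  // #include <memory>
  // #include <utility>
  // namespace init_capture_example {
  // inline int use() {
  //   auto owned = std::make_unique<int>(7);
  //   auto reader = [p = std::move(owned)] { return *p; }; // init-capture by move
  //   return reader();
  // }
  // } // namespace init_capture_example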
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI); /// Introduce the lambda parameters into scope. void addLambdaParameters( ArrayRef<LambdaIntroducer::LambdaCapture> Captures, CXXMethodDecl *CallOperator, Scope *CurScope); /// Deduce a block or lambda's return type based on the return /// statements present in the body. void deduceClosureReturnType(sema::CapturingScopeInfo &CSI); /// ActOnStartOfLambdaDefinition - This is called just before we start /// parsing the body of a lambda; it analyzes the explicit captures and /// arguments, and sets up various data-structures for the body of the /// lambda. void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, Declarator &ParamInfo, Scope *CurScope); /// ActOnLambdaError - If there is an error parsing a lambda, this callback /// is invoked to pop the information about the lambda. void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation = false); /// ActOnLambdaExpr - This is called when the body of a lambda expression /// was successfully completed. ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope); /// Does copying/destroying the captured variable have side effects? bool CaptureHasSideEffects(const sema::Capture &From); /// Diagnose if an explicit lambda capture is unused. Returns true if a /// diagnostic is emitted. bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange, const sema::Capture &From); /// Complete a lambda-expression having processed and attached the /// lambda body. ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc, sema::LambdaScopeInfo *LSI); /// Get the return type to use for a lambda's conversion function(s) to /// function pointer type, given the type of the call operator. QualType getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType); /// Define the "body" of the conversion from a lambda object to a /// function pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToFunctionPointerConversion( SourceLocation CurrentLoc, CXXConversionDecl *Conv); /// Define the "body" of the conversion from a lambda object to a /// block pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc, CXXConversionDecl *Conv); ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation, SourceLocation ConvLocation, CXXConversionDecl *Conv, Expr *Src); // ParseObjCStringLiteral - Parse Objective-C string literals. ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs, ArrayRef<Expr *> Strings); ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S); /// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the /// numeric literal expression. Type of the expression will be "NSNumber *" /// or "id" if NSNumber is unavailable. 
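  // Editor-added illustration (a standalone snippet, not part of this class):
  // the capture-less-lambda conversion whose "body" is synthesized by
  // DefineImplicitLambdaToFunctionPointerConversion above. Plain ISO C++.
  //
  // namespace lambda_conversion_example {
  // inline int apply_twice(int (*f)(int), int x) { return f(f(x)); }
  // inline int use() {
  //   int (*doubler)(int) = [](int v) { return v * 2; }; // lambda -> fn pointer
  //   return apply_twice(doubler, 3);                    // yields 12
  // }
  // } // namespace lambda_conversion_example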
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number); ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc, bool Value); ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements); /// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the /// '@' prefixed parenthesized expression. The type of the expression will /// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type /// of ValueType, which is allowed to be a built-in numeric type, "char *", /// "const char *" or C structure with attribute 'objc_boxable'. ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr); ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr, Expr *IndexExpr, ObjCMethodDecl *getterMethod, ObjCMethodDecl *setterMethod); ExprResult BuildObjCDictionaryLiteral(SourceRange SR, MutableArrayRef<ObjCDictionaryElement> Elements); ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc, TypeSourceInfo *EncodedTypeInfo, SourceLocation RParenLoc); ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl, CXXConversionDecl *Method, bool HadMultipleCandidates); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc, SourceLocation EncodeLoc, SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc); /// ParseObjCSelectorExpression - Build selector expression for \@selector ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc, SourceLocation SelLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, bool WarnMultipleSelectors); /// ParseObjCProtocolExpression - Build protocol expression for \@protocol ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName, SourceLocation AtLoc, SourceLocation ProtoLoc, SourceLocation LParenLoc, SourceLocation ProtoIdLoc, SourceLocation RParenLoc); //===--------------------------------------------------------------------===// // C++ Declarations // Decl *ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc, Expr *LangStr, SourceLocation LBraceLoc); Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec, SourceLocation RBraceLoc); //===--------------------------------------------------------------------===// // C++ Classes // CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS); bool isCurrentClassName(const IdentifierInfo &II, Scope *S, const CXXScopeSpec *SS = nullptr); bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS); bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc, SourceLocation ColonLoc, const ParsedAttributesView &Attrs); NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, Expr *BitfieldWidth, const VirtSpecifiers &VS, InClassInitStyle InitStyle); void ActOnStartCXXInClassMemberInitializer(); void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc, Expr *Init); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, SourceLocation LParenLoc, ArrayRef<Expr *> Args, SourceLocation RParenLoc, SourceLocation EllipsisLoc); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *InitList, SourceLocation EllipsisLoc); MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, 
                                   IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *Init, SourceLocation EllipsisLoc);

  MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init, SourceLocation IdLoc);

  MemInitResult BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, Expr *Init, CXXRecordDecl *ClassDecl, SourceLocation EllipsisLoc);

  MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init, CXXRecordDecl *ClassDecl);

  bool SetDelegatingInitializer(CXXConstructorDecl *Constructor, CXXCtorInitializer *Initializer);

  bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors, ArrayRef<CXXCtorInitializer *> Initializers = None);

  void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);

  /// MarkBaseAndMemberDestructorsReferenced - Given a record decl,
  /// mark all the non-trivial destructors of its members and bases as
  /// referenced.
  void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc, CXXRecordDecl *Record);

  /// The list of classes whose vtables have been used within
  /// this translation unit, and the source locations at which the
  /// first use occurred.
  typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;

  /// The list of vtables that are required but have not yet been
  /// materialized.
  SmallVector<VTableUse, 16> VTableUses;

  /// The set of classes whose vtables have been used within
  /// this translation unit, and a bit that will be true if the vtable is
  /// required to be emitted (otherwise, it should be emitted only if needed
  /// by code generation).
  llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;

  /// Load any externally-stored vtable uses.
  void LoadExternalVTableUses();

  /// Note that the vtable for the given class was used at the
  /// given location.
  void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class, bool DefinitionRequired = false);

  /// Mark the exception specifications of all virtual member functions
  /// in the given class as needed.
  void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc, const CXXRecordDecl *RD);

  /// MarkVirtualMembersReferenced - Will mark all members of the given
  /// CXXRecordDecl referenced.
  void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD);

  /// Define all of the vtables that have been used in this
  /// translation unit and reference any virtual members used by those
  /// vtables.
  ///
  /// \returns true if any work was done, false otherwise.
  bool DefineUsedVTables();

  void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);

  void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc, ArrayRef<CXXCtorInitializer*> MemInits, bool AnyErrors);

  /// Check class-level dllimport/dllexport attribute. The caller must
  /// ensure that referenceDLLExportedClassMethods is called at some point later
  /// when all outer classes of Class are complete.
  void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
  void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class);

  void referenceDLLExportedClassMethods();

  void propagateDLLAttrToBaseClassTemplate(CXXRecordDecl *Class, Attr *ClassAttr, ClassTemplateSpecializationDecl *BaseTemplateSpec, SourceLocation BaseLoc);

  void CheckCompletedCXXClass(CXXRecordDecl *Record);

  /// Check that the C++ class annotated with "trivial_abi" satisfies all the
  /// conditions that are needed for the attribute to have an effect.
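  // Editor-added illustration (a standalone snippet, not part of this class):
  // the base, member and delegating mem-initializers built by
  // BuildBaseInitializer, BuildMemberInitializer and BuildDelegatingInitializer
  // above. Plain ISO C++; names invented for the example.
  //
  // namespace mem_init_example {
  // struct Base {
  //   explicit Base(int b) : stored(b) {}
  //   int stored;
  // };
  // struct Derived : Base {
  //   Derived(int b, int m) : Base(b), member(m) {} // base + member initializers
  //   Derived() : Derived(0, 0) {}                  // delegating initializer
  //   int member;
  // };
  // } // namespace mem_init_example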
void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD); void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc, Decl *TagDecl, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); void ActOnFinishCXXMemberDecls(); void ActOnFinishCXXNonNestedClass(Decl *D); void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param); unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template); void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param); void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnFinishDelayedMemberInitializers(Decl *Record); void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD, CachedTokens &Toks); void UnmarkAsLateParsedTemplate(FunctionDecl *FD); bool IsInsideALocalClassWithinATemplateFunction(); Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, Expr *AssertMessageExpr, SourceLocation RParenLoc); Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, StringLiteral *AssertMessageExpr, SourceLocation RParenLoc, bool Failed); FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart, SourceLocation FriendLoc, TypeSourceInfo *TSInfo); Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS, MultiTemplateParamsArg TemplateParams); NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParams); QualType CheckConstructorDeclarator(Declarator &D, QualType R, StorageClass& SC); void CheckConstructor(CXXConstructorDecl *Constructor); QualType CheckDestructorDeclarator(Declarator &D, QualType R, StorageClass& SC); bool CheckDestructor(CXXDestructorDecl *Destructor); void CheckConversionDeclarator(Declarator &D, QualType &R, StorageClass& SC); Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion); void CheckDeductionGuideDeclarator(Declarator &D, QualType &R, StorageClass &SC); void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD); void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD); void CheckExplicitlyDefaultedMemberExceptionSpec(CXXMethodDecl *MD, const FunctionProtoType *T); void CheckDelayedMemberExceptionSpecs(); //===--------------------------------------------------------------------===// // C++ Derived Classes // /// ActOnBaseSpecifier - Parsed a base specifier CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class, SourceRange SpecifierRange, bool Virtual, AccessSpecifier Access, TypeSourceInfo *TInfo, SourceLocation EllipsisLoc); BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange, ParsedAttributes &Attrs, bool Virtual, AccessSpecifier Access, ParsedType basetype, SourceLocation BaseLoc, SourceLocation EllipsisLoc); bool AttachBaseSpecifiers(CXXRecordDecl *Class, MutableArrayRef<CXXBaseSpecifier *> Bases); void ActOnBaseSpecifiers(Decl *ClassDecl, MutableArrayRef<CXXBaseSpecifier *> Bases); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base, CXXBasePaths &Paths); // FIXME: I don't like this name. 
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, SourceLocation Loc, SourceRange Range, CXXCastPath *BasePath = nullptr, bool IgnoreAccess = false); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, unsigned InaccessibleBaseID, unsigned AmbigiousBaseConvID, SourceLocation Loc, SourceRange Range, DeclarationName Name, CXXCastPath *BasePath, bool IgnoreAccess = false); std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths); bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionReturnType - Checks whether the return types are /// covariant, according to C++ [class.virtual]p5. bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionExceptionSpec - Checks whether the exception /// spec is a subset of base spec. bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New, const CXXMethodDecl *Old); bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange); /// CheckOverrideControl - Check C++11 override control semantics. void CheckOverrideControl(NamedDecl *D); /// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was /// not used in the declaration of an overriding method. void DiagnoseAbsenceOfOverrideControl(NamedDecl *D); /// CheckForFunctionMarkedFinal - Checks whether a virtual member function /// overrides a virtual member function marked 'final', according to /// C++11 [class.virtual]p4. bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New, const CXXMethodDecl *Old); //===--------------------------------------------------------------------===// // C++ Access Control // enum AccessResult { AR_accessible, AR_inaccessible, AR_dependent, AR_delayed }; bool SetMemberAccessSpecifier(NamedDecl *MemberDecl, NamedDecl *PrevMemberDecl, AccessSpecifier LexicalAS); AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E, DeclAccessPair FoundDecl); AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E, DeclAccessPair FoundDecl); AccessResult CheckAllocationAccess(SourceLocation OperatorLoc, SourceRange PlacementRange, CXXRecordDecl *NamingClass, DeclAccessPair FoundDecl, bool Diagnose = true); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, bool IsCopyBindingRefToTemp = false); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, const PartialDiagnostic &PDiag); AccessResult CheckDestructorAccess(SourceLocation Loc, CXXDestructorDecl *Dtor, const PartialDiagnostic &PDiag, QualType objectType = QualType()); AccessResult CheckFriendAccess(NamedDecl *D); AccessResult CheckMemberAccess(SourceLocation UseLoc, CXXRecordDecl *NamingClass, DeclAccessPair Found); AccessResult CheckStructuredBindingMemberAccess(SourceLocation UseLoc, CXXRecordDecl *DecomposedClass, DeclAccessPair Field); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, Expr *ArgExpr, DeclAccessPair FoundDecl); AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr, DeclAccessPair FoundDecl); AccessResult CheckBaseClassAccess(SourceLocation AccessLoc, QualType Base, QualType Derived, const CXXBasePath &Path, unsigned DiagID, bool ForceCheck = false, bool ForceUnprivileged = false); void CheckLookupAccess(const LookupResult &R); bool 
IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass, QualType BaseType); bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl, AccessSpecifier access, QualType objectType); void HandleDependentAccessCheck(const DependentDiagnostic &DD, const MultiLevelTemplateArgumentList &TemplateArgs); void PerformDependentDiagnostics(const DeclContext *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); /// When true, access checking violations are treated as SFINAE /// failures rather than hard errors. bool AccessCheckingSFINAE; enum AbstractDiagSelID { AbstractNone = -1, AbstractReturnType, AbstractParamType, AbstractVariableType, AbstractFieldType, AbstractIvarType, AbstractSynthesizedIvarType, AbstractArrayType }; bool isAbstractType(SourceLocation Loc, QualType T); bool RequireNonAbstractType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); template <typename... Ts> bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireNonAbstractType(Loc, T, Diagnoser); } void DiagnoseAbstractType(const CXXRecordDecl *RD); //===--------------------------------------------------------------------===// // C++ Overloaded Operators [C++ 13.5] // bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl); bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl); //===--------------------------------------------------------------------===// // C++ Templates [C++ 14] // void FilterAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true); bool hasAnyAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true); /// Try to interpret the lookup result D as a template-name. /// /// \param D A declaration found by name lookup. /// \param AllowFunctionTemplates Whether function templates should be /// considered valid results. /// \param AllowDependent Whether unresolved using declarations (that might /// name templates) should be considered valid results. NamedDecl *getAsTemplateNameDecl(NamedDecl *D, bool AllowFunctionTemplates = true, bool AllowDependent = true); bool LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType, bool EnteringContext, bool &MemberOfUnknownSpecialization, SourceLocation TemplateKWLoc = SourceLocation()); TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS, bool hasTemplateKeyword, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool &MemberOfUnknownSpecialization); /// Determine whether a particular identifier might be the name in a C++1z /// deduction-guide declaration. 
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name, SourceLocation NameLoc, ParsedTemplateTy *Template = nullptr); bool DiagnoseUnknownTemplateName(const IdentifierInfo &II, SourceLocation IILoc, Scope *S, const CXXScopeSpec *SS, TemplateTy &SuggestedTemplate, TemplateNameKind &SuggestedKind); bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation, NamedDecl *Instantiation, bool InstantiatedFromMember, const NamedDecl *Pattern, const NamedDecl *PatternDef, TemplateSpecializationKind TSK, bool Complain = true); void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl); TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl); NamedDecl *ActOnTypeParameter(Scope *S, bool Typename, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg); QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI, SourceLocation Loc); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *DefaultArg); NamedDecl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedTemplateArgument DefaultArg); TemplateParameterList * ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ArrayRef<NamedDecl *> Params, SourceLocation RAngleLoc, Expr *RequiresClause); /// The context in which we are checking a template parameter list. 
enum TemplateParamListContext { TPC_ClassTemplate, TPC_VarTemplate, TPC_FunctionTemplate, TPC_ClassTemplateMember, TPC_FriendClassTemplate, TPC_FriendFunctionTemplate, TPC_FriendFunctionTemplateDefinition, TPC_TypeAliasTemplate }; bool CheckTemplateParameterList(TemplateParameterList *NewParams, TemplateParameterList *OldParams, TemplateParamListContext TPC, SkipBodyInfo *SkipBody = nullptr); TemplateParameterList *MatchTemplateParametersToScopeSpecifier( SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId, ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend, bool &IsMemberSpecialization, bool &Invalid); DeclResult CheckClassTemplate( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams, AccessSpecifier AS, SourceLocation ModulePrivateLoc, SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists, TemplateParameterList **OuterTemplateParamLists, SkipBodyInfo *SkipBody = nullptr); TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg, QualType NTTPType, SourceLocation Loc); void translateTemplateArguments(const ASTTemplateArgsPtr &In, TemplateArgumentListInfo &Out); ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType); void NoteAllFoundTemplates(TemplateName Name); QualType CheckTemplateIdType(TemplateName Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs); TypeResult ActOnTemplateIdType(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy Template, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, bool IsCtorOrDtorName = false, bool IsClassName = false); /// Parsed an elaborated-type-specifier that refers to a template-id, /// such as \c class T::template apply<U>. 
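  // Editor-added illustration (a standalone snippet, not part of this class):
  // the three template-parameter kinds introduced via ActOnTypeParameter,
  // ActOnNonTypeTemplateParameter and ActOnTemplateTemplateParameter, combined
  // in one class template; writing out buffer<int, 3, box> exercises the
  // template-id checking done by CheckTemplateIdType. Plain ISO C++; names
  // invented for the example.
  //
  // namespace template_params_example {
  // template <typename T,                        // type parameter
  //           int N,                             // non-type parameter
  //           template <typename> class Holder>  // template template parameter
  // struct buffer {
  //   Holder<T> storage[N];
  // };
  // template <typename T> struct box { T value; };
  // using three_ints = buffer<int, 3, box>;      // template-id
  // } // namespace template_params_example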
TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc); DeclResult ActOnVarTemplateSpecialization( Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams, StorageClass SC, bool IsPartialSpecialization); DeclResult CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation TemplateNameLoc, const TemplateArgumentListInfo &TemplateArgs); ExprResult CheckVarTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, VarTemplateDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc); ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); TemplateNameKind ActOnDependentTemplateName( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool AllowInjectedClassName = false); DeclResult ActOnClassTemplateSpecialization( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr, MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody = nullptr); bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc, TemplateDecl *PrimaryTemplate, unsigned NumExplicitArgs, ArrayRef<TemplateArgument> Args); void CheckTemplatePartialSpecialization( ClassTemplatePartialSpecializationDecl *Partial); void CheckTemplatePartialSpecialization( VarTemplatePartialSpecializationDecl *Partial); Decl *ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, TemplateSpecializationKind NewTSK, NamedDecl *PrevDecl, TemplateSpecializationKind PrevTSK, SourceLocation PrevPtOfInstantiation, bool &SuppressNew); bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD, const TemplateArgumentListInfo &ExplicitTemplateArgs, LookupResult &Previous); bool CheckFunctionTemplateSpecialization( FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs, LookupResult &Previous, bool QualifiedFriend = false); bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous); void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous); DeclResult ActOnExplicitInstantiation( Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS, TemplateTy Template, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, 
SourceLocation ExternLoc, SourceLocation TemplateLoc, Declarator &D);

  TemplateArgumentLoc SubstDefaultTemplateArgumentIfAvailable(
      TemplateDecl *Template, SourceLocation TemplateLoc,
      SourceLocation RAngleLoc, Decl *Param,
      SmallVectorImpl<TemplateArgument> &Converted, bool &HasDefaultArg);

  /// Specifies the context in which a particular template
  /// argument is being checked.
  enum CheckTemplateArgumentKind {
    /// The template argument was specified in the code or was
    /// instantiated with some deduced template arguments.
    CTAK_Specified,

    /// The template argument was deduced via template argument
    /// deduction.
    CTAK_Deduced,

    /// The template argument was deduced from an array bound
    /// via template argument deduction.
    CTAK_DeducedFromArrayBound
  };

  bool CheckTemplateArgument(NamedDecl *Param, TemplateArgumentLoc &Arg,
                             NamedDecl *Template, SourceLocation TemplateLoc,
                             SourceLocation RAngleLoc,
                             unsigned ArgumentPackIndex,
                             SmallVectorImpl<TemplateArgument> &Converted,
                             CheckTemplateArgumentKind CTAK = CTAK_Specified);

  /// Check that the given template arguments can be provided to
  /// the given template, converting the arguments along the way.
  ///
  /// \param Template The template to which the template arguments are being
  /// provided.
  ///
  /// \param TemplateLoc The location of the template name in the source.
  ///
  /// \param TemplateArgs The list of template arguments. If the template is
  /// a template template parameter, this function may extend the set of
  /// template arguments to also include substituted, defaulted template
  /// arguments.
  ///
  /// \param PartialTemplateArgs True if the list of template arguments is
  /// intentionally partial, e.g., because we're checking just the initial
  /// set of template arguments.
  ///
  /// \param Converted Will receive the converted, canonicalized template
  /// arguments.
  ///
  /// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to
  /// contain the converted forms of the template arguments as written.
  /// Otherwise, \p TemplateArgs will not be modified.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool CheckTemplateArgumentList(TemplateDecl *Template,
                                 SourceLocation TemplateLoc,
                                 TemplateArgumentListInfo &TemplateArgs,
                                 bool PartialTemplateArgs,
                                 SmallVectorImpl<TemplateArgument> &Converted,
                                 bool UpdateArgsWithConversions = true);

  bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
                                 TemplateArgumentLoc &Arg,
                                 SmallVectorImpl<TemplateArgument> &Converted);

  bool CheckTemplateArgument(TemplateTypeParmDecl *Param, TypeSourceInfo *Arg);
  ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
                                   QualType InstantiatedParamType, Expr *Arg,
                                   TemplateArgument &Converted,
                                   CheckTemplateArgumentKind CTAK = CTAK_Specified);
  bool CheckTemplateTemplateArgument(TemplateParameterList *Params,
                                     TemplateArgumentLoc &Arg);

  ExprResult BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
                                                     QualType ParamType,
                                                     SourceLocation Loc);
  ExprResult BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
                                                         SourceLocation Loc);

  /// Enumeration describing how template parameter lists are compared
  /// for equality.
  enum TemplateParameterListEqualKind {
    /// We are matching the template parameter lists of two templates
    /// that might be redeclarations.
/// /// \code /// template<typename T> struct X; /// template<typename T> struct X; /// \endcode TPL_TemplateMatch, /// We are matching the template parameter lists of two template /// template parameters as part of matching the template parameter lists /// of two templates that might be redeclarations. /// /// \code /// template<template<int I> class TT> struct X; /// template<template<int Value> class Other> struct X; /// \endcode TPL_TemplateTemplateParmMatch, /// We are matching the template parameter lists of a template /// template argument against the template parameter lists of a template /// template parameter. /// /// \code /// template<template<int Value> class Metafun> struct X; /// template<int Value> struct integer_c; /// X<integer_c> xic; /// \endcode TPL_TemplateTemplateArgumentMatch }; bool TemplateParameterListsAreEqual(TemplateParameterList *New, TemplateParameterList *Old, bool Complain, TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc = SourceLocation()); bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams); /// Called when the parser has parsed a C++ typename /// specifier, e.g., "typename T::type". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param II the identifier we're retrieving (e.g., 'type' in the example). /// \param IdLoc the location of the identifier. TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, const IdentifierInfo &II, SourceLocation IdLoc); /// Called when the parser has parsed a C++ typename /// specifier that ends in a template-id, e.g., /// "typename MetaFun::template apply<T1, T2>". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param TemplateLoc the location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateII The identifier used to name the template. /// \param TemplateIILoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). 
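  ///
  /// As a sketch of the construct being handled (hypothetical user code,
  /// shown only for orientation):
  /// \code
  /// template<typename MetaFun, typename T1, typename T2>
  /// struct apply_result {
  ///   typedef typename MetaFun::template apply<T1, T2> type;
  /// };
  /// \endcode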
  TypeResult
  ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
                    const CXXScopeSpec &SS, SourceLocation TemplateLoc,
                    TemplateTy TemplateName, IdentifierInfo *TemplateII,
                    SourceLocation TemplateIILoc, SourceLocation LAngleLoc,
                    ASTTemplateArgsPtr TemplateArgs,
                    SourceLocation RAngleLoc);

  QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
                             SourceLocation KeywordLoc,
                             NestedNameSpecifierLoc QualifierLoc,
                             const IdentifierInfo &II,
                             SourceLocation IILoc);

  TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
                                                    SourceLocation Loc,
                                                    DeclarationName Name);
  bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);

  ExprResult RebuildExprInCurrentInstantiation(Expr *E);
  bool RebuildTemplateParamsInCurrentInstantiation(
                                                TemplateParameterList *Params);

  std::string
  getTemplateArgumentBindingsText(const TemplateParameterList *Params,
                                  const TemplateArgumentList &Args);
  std::string
  getTemplateArgumentBindingsText(const TemplateParameterList *Params,
                                  const TemplateArgument *Args,
                                  unsigned NumArgs);

  //===--------------------------------------------------------------------===//
  // C++ Variadic Templates (C++0x [temp.variadic])
  //===--------------------------------------------------------------------===//

  /// Determine whether an unexpanded parameter pack might be permitted in this
  /// location. Useful for error recovery.
  bool isUnexpandedParameterPackPermitted();

  /// The context in which an unexpanded parameter pack is
  /// being diagnosed.
  ///
  /// Note that the values of this enumeration line up with the first
  /// argument to the \c err_unexpanded_parameter_pack diagnostic.
  enum UnexpandedParameterPackContext {
    /// An arbitrary expression.
    UPPC_Expression = 0,

    /// The base type of a class type.
    UPPC_BaseType,

    /// The type of an arbitrary declaration.
    UPPC_DeclarationType,

    /// The type of a data member.
    UPPC_DataMemberType,

    /// The size of a bit-field.
    UPPC_BitFieldWidth,

    /// The expression in a static assertion.
    UPPC_StaticAssertExpression,

    /// The fixed underlying type of an enumeration.
    UPPC_FixedUnderlyingType,

    /// The enumerator value.
    UPPC_EnumeratorValue,

    /// A using declaration.
    UPPC_UsingDeclaration,

    /// A friend declaration.
    UPPC_FriendDeclaration,

    /// A declaration qualifier.
    UPPC_DeclarationQualifier,

    /// An initializer.
    UPPC_Initializer,

    /// A default argument.
    UPPC_DefaultArgument,

    /// The type of a non-type template parameter.
    UPPC_NonTypeTemplateParameterType,

    /// The type of an exception.
    UPPC_ExceptionType,

    /// Partial specialization.
    UPPC_PartialSpecialization,

    /// Microsoft __if_exists.
    UPPC_IfExists,

    /// Microsoft __if_not_exists.
    UPPC_IfNotExists,

    /// Lambda expression.
    UPPC_Lambda,

    /// Block expression.
    UPPC_Block
  };

  /// Diagnose unexpanded parameter packs.
  ///
  /// \param Loc The location at which we should emit the diagnostic.
  ///
  /// \param UPPC The context in which we are diagnosing unexpanded
  /// parameter packs.
  ///
  /// \param Unexpanded the set of unexpanded parameter packs.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
                                        UnexpandedParameterPackContext UPPC,
                                        ArrayRef<UnexpandedParameterPack> Unexpanded);

  /// If the given type contains an unexpanded parameter pack,
  /// diagnose the error.
  ///
  /// \param Loc The source location where a diagnostic should be emitted.
  ///
  /// \param T The type that is being checked for unexpanded parameter
  /// packs.
  ///
  /// \returns true if an error occurred, false otherwise.
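  ///
  /// A hypothetical ill-formed snippet of the kind this routine diagnoses
  /// (illustration only):
  /// \code
  /// template<typename ...Ts>
  /// struct Holder {
  ///   Ts value;  // error: declaration type contains unexpanded parameter pack 'Ts'
  /// };
  /// \endcode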
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T, UnexpandedParameterPackContext UPPC); /// If the given expression contains an unexpanded parameter /// pack, diagnose the error. /// /// \param E The expression that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(Expr *E, UnexpandedParameterPackContext UPPC = UPPC_Expression); /// If the given nested-name-specifier contains an unexpanded /// parameter pack, diagnose the error. /// /// \param SS The nested-name-specifier that is being checked for /// unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS, UnexpandedParameterPackContext UPPC); /// If the given name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param NameInfo The name (with source location information) that /// is being checked for unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo, UnexpandedParameterPackContext UPPC); /// If the given template name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The location of the template name. /// /// \param Template The template name that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TemplateName Template, UnexpandedParameterPackContext UPPC); /// If the given template argument contains an unexpanded parameter /// pack, diagnose the error. /// /// \param Arg The template argument that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg, UnexpandedParameterPackContext UPPC); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgument Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param T The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(QualType T, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param TL The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TypeLoc TL, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// nested-name-specifier. /// /// \param NNS The nested-name-specifier that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// name. 
/// /// \param NameInfo The name that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Invoked when parsing a template argument followed by an /// ellipsis, which creates a pack expansion. /// /// \param Arg The template argument preceding the ellipsis, which /// may already be invalid. /// /// \param EllipsisLoc The location of the ellipsis. ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg, SourceLocation EllipsisLoc); /// Invoked when parsing a type followed by an ellipsis, which /// creates a pack expansion. /// /// \param Type The type preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc); /// Construct a pack expansion type from the pattern of the pack /// expansion. TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Construct a pack expansion type from the pattern of the pack /// expansion. QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Determine whether we could expand a pack expansion with the /// given set of parameter packs into separate arguments by repeatedly /// transforming the pattern. /// /// \param EllipsisLoc The location of the ellipsis that identifies the /// pack expansion. /// /// \param PatternRange The source range that covers the entire pattern of /// the pack expansion. /// /// \param Unexpanded The set of unexpanded parameter packs within the /// pattern. /// /// \param ShouldExpand Will be set to \c true if the transformer should /// expand the corresponding pack expansions into separate arguments. When /// set, \c NumExpansions must also be set. /// /// \param RetainExpansion Whether the caller should add an unexpanded /// pack expansion after all of the expanded arguments. This is used /// when extending explicitly-specified template argument packs per /// C++0x [temp.arg.explicit]p9. /// /// \param NumExpansions The number of separate arguments that will be in /// the expanded form of the corresponding pack expansion. This is both an /// input and an output parameter, which can be set by the caller if the /// number of expansions is known a priori (e.g., due to a prior substitution) /// and will be set by the callee when the number of expansions is known. /// The callee must set this value when \c ShouldExpand is \c true; it may /// set this value in other cases. 
/// /// \returns true if an error occurred (e.g., because the parameter packs /// are to be instantiated with arguments of different lengths), false /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions) /// must be set. bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef<UnexpandedParameterPack> Unexpanded, const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand, bool &RetainExpansion, Optional<unsigned> &NumExpansions); /// Determine the number of arguments in the given pack expansion /// type. /// /// This routine assumes that the number of arguments in the expansion is /// consistent across all of the unexpanded parameter packs in its pattern. /// /// Returns an empty Optional if the type can't be expanded. Optional<unsigned> getNumArgumentsInExpansion(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs); /// Determine whether the given declarator contains any unexpanded /// parameter packs. /// /// This routine is used by the parser to disambiguate function declarators /// with an ellipsis prior to the ')', e.g., /// /// \code /// void f(T...); /// \endcode /// /// To determine whether we have an (unnamed) function parameter pack or /// a variadic function. /// /// \returns true if the declarator contains any unexpanded parameter packs, /// false otherwise. bool containsUnexpandedParameterPacks(Declarator &D); /// Returns the pattern of the pack expansion for a template argument. /// /// \param OrigLoc The template argument to expand. /// /// \param Ellipsis Will be set to the location of the ellipsis. /// /// \param NumExpansions Will be set to the number of expansions that will /// be generated from this pack expansion, if known a priori. TemplateArgumentLoc getTemplateArgumentPackExpansionPattern( TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis, Optional<unsigned> &NumExpansions) const; /// Given a template argument that contains an unexpanded parameter pack, but /// which has already been substituted, attempt to determine the number of /// elements that will be produced once this argument is fully-expanded. /// /// This is intended for use when transforming 'sizeof...(Arg)' in order to /// avoid actually expanding the pack where possible. Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg); //===--------------------------------------------------------------------===// // C++ Template Argument Deduction (C++ [temp.deduct]) //===--------------------------------------------------------------------===// /// Adjust the type \p ArgFunctionType to match the calling convention, /// noreturn, and optionally the exception specification of \p FunctionType. /// Deduction often wants to ignore these properties when matching function /// types. QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType, bool AdjustExceptionSpec = false); /// Describes the result of template argument deduction. /// /// The TemplateDeductionResult enumeration describes the result of /// template argument deduction, as returned from /// DeduceTemplateArguments(). The separate TemplateDeductionInfo /// structure provides additional information about the results of /// template argument deduction, e.g., the deduced template argument /// list (if successful) or the specific template parameters or /// deduced arguments that were involved in the failure. enum TemplateDeductionResult { /// Template argument deduction was successful. 
    TDK_Success = 0,
    /// The declaration was invalid; do nothing.
    TDK_Invalid,
    /// Template argument deduction exceeded the maximum template
    /// instantiation depth (which has already been diagnosed).
    TDK_InstantiationDepth,
    /// Template argument deduction did not deduce a value
    /// for every template parameter.
    TDK_Incomplete,
    /// Template argument deduction did not deduce a value for every
    /// expansion of an expanded template parameter pack.
    TDK_IncompletePack,
    /// Template argument deduction produced inconsistent
    /// deduced values for the given template parameter.
    TDK_Inconsistent,
    /// Template argument deduction failed due to inconsistent
    /// cv-qualifiers on a template parameter type that would
    /// otherwise be deduced, e.g., we tried to deduce T in "const T"
    /// but were given a non-const "X".
    TDK_Underqualified,
    /// Substitution of the deduced template argument values
    /// resulted in an error.
    TDK_SubstitutionFailure,
    /// After substituting deduced template arguments, a dependent
    /// parameter type did not match the corresponding argument.
    TDK_DeducedMismatch,
    /// After substituting deduced template arguments, an element of
    /// a dependent parameter type did not match the corresponding element
    /// of the corresponding argument (when deducing from an initializer list).
    TDK_DeducedMismatchNested,
    /// A non-dependent component of the parameter did not match the
    /// corresponding component of the argument.
    TDK_NonDeducedMismatch,
    /// When performing template argument deduction for a function
    /// template, there were too many call arguments.
    TDK_TooManyArguments,
    /// When performing template argument deduction for a function
    /// template, there were too few call arguments.
    TDK_TooFewArguments,
    /// The explicitly-specified template arguments were not valid
    /// template arguments for the given template.
    TDK_InvalidExplicitArguments,
    /// Checking non-dependent argument conversions failed.
    TDK_NonDependentConversionFailure,
    /// Deduction failed; that's all we know.
    TDK_MiscellaneousDeductionFailure,
    /// CUDA Target attributes do not match.
    TDK_CUDATargetMismatch
  };

  TemplateDeductionResult
  DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
                          const TemplateArgumentList &TemplateArgs,
                          sema::TemplateDeductionInfo &Info);

  TemplateDeductionResult
  DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
                          const TemplateArgumentList &TemplateArgs,
                          sema::TemplateDeductionInfo &Info);

  TemplateDeductionResult SubstituteExplicitTemplateArguments(
      FunctionTemplateDecl *FunctionTemplate,
      TemplateArgumentListInfo &ExplicitTemplateArgs,
      SmallVectorImpl<DeducedTemplateArgument> &Deduced,
      SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
      sema::TemplateDeductionInfo &Info);

  /// A function argument from which we performed template argument
  /// deduction for a call.
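  ///
  /// For illustration (hypothetical call site): when deducing \c T from
  /// \code
  /// template<typename T> void g(const T *p);
  /// int x = 0;
  /// g(&x);  // parameter type as written: 'const T *'; argument type: 'int *'
  /// \endcode
  /// the parameter type as written and the argument's type are recorded,
  /// together with the argument's index in the call.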
struct OriginalCallArg { OriginalCallArg(QualType OriginalParamType, bool DecomposedParam, unsigned ArgIdx, QualType OriginalArgType) : OriginalParamType(OriginalParamType), DecomposedParam(DecomposedParam), ArgIdx(ArgIdx), OriginalArgType(OriginalArgType) {} QualType OriginalParamType; bool DecomposedParam; unsigned ArgIdx; QualType OriginalArgType; }; TemplateDeductionResult FinishTemplateArgumentDeduction( FunctionTemplateDecl *FunctionTemplate, SmallVectorImpl<DeducedTemplateArgument> &Deduced, unsigned NumExplicitlySpecified, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr, bool PartialOverloading = false, llvm::function_ref<bool()> CheckNonDependent = []{ return false; }); TemplateDeductionResult DeduceTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool PartialOverloading, llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ArgFunctionType, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, QualType ToType, CXXConversionDecl *&Specialization, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); /// Substitute Replacement for \p auto in \p TypeWithAuto QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement); /// Substitute Replacement for auto in TypeWithAuto TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, QualType Replacement); /// Completely replace the \c auto in \p TypeWithAuto by /// \p Replacement. This does not retain any \c auto type sugar. QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement); /// Result type of DeduceAutoType. enum DeduceAutoResult { DAR_Succeeded, DAR_Failed, DAR_FailedAlreadyDiagnosed }; DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None); DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None); void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init); bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc, bool Diagnose = true); /// Declare implicit deduction guides for a class template if we've /// not already done so. 
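  ///
  /// These guides are what make class template argument deduction in code
  /// like the following work (hypothetical user code, for illustration):
  /// \code
  /// template<typename T> struct Wrapper { T value; Wrapper(T v) : value(v) {} };
  /// Wrapper w(42);  // deduces Wrapper<int> through an implicit deduction guide
  /// \endcode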
void DeclareImplicitDeductionGuides(TemplateDecl *Template, SourceLocation Loc); QualType DeduceTemplateSpecializationFromInitializer( TypeSourceInfo *TInfo, const InitializedEntity &Entity, const InitializationKind &Kind, MultiExprArg Init); QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name, QualType Type, TypeSourceInfo *TSI, SourceRange Range, bool DirectInit, Expr *&Init); TypeLoc getReturnTypeLoc(FunctionDecl *FD) const; bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD, SourceLocation ReturnLoc, Expr *&RetExpr, AutoType *AT); FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1, unsigned NumCallArguments2); UnresolvedSetIterator getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd, TemplateSpecCandidateSet &FailedCandidates, SourceLocation Loc, const PartialDiagnostic &NoneDiag, const PartialDiagnostic &AmbigDiag, const PartialDiagnostic &CandidateDiag, bool Complain = true, QualType TargetType = QualType()); ClassTemplatePartialSpecializationDecl * getMoreSpecializedPartialSpecialization( ClassTemplatePartialSpecializationDecl *PS1, ClassTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization( VarTemplatePartialSpecializationDecl *PS1, VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); bool isTemplateTemplateParameterAtLeastAsSpecializedAs( TemplateParameterList *P, TemplateDecl *AArg, SourceLocation Loc); void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkDeducedTemplateParameters( const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced) { return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced); } static void MarkDeducedTemplateParameters(ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced); //===--------------------------------------------------------------------===// // C++ Template Instantiation // MultiLevelTemplateArgumentList getTemplateInstantiationArgs(NamedDecl *D, const TemplateArgumentList *Innermost = nullptr, bool RelativeToPrimary = false, const FunctionDecl *Pattern = nullptr); /// A context in which code is being synthesized (where a source location /// alone is not sufficient to identify the context). This covers template /// instantiation and various forms of implicitly-generated functions. struct CodeSynthesisContext { /// The kind of template instantiation we are performing enum SynthesisKind { /// We are instantiating a template declaration. The entity is /// the declaration we're instantiating (e.g., a CXXRecordDecl). TemplateInstantiation, /// We are instantiating a default argument for a template /// parameter. The Entity is the template parameter whose argument is /// being instantiated, the Template is the template, and the /// TemplateArgs/NumTemplateArguments provide the template arguments as /// specified. DefaultTemplateArgumentInstantiation, /// We are instantiating a default argument for a function. 
    /// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
    /// provides the template arguments as specified.
    DefaultFunctionArgumentInstantiation,

    /// We are substituting explicit template arguments provided for
    /// a function template. The entity is a FunctionTemplateDecl.
    ExplicitTemplateArgumentSubstitution,

    /// We are substituting template arguments determined as part of
    /// template argument deduction for either a class template
    /// partial specialization or a function template. The
    /// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or
    /// a TemplateDecl.
    DeducedTemplateArgumentSubstitution,

    /// We are substituting prior template arguments into a new
    /// template parameter. The template parameter itself is either a
    /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
    PriorTemplateArgumentSubstitution,

    /// We are checking the validity of a default template argument that
    /// has been used when naming a template-id.
    DefaultTemplateArgumentChecking,

    /// We are computing the exception specification for a defaulted special
    /// member function.
    ExceptionSpecEvaluation,

    /// We are instantiating the exception specification for a function
    /// template which was deferred until it was needed.
    ExceptionSpecInstantiation,

    /// We are declaring an implicit special member function.
    DeclaringSpecialMember,

    /// We are defining a synthesized function (such as a defaulted special
    /// member).
    DefiningSynthesizedFunction,

    /// Added for Template instantiation observation.
    /// Memoization means we are _not_ instantiating a template because
    /// it is already instantiated (but we entered a context where we
    /// would have had to if it was not already instantiated).
    Memoization
    } Kind;

    /// Was the enclosing context a non-instantiation SFINAE context?
    bool SavedInNonInstantiationSFINAEContext;

    /// The point of instantiation or synthesis within the source code.
    SourceLocation PointOfInstantiation;

    /// The entity that is being synthesized.
    Decl *Entity;

    /// The template (or partial specialization) in which we are
    /// performing the instantiation, for substitutions of prior template
    /// arguments.
    NamedDecl *Template;

    /// The list of template arguments we are substituting, if they
    /// are not part of the entity.
    const TemplateArgument *TemplateArgs;

    // FIXME: Wrap this union around more members, or perhaps store the
    // kind-specific members in the RAII object owning the context.
    union {
      /// The number of template arguments in TemplateArgs.
      unsigned NumTemplateArgs;

      /// The special member being declared or defined.
      CXXSpecialMember SpecialMember;
    };

    ArrayRef<TemplateArgument> template_arguments() const {
      assert(Kind != DeclaringSpecialMember);
      return {TemplateArgs, NumTemplateArgs};
    }

    /// The template deduction info object associated with the
    /// substitution or checking of explicit or deduced template arguments.
    sema::TemplateDeductionInfo *DeductionInfo;

    /// The source range that covers the construct that caused
    /// the instantiation, e.g., the template-id that causes a class
    /// template instantiation.
    SourceRange InstantiationRange;

    CodeSynthesisContext()
        : Kind(TemplateInstantiation), Entity(nullptr), Template(nullptr),
          TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {}

    /// Determines whether this template is an actual instantiation
    /// that should be counted toward the maximum instantiation depth.
    bool isInstantiationRecord() const;
  };

  /// List of active code synthesis contexts.
  ///
  /// This vector is treated as a stack. As synthesis of one entity requires
  /// synthesis of another, additional contexts are pushed onto the stack.
  SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;

  /// Specializations whose definitions are currently being instantiated.
  llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;

  /// Non-dependent types used in templates that have already been instantiated
  /// by some template instantiation.
  llvm::DenseSet<QualType> InstantiatedNonDependentTypes;

  /// Extra modules inspected when performing a lookup during a template
  /// instantiation. Computed lazily.
  SmallVector<Module*, 16> CodeSynthesisContextLookupModules;

  /// Cache of additional modules that should be used for name lookup
  /// within the current template instantiation. Computed lazily; use
  /// getLookupModules() to get a complete set.
  llvm::DenseSet<Module*> LookupModulesCache;

  /// Get the set of additional modules that should be checked during
  /// name lookup. A module and its imports become visible when instantiating a
  /// template defined within it.
  llvm::DenseSet<Module*> &getLookupModules();

  /// Map from the most recent declaration of a namespace to the most
  /// recent visible declaration of that namespace.
  llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;

  /// Whether we are in a SFINAE context that is not associated with
  /// template instantiation.
  ///
  /// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside
  /// of a template instantiation or template argument deduction.
  bool InNonInstantiationSFINAEContext;

  /// The number of \p CodeSynthesisContexts that are not template
  /// instantiations and, therefore, should not be counted as part of the
  /// instantiation depth.
  ///
  /// When the instantiation depth reaches the user-configurable limit
  /// \p LangOptions::InstantiationDepth we will abort instantiation.
  // FIXME: Should we have a similar limit for other forms of synthesis?
  unsigned NonInstantiationEntries;

  /// The depth of the context stack at the point when the most recent
  /// error or warning was produced.
  ///
  /// This value is used to suppress printing of redundant context stacks
  /// when there are multiple errors or warnings in the same instantiation.
  // FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
  unsigned LastEmittedCodeSynthesisContextDepth = 0;

  /// The template instantiation callbacks to trace or track
  /// instantiations (objects can be chained).
  ///
  /// These callbacks are used to print, trace or track template
  /// instantiations as they are being constructed.
  std::vector<std::unique_ptr<TemplateInstantiationCallback>>
      TemplateInstCallbacks;

  /// The current index into pack expansion arguments that will be
  /// used for substitution of parameter packs.
  ///
  /// The pack expansion index will be -1 to indicate that parameter packs
  /// should be instantiated as themselves. Otherwise, the index specifies
  /// which argument within the parameter pack will be used for substitution.
  int ArgumentPackSubstitutionIndex;

  /// RAII object used to change the argument pack substitution index
  /// within a \c Sema object.
  ///
  /// See \c ArgumentPackSubstitutionIndex for more information.
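  ///
  /// Minimal usage sketch (assuming \c S is the current Sema instance and the
  /// first pack element is being substituted):
  /// \code
  /// Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(S, /*NewSubstitutionIndex=*/0);
  /// // ... substitutions performed here use pack element 0 ...
  /// // the previous index is restored when SubstIndex is destroyed
  /// \endcode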
class ArgumentPackSubstitutionIndexRAII { Sema &Self; int OldSubstitutionIndex; public: ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex) : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) { Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex; } ~ArgumentPackSubstitutionIndexRAII() { Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex; } }; friend class ArgumentPackSubstitutionRAII; /// For each declaration that involved template argument deduction, the /// set of diagnostics that were suppressed during that template argument /// deduction. /// /// FIXME: Serialize this structure to the AST file. typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> > SuppressedDiagnosticsMap; SuppressedDiagnosticsMap SuppressedDiagnostics; /// A stack object to be created when performing template /// instantiation. /// /// Construction of an object of type \c InstantiatingTemplate /// pushes the current instantiation onto the stack of active /// instantiations. If the size of this stack exceeds the maximum /// number of recursive template instantiations, construction /// produces an error and evaluates true. /// /// Destruction of this object will pop the named instantiation off /// the stack. struct InstantiatingTemplate { /// Note that we are instantiating a class template, /// function template, variable template, alias template, /// or a member thereof. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, Decl *Entity, SourceRange InstantiationRange = SourceRange()); struct ExceptionSpecification {}; /// Note that we are instantiating an exception specification /// of a function template. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionDecl *Entity, ExceptionSpecification, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument in a /// template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateParameter Param, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting either explicitly-specified or /// deduced template arguments during function template argument deduction. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionTemplateDecl *FunctionTemplate, ArrayRef<TemplateArgument> TemplateArgs, CodeSynthesisContext::SynthesisKind Kind, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template declaration. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ClassTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a variable template partial /// specialization. 
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, VarTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument for a function /// parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ParmVarDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting prior template arguments into a /// non-type parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, NonTypeTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are substituting prior template arguments into a /// template template parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, TemplateTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are checking the default template argument /// against the template parameter for a given template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, NamedDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we have finished instantiating this template. void Clear(); ~InstantiatingTemplate() { Clear(); } /// Determines whether we have exceeded the maximum /// recursive template instantiations. bool isInvalid() const { return Invalid; } /// Determine whether we are already instantiating this /// specialization in some surrounding active instantiation. bool isAlreadyInstantiating() const { return AlreadyInstantiating; } private: Sema &SemaRef; bool Invalid; bool AlreadyInstantiating; bool CheckInstantiationDepth(SourceLocation PointOfInstantiation, SourceRange InstantiationRange); InstantiatingTemplate( Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind, SourceLocation PointOfInstantiation, SourceRange InstantiationRange, Decl *Entity, NamedDecl *Template = nullptr, ArrayRef<TemplateArgument> TemplateArgs = None, sema::TemplateDeductionInfo *DeductionInfo = nullptr); InstantiatingTemplate(const InstantiatingTemplate&) = delete; InstantiatingTemplate& operator=(const InstantiatingTemplate&) = delete; }; void pushCodeSynthesisContext(CodeSynthesisContext Ctx); void popCodeSynthesisContext(); /// Determine whether we are currently performing template instantiation. bool inTemplateInstantiation() const { return CodeSynthesisContexts.size() > NonInstantiationEntries; } void PrintContextStack() { if (!CodeSynthesisContexts.empty() && CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) { PrintInstantiationStack(); LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size(); } if (PragmaAttributeCurrentTargetDecl) PrintPragmaAttributeInstantiationPoint(); } void PrintInstantiationStack(); void PrintPragmaAttributeInstantiationPoint(); /// Determines whether we are currently in a context where /// template argument substitution failures are not considered /// errors. /// /// \returns An empty \c Optional if we're not in a SFINAE context. /// Otherwise, contains a pointer that, if non-NULL, contains the nearest /// template-deduction context object, which can be used to capture /// diagnostics that will be suppressed. 
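  ///
  /// For orientation, a classic SFINAE situation (hypothetical user code):
  /// substitution failure in the first overload silently removes it from the
  /// candidate set instead of producing a hard error.
  /// \code
  /// template<typename T> typename T::type f(int);  // dropped if T::type does not exist
  /// template<typename T> void f(...);
  /// \endcode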
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const; /// Determines whether we are currently in a context that /// is not evaluated as per C++ [expr] p5. bool isUnevaluatedContext() const { assert(!ExprEvalContexts.empty() && "Must be in an expression evaluation context"); return ExprEvalContexts.back().isUnevaluated(); } /// RAII class used to determine whether SFINAE has /// trapped any errors that occur during template argument /// deduction. class SFINAETrap { Sema &SemaRef; unsigned PrevSFINAEErrors; bool PrevInNonInstantiationSFINAEContext; bool PrevAccessCheckingSFINAE; bool PrevLastDiagnosticIgnored; public: explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false) : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors), PrevInNonInstantiationSFINAEContext( SemaRef.InNonInstantiationSFINAEContext), PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE), PrevLastDiagnosticIgnored( SemaRef.getDiagnostics().isLastDiagnosticIgnored()) { if (!SemaRef.isSFINAEContext()) SemaRef.InNonInstantiationSFINAEContext = true; SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE; } ~SFINAETrap() { SemaRef.NumSFINAEErrors = PrevSFINAEErrors; SemaRef.InNonInstantiationSFINAEContext = PrevInNonInstantiationSFINAEContext; SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE; SemaRef.getDiagnostics().setLastDiagnosticIgnored( PrevLastDiagnosticIgnored); } /// Determine whether any SFINAE errors have been trapped. bool hasErrorOccurred() const { return SemaRef.NumSFINAEErrors > PrevSFINAEErrors; } }; /// RAII class used to indicate that we are performing provisional /// semantic analysis to determine the validity of a construct, so /// typo-correction and diagnostics in the immediate context (not within /// implicitly-instantiated templates) should be suppressed. class TentativeAnalysisScope { Sema &SemaRef; // FIXME: Using a SFINAETrap for this is a hack. SFINAETrap Trap; bool PrevDisableTypoCorrection; public: explicit TentativeAnalysisScope(Sema &SemaRef) : SemaRef(SemaRef), Trap(SemaRef, true), PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) { SemaRef.DisableTypoCorrection = true; } ~TentativeAnalysisScope() { SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection; } }; /// The current instantiation scope used to store local /// variables. LocalInstantiationScope *CurrentInstantiationScope; /// Tracks whether we are in a context where typo correction is /// disabled. bool DisableTypoCorrection; /// The number of typos corrected by CorrectTypo. unsigned TyposCorrected; typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet; typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations; /// A cache containing identifiers for which typo correction failed and /// their locations, so that repeated attempts to correct an identifier in a /// given location are ignored if typo correction already failed for it. IdentifierSourceLocations TypoCorrectionFailures; /// Worker object for performing CFG-based warnings. sema::AnalysisBasedWarnings AnalysisWarnings; threadSafety::BeforeSet *ThreadSafetyDeclCache; /// An entity for which implicit template instantiation is required. /// /// The source location associated with the declaration is the first place in /// the source code where the declaration was "used". 
It is not necessarily /// the point of instantiation (which will be either before or after the /// namespace-scope declaration that triggered this implicit instantiation), /// However, it is the location that diagnostics should generally refer to, /// because users will need to know what code triggered the instantiation. typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation; /// The queue of implicit template instantiations that are required /// but have not yet been performed. std::deque<PendingImplicitInstantiation> PendingInstantiations; /// Queue of implicit template instantiations that cannot be performed /// eagerly. SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations; class GlobalEagerInstantiationScope { public: GlobalEagerInstantiationScope(Sema &S, bool Enabled) : S(S), Enabled(Enabled) { if (!Enabled) return; SavedPendingInstantiations.swap(S.PendingInstantiations); SavedVTableUses.swap(S.VTableUses); } void perform() { if (Enabled) { S.DefineUsedVTables(); S.PerformPendingInstantiations(); } } ~GlobalEagerInstantiationScope() { if (!Enabled) return; // Restore the set of pending vtables. assert(S.VTableUses.empty() && "VTableUses should be empty before it is discarded."); S.VTableUses.swap(SavedVTableUses); // Restore the set of pending implicit instantiations. assert(S.PendingInstantiations.empty() && "PendingInstantiations should be empty before it is discarded."); S.PendingInstantiations.swap(SavedPendingInstantiations); } private: Sema &S; SmallVector<VTableUse, 16> SavedVTableUses; std::deque<PendingImplicitInstantiation> SavedPendingInstantiations; bool Enabled; }; /// The queue of implicit template instantiations that are required /// and must be performed within the current local scope. /// /// This queue is only used for member functions of local classes in /// templates, which must be instantiated in the same scope as their /// enclosing function, so that they can reference function-local /// types, static variables, enumerators, etc. std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations; class LocalEagerInstantiationScope { public: LocalEagerInstantiationScope(Sema &S) : S(S) { SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); } ~LocalEagerInstantiationScope() { assert(S.PendingLocalImplicitInstantiations.empty() && "there shouldn't be any pending local implicit instantiations"); SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } private: Sema &S; std::deque<PendingImplicitInstantiation> SavedPendingLocalImplicitInstantiations; }; /// A helper class for building up ExtParameterInfos. class ExtParameterInfoBuilder { SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos; bool HasInteresting = false; public: /// Set the ExtParameterInfo for the parameter at the given index, /// void set(unsigned index, FunctionProtoType::ExtParameterInfo info) { assert(Infos.size() <= index); Infos.resize(index); Infos.push_back(info); if (!HasInteresting) HasInteresting = (info != FunctionProtoType::ExtParameterInfo()); } /// Return a pointer (suitable for setting in an ExtProtoInfo) to the /// ExtParameterInfo array we've built up. 
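    ///
    /// Rough usage sketch (assuming \c EPI is a FunctionProtoType::ExtProtoInfo
    /// being filled in; \c InfoForParam1 and \c NumParams are hypothetical):
    /// \code
    /// ExtParameterInfoBuilder Builder;
    /// Builder.set(1, InfoForParam1);  // record a non-trivial ExtParameterInfo
    /// EPI.ExtParameterInfos = Builder.getPointerOrNull(NumParams);  // null if nothing interesting
    /// \endcode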
const FunctionProtoType::ExtParameterInfo * getPointerOrNull(unsigned numParams) { if (!HasInteresting) return nullptr; Infos.resize(numParams); return Infos.data(); } }; void PerformPendingInstantiations(bool LocalOnly = false); TypeSourceInfo *SubstType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, bool AllowDeducedTST = false); QualType SubstType(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstType(TypeLoc TL, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, CXXRecordDecl *ThisContext, Qualifiers ThisTypeQuals); void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto, const MultiLevelTemplateArgumentList &Args); bool SubstExceptionSpec(SourceLocation Loc, FunctionProtoType::ExceptionSpecInfo &ESI, SmallVectorImpl<QualType> &ExceptionStorage, const MultiLevelTemplateArgumentList &Args); ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, int indexAdjustment, Optional<unsigned> NumExpansions, bool ExpectParameterPack); bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params, const FunctionProtoType::ExtParameterInfo *ExtParamInfos, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<QualType> &ParamTypes, SmallVectorImpl<ParmVarDecl *> *OutParams, ExtParameterInfoBuilder &ParamInfos); ExprResult SubstExpr(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs); /// Substitute the given template arguments into a list of /// expressions, expanding pack expansions if required. /// /// \param Exprs The list of expressions to substitute into. /// /// \param IsCall Whether this is some form of call, in which case /// default arguments will be dropped. /// /// \param TemplateArgs The set of template arguments to substitute. /// /// \param Outputs Will receive all of the substituted arguments. /// /// \returns true if an error occurred, false otherwise. 
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<Expr *> &Outputs); StmtResult SubstStmt(Stmt *S, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateParameterList * SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); Decl *SubstDecl(Decl *D, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); ExprResult SubstInitializer(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs, bool CXXDirectInit); bool SubstBaseSpecifiers(CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateClass(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK, bool Complain = true); bool InstantiateEnum(SourceLocation PointOfInstantiation, EnumDecl *Instantiation, EnumDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); bool InstantiateInClassInitializer( SourceLocation PointOfInstantiation, FieldDecl *Instantiation, FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); struct LateInstantiatedAttribute { const Attr *TmplAttr; LocalInstantiationScope *Scope; Decl *NewDecl; LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S, Decl *D) : TmplAttr(A), Scope(S), NewDecl(D) { } }; typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec; void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); void InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); bool usesPartialOrExplicitSpecialization( SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec); bool InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK, bool Complain = true); void InstantiateClassMembers(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); void InstantiateClassTemplateSpecializationMembers( SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK); NestedNameSpecifierLoc SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS, const MultiLevelTemplateArgumentList &TemplateArgs); DeclarationNameInfo SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateName SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name, SourceLocation Loc, const MultiLevelTemplateArgumentList &TemplateArgs); bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs, TemplateArgumentListInfo &Result, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateExceptionSpec(SourceLocation PointOfInstantiation, FunctionDecl *Function); FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD, const TemplateArgumentList *Args, SourceLocation Loc); void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation, FunctionDecl 
*Function, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); VarTemplateSpecializationDecl *BuildVarTemplateInstantiation( VarTemplateDecl *VarTemplate, VarDecl *FromVar, const TemplateArgumentList &TemplateArgList, const TemplateArgumentListInfo &TemplateArgsInfo, SmallVectorImpl<TemplateArgument> &Converted, SourceLocation PointOfInstantiation, void *InsertPos, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *StartingScope = nullptr); VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl( VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl, const MultiLevelTemplateArgumentList &TemplateArgs); void BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs, LateInstantiatedAttrVec *LateAttrs, DeclContext *Owner, LocalInstantiationScope *StartingScope, bool InstantiatingVarTemplate = false); void InstantiateVariableInitializer( VarDecl *Var, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateVariableDefinition(SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); void InstantiateMemInitializers(CXXConstructorDecl *New, const CXXConstructorDecl *Tmpl, const MultiLevelTemplateArgumentList &TemplateArgs); NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, bool FindingInstantiatedContext = false); DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC, const MultiLevelTemplateArgumentList &TemplateArgs); // Objective-C declarations. enum ObjCContainerKind { OCK_None = -1, OCK_Interface = 0, OCK_Protocol, OCK_Category, OCK_ClassExtension, OCK_Implementation, OCK_CategoryImplementation }; ObjCContainerKind getObjCContainerKind() const; DeclResult actOnObjCTypeParam(Scope *S, ObjCTypeParamVariance variance, SourceLocation varianceLoc, unsigned index, IdentifierInfo *paramName, SourceLocation paramLoc, SourceLocation colonLoc, ParsedType typeBound); ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc, ArrayRef<Decl *> typeParams, SourceLocation rAngleLoc); void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList); Decl *ActOnStartClassInterface( Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); void ActOnSuperClassOfClassInterface(Scope *S, SourceLocation AtInterfaceLoc, ObjCInterfaceDecl *IDecl, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange); void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs, SmallVectorImpl<SourceLocation> &ProtocolLocs, IdentifierInfo *SuperName, SourceLocation SuperLoc); Decl *ActOnCompatibilityAlias( SourceLocation AtCompatibilityAliasLoc, IdentifierInfo *AliasName, SourceLocation AliasLocation, IdentifierInfo *ClassName, SourceLocation ClassLocation); bool CheckForwardProtocolDeclarationForCircularDependency( IdentifierInfo *PName, SourceLocation &PLoc, SourceLocation PrevLoc, const ObjCList<ObjCProtocolDecl> &PList); Decl 
*ActOnStartProtocolInterface( SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc, Decl *const *ProtoRefNames, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartCategoryInterface( SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *CategoryName, SourceLocation CategoryLoc, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartClassImplementation( SourceLocation AtClassImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperClassname, SourceLocation SuperClassLoc); Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *CatName, SourceLocation CatLoc); DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl, ArrayRef<Decl *> Decls); DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc, IdentifierInfo **IdentList, SourceLocation *IdentLocs, ArrayRef<ObjCTypeParamList *> TypeParamLists, unsigned NumElts); DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc, ArrayRef<IdentifierLocPair> IdentList, const ParsedAttributesView &attrList); void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer, ArrayRef<IdentifierLocPair> ProtocolId, SmallVectorImpl<Decl *> &Protocols); void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId, SourceLocation ProtocolLoc, IdentifierInfo *TypeArgId, SourceLocation TypeArgLoc, bool SelectProtocolFirst = false); /// Given a list of identifiers (and their locations), resolve the /// names to either Objective-C protocol qualifiers or type /// arguments, as appropriate. void actOnObjCTypeArgsOrProtocolQualifiers( Scope *S, ParsedType baseType, SourceLocation lAngleLoc, ArrayRef<IdentifierInfo *> identifiers, ArrayRef<SourceLocation> identifierLocs, SourceLocation rAngleLoc, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SourceLocation &protocolRAngleLoc, bool warnOnIncompleteProtocols); /// Build a an Objective-C protocol-qualified 'id' type where no /// base type was specified. TypeResult actOnObjCProtocolQualifierType( SourceLocation lAngleLoc, ArrayRef<Decl *> protocols, ArrayRef<SourceLocation> protocolLocs, SourceLocation rAngleLoc); /// Build a specialized and/or protocol-qualified Objective-C type. TypeResult actOnObjCTypeArgsAndProtocolQualifiers( Scope *S, SourceLocation Loc, ParsedType BaseType, SourceLocation TypeArgsLAngleLoc, ArrayRef<ParsedType> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<Decl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc); /// Build an Objective-C type parameter type. QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Build an Objective-C object pointer type. 
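// Illustrative aside (not part of the original header): BuildObjCObjectType below is the
// type-building entry point for Objective-C types that carry type arguments and/or
// protocol qualifiers. A rough sketch, assuming Foundation-style declarations:
//
//   NSArray<NSString *> *names;                    // specialized: one type argument
//   id<NSCopying, NSCoding> obj;                   // protocol-qualified 'id'
//   NSDictionary<NSString *, id<NSCoding>> *map;   // type arguments and protocols, nested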
QualType BuildObjCObjectType(QualType BaseType, SourceLocation Loc, SourceLocation TypeArgsLAngleLoc, ArrayRef<TypeSourceInfo *> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Ensure attributes are consistent with type. /// \param [in, out] Attributes The attributes to check; they will /// be modified to be consistent with \p PropertyTy. void CheckObjCPropertyAttributes(Decl *PropertyPtrTy, SourceLocation Loc, unsigned &Attributes, bool propertyInPrimaryClass); /// Process the specified property declaration and create decls for the /// setters and getters as needed. /// \param property The property declaration being processed void ProcessPropertyDecl(ObjCPropertyDecl *property); void DiagnosePropertyMismatch(ObjCPropertyDecl *Property, ObjCPropertyDecl *SuperProperty, const IdentifierInfo *Name, bool OverridingProtocolProperty); void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT, ObjCInterfaceDecl *ID); Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods = None, ArrayRef<DeclGroupPtrTy> allTUVars = None); Decl *ActOnProperty(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, ObjCDeclSpec &ODS, Selector GetterSel, Selector SetterSel, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); Decl *ActOnPropertyImplDecl(Scope *S, SourceLocation AtLoc, SourceLocation PropertyLoc, bool ImplKind, IdentifierInfo *PropertyId, IdentifierInfo *PropertyIvar, SourceLocation PropertyIvarLoc, ObjCPropertyQueryKind QueryKind); enum ObjCSpecialMethodKind { OSMK_None, OSMK_Alloc, OSMK_New, OSMK_Copy, OSMK_RetainingInit, OSMK_NonRetainingInit }; struct ObjCArgInfo { IdentifierInfo *Name; SourceLocation NameLoc; // The Type is null if no type was specified, and the DeclSpec is invalid // in this case. ParsedType Type; ObjCDeclSpec DeclSpec; /// ArgAttrs - Attribute list for this argument. ParsedAttributesView ArgAttrs; }; Decl *ActOnMethodDeclaration( Scope *S, SourceLocation BeginLoc, // location of the + or -. SourceLocation EndLoc, // location of the ; or {. tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType, ArrayRef<SourceLocation> SelectorLocs, Selector Sel, // optional arguments. The number of types/arguments is obtained // from the Sel.getNumArgs(). ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind, bool isVariadic, bool MethodDefinition); ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel, const ObjCObjectPointerType *OPT, bool IsInstance); ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty, bool IsInstance); bool CheckARCMethodDecl(ObjCMethodDecl *method); bool inferObjCARCLifetime(ValueDecl *decl); ExprResult HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT, Expr *BaseExpr, SourceLocation OpLoc, DeclarationName MemberName, SourceLocation MemberLoc, SourceLocation SuperLoc, QualType SuperType, bool Super); ExprResult ActOnClassPropertyRefExpr(IdentifierInfo &receiverName, IdentifierInfo &propertyName, SourceLocation receiverNameLoc, SourceLocation propertyNameLoc); ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc); /// Describes the kind of message expression indicated by a message /// send that starts with an identifier. 
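// Illustrative aside (not part of the original header): a message send that starts with an
// identifier is classified into one of the ObjCMessageKind values below. A rough sketch of
// the three receiver forms, assuming an NSObject subclass named 'Widget':
//
//   [super init];        // ObjCSuperMessage: the receiver is 'super'
//   [widget refresh];    // ObjCInstanceMessage: the receiver is an object expression
//   [Widget alloc];      // ObjCClassMessage: the identifier names a class (a type)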
enum ObjCMessageKind { /// The message is sent to 'super'. ObjCSuperMessage, /// The message is an instance message. ObjCInstanceMessage, /// The message is a class message, and the identifier is a type /// name. ObjCClassMessage }; ObjCMessageKind getObjCMessageKind(Scope *S, IdentifierInfo *Name, SourceLocation NameLoc, bool IsSuper, bool HasTrailingDot, ParsedType &ReceiverType); ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildClassMessageImplicit(QualType ReceiverType, bool isSuperReceiver, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnClassMessage(Scope *S, ParsedType Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildInstanceMessage(Expr *Receiver, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildInstanceMessageImplicit(Expr *Receiver, QualType ReceiverType, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnInstanceMessage(Scope *S, Expr *Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, TypeSourceInfo *TSInfo, Expr *SubExpr); ExprResult ActOnObjCBridgedCast(Scope *S, SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, ParsedType Type, SourceLocation RParenLoc, Expr *SubExpr); void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr); void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr); bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr, CastKind &Kind); bool checkObjCBridgeRelatedComponents(SourceLocation Loc, QualType DestType, QualType SrcType, ObjCInterfaceDecl *&RelatedClass, ObjCMethodDecl *&ClassMethod, ObjCMethodDecl *&InstanceMethod, TypedefNameDecl *&TDNDecl, bool CfToNs, bool Diagnose = true); bool CheckObjCBridgeRelatedConversions(SourceLocation Loc, QualType DestType, QualType SrcType, Expr *&SrcExpr, bool Diagnose = true); bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr, bool Diagnose = true); bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall); /// Check whether the given new method is a valid override of the /// given overridden method, and set any properties that should be inherited. void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod, const ObjCMethodDecl *Overridden); /// Describes the compatibility of a result type with its method. 
enum ResultTypeCompatibilityKind { RTC_Compatible, RTC_Incompatible, RTC_Unknown }; void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod, ObjCInterfaceDecl *CurrentClass, ResultTypeCompatibilityKind RTC); enum PragmaOptionsAlignKind { POAK_Native, // #pragma options align=native POAK_Natural, // #pragma options align=natural POAK_Packed, // #pragma options align=packed POAK_Power, // #pragma options align=power POAK_Mac68k, // #pragma options align=mac68k POAK_Reset // #pragma options align=reset }; /// ActOnPragmaClangSection - Called on well formed \#pragma clang section void ActOnPragmaClangSection(SourceLocation PragmaLoc, PragmaClangSectionAction Action, PragmaClangSectionKind SecKind, StringRef SecName); /// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align. void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind, SourceLocation PragmaLoc); /// ActOnPragmaPack - Called on well formed \#pragma pack(...). void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action, StringRef SlotLabel, Expr *Alignment); enum class PragmaPackDiagnoseKind { NonDefaultStateAtInclude, ChangedStateAtExit }; void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind, SourceLocation IncludeLoc); void DiagnoseUnterminatedPragmaPack(); /// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off]. void ActOnPragmaMSStruct(PragmaMSStructKind Kind); /// ActOnPragmaMSComment - Called on well formed /// \#pragma comment(kind, "arg"). void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind, StringRef Arg); /// ActOnPragmaMSPointersToMembers - called on well formed \#pragma /// pointers_to_members(representation method[, general purpose /// representation]). void ActOnPragmaMSPointersToMembers( LangOptions::PragmaMSPointersToMembersKind Kind, SourceLocation PragmaLoc); /// Called on well formed \#pragma vtordisp(). void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action, SourceLocation PragmaLoc, MSVtorDispAttr::Mode Value); enum PragmaSectionKind { PSK_DataSeg, PSK_BSSSeg, PSK_ConstSeg, PSK_CodeSeg, }; bool UnifySection(StringRef SectionName, int SectionFlags, DeclaratorDecl *TheDecl); bool UnifySection(StringRef SectionName, int SectionFlags, SourceLocation PragmaSectionLocation); /// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg. void ActOnPragmaMSSeg(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, StringLiteral *SegmentName, llvm::StringRef PragmaName); /// Called on well formed \#pragma section(). void ActOnPragmaMSSection(SourceLocation PragmaLocation, int SectionFlags, StringLiteral *SegmentName); /// Called on well-formed \#pragma init_seg(). void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation, StringLiteral *SegmentName); /// Called on #pragma clang __debug dump II void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II); /// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name, StringRef Value); /// ActOnPragmaUnused - Called on well-formed '\#pragma unused'. void ActOnPragmaUnused(const Token &Identifier, Scope *curScope, SourceLocation PragmaLoc); /// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... . 
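// Illustrative aside (not part of the original header): the GCC visibility pragma handled
// by the callback below appears in user code roughly as:
//
//   #pragma GCC visibility push(hidden)
//   void helper();                 // helper() receives hidden visibility
//   #pragma GCC visibility pop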
void ActOnPragmaVisibility(const IdentifierInfo* VisType, SourceLocation PragmaLoc); NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II, SourceLocation Loc); void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W); /// ActOnPragmaWeakID - Called on well formed \#pragma weak ident. void ActOnPragmaWeakID(IdentifierInfo* WeakName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc); /// ActOnPragmaRedefineExtname - Called on well formed /// \#pragma redefine_extname oldname newname. void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident. void ActOnPragmaWeakAlias(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaFPContract - Called on well formed /// \#pragma {STDC,OPENCL} FP_CONTRACT and /// \#pragma clang fp contract void ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC); /// ActOnPragmaFenvAccess - Called on well formed /// \#pragma STDC FENV_ACCESS void ActOnPragmaFEnvAccess(LangOptions::FEnvAccessModeKind FPC); /// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to /// a the record decl, to handle '\#pragma pack' and '\#pragma options align'. void AddAlignmentAttributesForRecord(RecordDecl *RD); /// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record. void AddMsStructLayoutForRecord(RecordDecl *RD); /// FreePackedContext - Deallocate and null out PackContext. void FreePackedContext(); /// PushNamespaceVisibilityAttr - Note that we've entered a /// namespace with a visibility attribute. void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr, SourceLocation Loc); /// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used, /// add an appropriate visibility attribute. void AddPushedVisibilityAttribute(Decl *RD); /// PopPragmaVisibility - Pop the top element of the visibility stack; used /// for '\#pragma GCC visibility' and visibility attributes on namespaces. void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc); /// FreeVisContext - Deallocate and null out VisContext. void FreeVisContext(); /// AddCFAuditedAttribute - Check whether we're currently within /// '\#pragma clang arc_cf_code_audited' and, if so, consider adding /// the appropriate attribute. void AddCFAuditedAttribute(Decl *D); void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute, SourceLocation PragmaLoc, attr::ParsedSubjectMatchRuleSet Rules); void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Called on well-formed '\#pragma clang attribute pop'. void ActOnPragmaAttributePop(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Adds the attributes that have been specified using the /// '\#pragma clang attribute push' directives to the given declaration. void AddPragmaAttributes(Scope *S, Decl *D); void DiagnoseUnterminatedPragmaAttribute(); /// Called on well formed \#pragma clang optimize. void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc); /// Get the location for the currently active "\#pragma clang optimize /// off". If this location is invalid, then the state of the pragma is "on". 
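// Illustrative aside (not part of the original header): the optimize pragma whose state the
// accessor below reports is written in user code roughly as:
//
//   #pragma clang optimize off
//   void debug_me() { /* compiled as if marked optnone */ }
//   #pragma clang optimize on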
SourceLocation getOptimizeOffPragmaLocation() const { return OptimizeOffPragmaLocation; } /// Only called on function definitions; if there is a pragma in scope /// with the effect of a range-based optnone, consider marking the function /// with attribute optnone. void AddRangeBasedOptnone(FunctionDecl *FD); /// Adds the 'optnone' attribute to the function declaration if there /// are no conflicts; Loc represents the location causing the 'optnone' /// attribute to be added (usually because of a pragma). void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc); /// AddAlignedAttr - Adds an aligned attribute to a particular declaration. void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, unsigned SpellingListIndex, bool IsPackExpansion); void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T, unsigned SpellingListIndex, bool IsPackExpansion); /// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular /// declaration. void AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, Expr *OE, unsigned SpellingListIndex); /// AddAllocAlignAttr - Adds an alloc_align attribute to a particular /// declaration. void AddAllocAlignAttr(SourceRange AttrRange, Decl *D, Expr *ParamExpr, unsigned SpellingListIndex); /// AddAlignValueAttr - Adds an align_value attribute to a particular /// declaration. void AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E, unsigned SpellingListIndex); /// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular /// declaration. void AddLaunchBoundsAttr(SourceRange AttrRange, Decl *D, Expr *MaxThreads, Expr *MinBlocks, unsigned SpellingListIndex); /// AddModeAttr - Adds a mode attribute to a particular declaration. void AddModeAttr(SourceRange AttrRange, Decl *D, IdentifierInfo *Name, unsigned SpellingListIndex, bool InInstantiation = false); void AddParameterABIAttr(SourceRange AttrRange, Decl *D, ParameterABI ABI, unsigned SpellingListIndex); enum class RetainOwnershipKind {NS, CF, OS}; void AddXConsumedAttr(Decl *D, SourceRange SR, unsigned SpellingIndex, RetainOwnershipKind K, bool IsTemplateInstantiation); /// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size /// attribute to a particular declaration. void addAMDGPUFlatWorkGroupSizeAttr(SourceRange AttrRange, Decl *D, Expr *Min, Expr *Max, unsigned SpellingListIndex); /// addAMDGPUWavePersEUAttr - Adds an amdgpu_waves_per_eu attribute to a /// particular declaration. 
void addAMDGPUWavesPerEUAttr(SourceRange AttrRange, Decl *D, Expr *Min, Expr *Max, unsigned SpellingListIndex); bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type); //===--------------------------------------------------------------------===// // C++ Coroutines TS // bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc, StringRef Keyword); ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E); StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, UnresolvedLookupExpr* Lookup); ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E); StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs); bool buildCoroutineParameterMoves(SourceLocation Loc); VarDecl *buildCoroutinePromise(SourceLocation Loc); void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body); ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc, SourceLocation FuncLoc); //===--------------------------------------------------------------------===// // OpenCL extensions. // private: std::string CurrOpenCLExtension; /// Extensions required by an OpenCL type. llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap; /// Extensions required by an OpenCL declaration. llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap; public: llvm::StringRef getCurrentOpenCLExtension() const { return CurrOpenCLExtension; } /// Check if a function declaration \p FD associates with any /// extensions present in OpenCLDeclExtMap and if so return the /// extension(s) name(s). std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD); /// Check if a function type \p FT associates with any /// extensions present in OpenCLTypeExtMap and if so return the /// extension(s) name(s). std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT); /// Find an extension in an appropriate extension map and return its name template<typename T, typename MapT> std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map); void setCurrentOpenCLExtension(llvm::StringRef Ext) { CurrOpenCLExtension = Ext; } /// Set OpenCL extensions for a type which can only be used when these /// OpenCL extensions are enabled. If \p Exts is empty, do nothing. /// \param Exts A space separated list of OpenCL extensions. void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts); /// Set OpenCL extensions for a declaration which can only be /// used when these OpenCL extensions are enabled. If \p Exts is empty, do /// nothing. /// \param Exts A space separated list of OpenCL extensions. void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts); /// Set current OpenCL extensions for a type which can only be used /// when these OpenCL extensions are enabled. If current OpenCL extension is /// empty, do nothing. void setCurrentOpenCLExtensionForType(QualType T); /// Set current OpenCL extensions for a declaration which /// can only be used when these OpenCL extensions are enabled. If current /// OpenCL extension is empty, do nothing. void setCurrentOpenCLExtensionForDecl(Decl *FD); bool isOpenCLDisabledDecl(Decl *FD); /// Check if type \p T corresponding to declaration specifier \p DS /// is disabled due to required OpenCL extensions being disabled. 
If so, /// emit diagnostics. /// \return true if type is disabled. bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T); /// Check if declaration \p D used by expression \p E /// is disabled due to required OpenCL extensions being disabled. If so, /// emit diagnostics. /// \return true if type is disabled. bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E); //===--------------------------------------------------------------------===// // OpenMP directives and clauses. // private: void *VarDataSharingAttributesStack; /// Number of nested '#pragma omp declare target' directives. unsigned DeclareTargetNestingLevel = 0; /// Initialization of data-sharing attributes stack. void InitDataSharingAttributesStack(); void DestroyDataSharingAttributesStack(); ExprResult VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind, bool StrictlyPositive = true); /// Returns OpenMP nesting level for current directive. unsigned getOpenMPNestingLevel() const; /// Adjusts the function scopes index for the target-based regions. void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex, unsigned Level) const; /// Push new OpenMP function region for non-capturing function. void pushOpenMPFunctionRegion(); /// Pop OpenMP function region for non-capturing function. void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI); /// Check whether we're allowed to call Callee from the current function. void checkOpenMPDeviceFunction(SourceLocation Loc, FunctionDecl *Callee); /// Check if the expression is allowed to be used in expressions for the /// OpenMP devices. void checkOpenMPDeviceExpr(const Expr *E); /// Checks if a type or a declaration is disabled due to the owning extension /// being disabled, and emits diagnostic messages if it is disabled. /// \param D type or declaration to be checked. /// \param DiagLoc source location for the diagnostic message. /// \param DiagInfo information to be emitted for the diagnostic message. /// \param SrcRange source range of the declaration. /// \param Map maps type or declaration to the extensions. /// \param Selector selects diagnostic message: 0 for type and 1 for /// declaration. /// \return true if the type or declaration is disabled. template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT> bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo, MapT &Map, unsigned Selector = 0, SourceRange SrcRange = SourceRange()); public: /// Return true if the provided declaration \a VD should be captured by /// reference. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level) const; /// Check if the specified variable is used in one of the private /// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP /// constructs. VarDecl *isOpenMPCapturedDecl(ValueDecl *D); ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK, ExprObjectKind OK, SourceLocation Loc); /// If the current region is a loop-based region, mark the start of the loop /// construct. void startOpenMPLoop(); /// Check if the specified variable is used in 'private' clause. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. bool isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const; /// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.) 
/// for \p FD based on DSA for the provided corresponding captured declaration /// \p D. void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level); /// Check if the specified variable is captured by 'target' directive. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level) const; ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc, Expr *Op); /// Called on start of new data sharing attribute block. void StartOpenMPDSABlock(OpenMPDirectiveKind K, const DeclarationNameInfo &DirName, Scope *CurScope, SourceLocation Loc); /// Start analysis of clauses. void StartOpenMPClause(OpenMPClauseKind K); /// End analysis of clauses. void EndOpenMPClause(); /// Called on end of data sharing attribute block. void EndOpenMPDSABlock(Stmt *CurDirective); /// Check if the current region is an OpenMP loop region and if it is, /// mark loop control variable, used in \p Init for loop initialization, as /// private by default. /// \param Init First part of the for loop. void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init); // OpenMP directives and clauses. /// Called on correct id-expression from the '#pragma omp /// threadprivate'. ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id); /// Called on well-formed '#pragma omp threadprivate'. DeclGroupPtrTy ActOnOpenMPThreadprivateDirective( SourceLocation Loc, ArrayRef<Expr *> VarList); /// Builds a new OpenMPThreadPrivateDecl and checks its correctness. OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList); /// Called on well-formed '#pragma omp requires'. DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc, ArrayRef<OMPClause *> ClauseList); /// Check restrictions on Requires directive OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc, ArrayRef<OMPClause *> Clauses); /// Check if the specified type is allowed to be used in 'omp declare /// reduction' construct. QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '#pragma omp declare reduction'. DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart( Scope *S, DeclContext *DC, DeclarationName Name, ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes, AccessSpecifier AS, Decl *PrevDeclInScope = nullptr); /// Initialize declare reduction construct initializer. void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D); /// Finish current declare reduction construct initializer. void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner); /// Initialize declare reduction construct initializer. /// \return omp_priv variable. VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D); /// Finish current declare reduction construct initializer. void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer, VarDecl *OmpPrivParm); /// Called at the end of '#pragma omp declare reduction'. DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd( Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid); /// Check variable declaration in 'omp declare mapper' construct. TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D); /// Check if the specified type is allowed to be used in 'omp declare /// mapper' construct. 
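// Illustrative aside (not part of the original header): an 'omp declare mapper' directive,
// which the declare-mapper callbacks below handle, looks roughly like this (OpenMP 5.0
// syntax; the mapper name and struct are placeholders):
//
//   struct Vec { int len; double *data; };
//   #pragma omp declare mapper(deep : struct Vec v) map(v.len, v.data[0:v.len])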
QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '#pragma omp declare mapper'. OMPDeclareMapperDecl *ActOnOpenMPDeclareMapperDirectiveStart( Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType, SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS, Decl *PrevDeclInScope = nullptr); /// Build the mapper variable of '#pragma omp declare mapper'. void ActOnOpenMPDeclareMapperDirectiveVarDecl(OMPDeclareMapperDecl *DMD, Scope *S, QualType MapperType, SourceLocation StartLoc, DeclarationName VN); /// Called at the end of '#pragma omp declare mapper'. DeclGroupPtrTy ActOnOpenMPDeclareMapperDirectiveEnd(OMPDeclareMapperDecl *D, Scope *S, ArrayRef<OMPClause *> ClauseList); /// Called on the start of target region i.e. '#pragma omp declare target'. bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc); /// Called at the end of target region i.e. '#pragme omp end declare target'. void ActOnFinishOpenMPDeclareTargetDirective(); /// Called on correct id-expression from the '#pragma omp declare target'. void ActOnOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id, OMPDeclareTargetDeclAttr::MapTypeTy MT, NamedDeclSetType &SameDirectiveDecls); /// Check declaration inside target region. void checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D, SourceLocation IdLoc = SourceLocation()); /// Return true inside OpenMP declare target region. bool isInOpenMPDeclareTargetContext() const { return DeclareTargetNestingLevel > 0; } /// Return true inside OpenMP target region. bool isInOpenMPTargetExecutionDirective() const; /// Return true if (un)supported features for the current target should be /// diagnosed if OpenMP (offloading) is enabled. bool shouldDiagnoseTargetSupportFromOpenMP() const { return !getLangOpts().OpenMPIsDevice || isInOpenMPDeclareTargetContext() || isInOpenMPTargetExecutionDirective(); } /// Return the number of captured regions created for an OpenMP directive. static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind); /// Initialization of captured region for OpenMP region. void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope); /// End of OpenMP region. /// /// \param S Statement associated with the current OpenMP region. /// \param Clauses List of clauses for the current OpenMP region. /// /// \returns Statement for finished OpenMP region. StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses); StmtResult ActOnOpenMPExecutableDirective( OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName, OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); using VarsWithInheritedDSAType = llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>; /// Called on well-formed '\#pragma omp simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for' after parsing /// of the associated statement. 
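// Illustrative aside (not part of the original header): the worksharing-loop directive
// handled below, in its simplest user-code form:
//
//   #pragma omp parallel
//   {
//   #pragma omp for
//     for (int i = 0; i < n; ++i)
//       a[i] = b[i] + c[i];
//   }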
StmtResult ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp sections' after parsing /// of the associated statement. StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp section' after parsing of the /// associated statement. StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp single' after parsing of the /// associated statement. StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp master' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp critical' after parsing of the /// associated statement. StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel for' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel sections' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp task' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskyield'. StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp barrier'. StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskwait'. StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskgroup'. StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp flush'. StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp ordered' after parsing of the /// associated statement. 
StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp atomic' after parsing of the /// associated statement. StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target data' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target enter data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target exit data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target parallel' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target parallel for' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp cancellation point'. StmtResult ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp cancel'. StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp taskloop' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp taskloop simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target update'. 
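// Illustrative aside (not part of the original header): a 'target update' directive moves
// data between host and device without entering a target region, roughly:
//
//   #pragma omp target update to(a[0:n])     // refresh the device copy
//   #pragma omp target update from(b[0:n])   // copy results back to the host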
StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp distribute parallel for' after /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute' after parsing of /// the associated statement. StmtResult ActOnOpenMPTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target teams distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for' /// after parsing of the associated statement. 
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for /// simd' after parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Checks correctness of linear modifiers. bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind, SourceLocation LinLoc); /// Checks that the specified declaration matches requirements for the linear /// decls. bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc, OpenMPLinearClauseKind LinKind, QualType Type); /// Called on well-formed '\#pragma omp declare simd' after parsing of /// the associated method/function. DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective( DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS, Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds, ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears, ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR); OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'if' clause. OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation NameModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'final' clause. OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_threads' clause. OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'safelen' clause. OMPClause *ActOnOpenMPSafelenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'simdlen' clause. OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'collapse' clause. OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'ordered' clause. OMPClause * ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc, SourceLocation LParenLoc = SourceLocation(), Expr *NumForLoops = nullptr); /// Called on well-formed 'grainsize' clause. OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_tasks' clause. OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'hint' clause. 
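// Illustrative aside (not part of the original header): a 'hint' clause as it might appear
// on a critical construct; the hint constant comes from omp.h and its exact spelling is an
// assumption here:
//
//   #pragma omp critical (queue_lock) hint(omp_lock_hint_contended)
//   { push(queue, item); }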
OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind, unsigned Argument, SourceLocation ArgumentLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'default' clause. OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'proc_bind' clause. OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSingleExprWithArgClause( OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc, SourceLocation EndLoc); /// Called on well-formed 'schedule' clause. OMPClause *ActOnOpenMPScheduleClause( OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2, OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'nowait' clause. OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'untied' clause. OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'mergeable' clause. OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'read' clause. OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'write' clause. OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'update' clause. OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'capture' clause. OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'seq_cst' clause. OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'threads' clause. OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'simd' clause. OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'nogroup' clause. OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'unified_address' clause. OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'unified_address' clause. OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'reverse_offload' clause. OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'dynamic_allocators' clause. OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'atomic_default_mem_order' clause. 
OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause( OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPVarListClause( OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr, const OMPVarListLocTy &Locs, SourceLocation ColonLoc, CXXScopeSpec &ReductionOrMapperIdScopeSpec, DeclarationNameInfo &ReductionOrMapperId, OpenMPDependClauseKind DepKind, OpenMPLinearClauseKind LinKind, ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, SourceLocation DepLinMapLoc); /// Called on well-formed 'private' clause. OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'firstprivate' clause. OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'lastprivate' clause. OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'shared' clause. OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'reduction' clause. OMPClause *ActOnOpenMPReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'task_reduction' clause. OMPClause *ActOnOpenMPTaskReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'in_reduction' clause. OMPClause *ActOnOpenMPInReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'linear' clause. OMPClause * ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step, SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind LinKind, SourceLocation LinLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'aligned' clause. OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList, Expr *Alignment, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'copyin' clause. OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'copyprivate' clause. OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'flush' pseudo clause. OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'depend' clause. 
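// Illustrative aside (not part of the original header): 'depend' clauses express task
// ordering through in/out/inout dependences on list items, roughly:
//
//   #pragma omp task depend(out: x)
//   { x = produce(); }
//   #pragma omp task depend(in: x)
//   { consume(x); }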
OMPClause * ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'device' clause. OMPClause *ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'map' clause. OMPClause * ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, SourceLocation MapLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'num_teams' clause. OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'thread_limit' clause. OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'priority' clause. OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'dist_schedule' clause. OMPClause *ActOnOpenMPDistScheduleClause( OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); /// Called on well-formed 'defaultmap' clause. OMPClause *ActOnOpenMPDefaultmapClause( OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc, SourceLocation KindLoc, SourceLocation EndLoc); /// Called on well-formed 'to' clause. OMPClause * ActOnOpenMPToClause(ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'from' clause. OMPClause *ActOnOpenMPFromClause( ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'use_device_ptr' clause. OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'is_device_ptr' clause. OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// The kind of conversion being performed. enum CheckedConversionKind { /// An implicit conversion. CCK_ImplicitConversion, /// A C-style cast. CCK_CStyleCast, /// A functional-style cast. CCK_FunctionalCast, /// A cast other than a C-style cast. CCK_OtherCast, /// A conversion for an operand of a builtin overloaded operator. CCK_ForBuiltinOverloadedOp }; static bool isCast(CheckedConversionKind CCK) { return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast || CCK == CCK_OtherCast; } /// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit /// cast. If there is already an implicit cast, merge into the existing one. /// If isLvalue, the result of the cast is an lvalue. 
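// Illustrative aside (not part of the original header): ImpCastExprToType is how Sema
// materializes the standard implicit conversions in the AST. For example, in
//
//   double d = 1;   // '1' is wrapped in an ImplicitCastExpr (IntegralToFloating)
//
// the initializer becomes an ImplicitCastExpr around the IntegerLiteral rather than a
// rewritten literal.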
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK, ExprValueKind VK = VK_RValue, const CXXCastPath *BasePath = nullptr, CheckedConversionKind CCK = CCK_ImplicitConversion); /// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding /// to the conversion from scalar type ScalarTy to the Boolean type. static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy); /// IgnoredValueConversions - Given that an expression's result is /// syntactically ignored, perform any conversions that are /// required. ExprResult IgnoredValueConversions(Expr *E); // UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts // functions and arrays to their respective pointers (C99 6.3.2.1). ExprResult UsualUnaryConversions(Expr *E); /// CallExprUnaryConversions - a special case of an unary conversion /// performed on a function designator of a call expression. ExprResult CallExprUnaryConversions(Expr *E); // DefaultFunctionArrayConversion - converts functions and arrays // to their respective pointers (C99 6.3.2.1). ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true); // DefaultFunctionArrayLvalueConversion - converts functions and // arrays to their respective pointers and performs the // lvalue-to-rvalue conversion. ExprResult DefaultFunctionArrayLvalueConversion(Expr *E, bool Diagnose = true); // DefaultLvalueConversion - performs lvalue-to-rvalue conversion on // the operand. This is DefaultFunctionArrayLvalueConversion, // except that it assumes the operand isn't of function or array // type. ExprResult DefaultLvalueConversion(Expr *E); // DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that // do not have a prototype. Integer promotions are performed on each // argument, and arguments that have type float are promoted to double. ExprResult DefaultArgumentPromotion(Expr *E); /// If \p E is a prvalue denoting an unmaterialized temporary, materialize /// it as an xvalue. In C++98, the result will still be a prvalue, because /// we don't have xvalues there. ExprResult TemporaryMaterializationConversion(Expr *E); // Used for emitting the right warning by DefaultVariadicArgumentPromotion enum VariadicCallType { VariadicFunction, VariadicBlock, VariadicMethod, VariadicConstructor, VariadicDoesNotApply }; VariadicCallType getVariadicCallType(FunctionDecl *FDecl, const FunctionProtoType *Proto, Expr *Fn); // Used for determining in which context a type is allowed to be passed to a // vararg function. enum VarArgKind { VAK_Valid, VAK_ValidInCXX11, VAK_Undefined, VAK_MSVCUndefined, VAK_Invalid }; // Determines which VarArgKind fits an expression. VarArgKind isValidVarArgType(const QualType &Ty); /// Check to see if the given expression is a valid argument to a variadic /// function, issuing a diagnostic if not. void checkVariadicArgument(const Expr *E, VariadicCallType CT); /// Check to see if a given expression could have '.c_str()' called on it. bool hasCStrMethod(const Expr *E); /// GatherArgumentsForCall - Collector argument expressions for various /// form of call prototypes. bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl, const FunctionProtoType *Proto, unsigned FirstParam, ArrayRef<Expr *> Args, SmallVectorImpl<Expr *> &AllArgs, VariadicCallType CallType = VariadicDoesNotApply, bool AllowExplicit = false, bool IsListInitialization = false); // DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but // will create a runtime trap if the resulting type is not a POD type. 
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT, FunctionDecl *FDecl); // UsualArithmeticConversions - performs the UsualUnaryConversions on it's // operands and then handles various conversions that are common to binary // operators (C99 6.3.1.8). If both operands aren't arithmetic, this // routine returns the first non-arithmetic type found. The client is // responsible for emitting appropriate error diagnostics. QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS, bool IsCompAssign = false); /// AssignConvertType - All of the 'assignment' semantic checks return this /// enum to indicate whether the assignment was allowed. These checks are /// done for simple assignments, as well as initialization, return from /// function, argument passing, etc. The query is phrased in terms of a /// source and destination type. enum AssignConvertType { /// Compatible - the types are compatible according to the standard. Compatible, /// PointerToInt - The assignment converts a pointer to an int, which we /// accept as an extension. PointerToInt, /// IntToPointer - The assignment converts an int to a pointer, which we /// accept as an extension. IntToPointer, /// FunctionVoidPointer - The assignment is between a function pointer and /// void*, which the standard doesn't allow, but we accept as an extension. FunctionVoidPointer, /// IncompatiblePointer - The assignment is between two pointers types that /// are not compatible, but we accept them as an extension. IncompatiblePointer, /// IncompatiblePointerSign - The assignment is between two pointers types /// which point to integers which have a different sign, but are otherwise /// identical. This is a subset of the above, but broken out because it's by /// far the most common case of incompatible pointers. IncompatiblePointerSign, /// CompatiblePointerDiscardsQualifiers - The assignment discards /// c/v/r qualifiers, which we accept as an extension. CompatiblePointerDiscardsQualifiers, /// IncompatiblePointerDiscardsQualifiers - The assignment /// discards qualifiers that we don't permit to be discarded, /// like address spaces. IncompatiblePointerDiscardsQualifiers, /// IncompatibleNestedPointerQualifiers - The assignment is between two /// nested pointer types, and the qualifiers other than the first two /// levels differ e.g. char ** -> const char **, but we accept them as an /// extension. IncompatibleNestedPointerQualifiers, /// IncompatibleVectors - The assignment is between two vector types that /// have the same size, which we accept as an extension. IncompatibleVectors, /// IntToBlockPointer - The assignment converts an int to a block /// pointer. We disallow this. IntToBlockPointer, /// IncompatibleBlockPointer - The assignment is between two block /// pointers types that are not compatible. IncompatibleBlockPointer, /// IncompatibleObjCQualifiedId - The assignment is between a qualified /// id type and something else (that is incompatible with it). For example, /// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol. IncompatibleObjCQualifiedId, /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an /// object with __weak qualifier. IncompatibleObjCWeakRef, /// Incompatible - We reject this conversion outright, it is invalid to /// represent it in the AST. Incompatible }; /// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the /// assignment conversion type specified by ConvTy. 
This returns true if the /// conversion was invalid or false if the conversion was accepted. bool DiagnoseAssignmentResult(AssignConvertType ConvTy, SourceLocation Loc, QualType DstType, QualType SrcType, Expr *SrcExpr, AssignmentAction Action, bool *Complained = nullptr); /// IsValueInFlagEnum - Determine if a value is allowed as part of a flag /// enum. If AllowMask is true, then we also allow the complement of a valid /// value, to be used as a mask. bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val, bool AllowMask) const; /// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant /// integer not in the range of enum values. void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType, Expr *SrcExpr); /// CheckAssignmentConstraints - Perform type checking for assignment, /// argument passing, variable initialization, and function return values. /// C99 6.5.16. AssignConvertType CheckAssignmentConstraints(SourceLocation Loc, QualType LHSType, QualType RHSType); /// Check assignment constraints and optionally prepare for a conversion of /// the RHS to the LHS type. The conversion is prepared for if ConvertRHS /// is true. AssignConvertType CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS, CastKind &Kind, bool ConvertRHS = true); /// Check assignment constraints for an assignment of RHS to LHSType. /// /// \param LHSType The destination type for the assignment. /// \param RHS The source expression for the assignment. /// \param Diagnose If \c true, diagnostics may be produced when checking /// for assignability. If a diagnostic is produced, \p RHS will be /// set to ExprError(). Note that this function may still return /// without producing a diagnostic, even for an invalid assignment. /// \param DiagnoseCFAudited If \c true, the target is a function parameter /// in an audited Core Foundation API and does not need to be checked /// for ARC retain issues. /// \param ConvertRHS If \c true, \p RHS will be updated to model the /// conversions necessary to perform the assignment. If \c false, /// \p Diagnose must also be \c false. AssignConvertType CheckSingleAssignmentConstraints( QualType LHSType, ExprResult &RHS, bool Diagnose = true, bool DiagnoseCFAudited = false, bool ConvertRHS = true); // If the lhs type is a transparent union, check whether we // can initialize the transparent union with the given expression. AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType, ExprResult &RHS); bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType); bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit = false); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit, ImplicitConversionSequence& ICS); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const ImplicitConversionSequence& ICS, AssignmentAction Action, CheckedConversionKind CCK = CCK_ImplicitConversion); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const StandardConversionSequence& SCS, AssignmentAction Action, CheckedConversionKind CCK); ExprResult PerformQualificationConversion( Expr *E, QualType Ty, ExprValueKind VK = VK_RValue, CheckedConversionKind CCK = CCK_ImplicitConversion); /// the following "Check" methods will return a valid/converted QualType /// or a null QualType (indicating an error diagnostic was issued). 
/// type checking binary operators (subroutines of CreateBuiltinBinOp). QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType CheckPointerToMemberOperands( // C++ 5.5 ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, SourceLocation OpLoc, bool isIndirect); QualType CheckMultiplyDivideOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool IsDivide); QualType CheckRemainderOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckAdditionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr); QualType CheckSubtractionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, QualType* CompLHSTy = nullptr); QualType CheckShiftOperands( // C99 6.5.7 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, bool IsCompAssign = false); QualType CheckCompareOperands( // C99 6.5.8/9 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckBitwiseOperands( // C99 6.5.[10...12] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckLogicalOperands( // C99 6.5.[13,14] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); // CheckAssignmentOperands is used for both simple and compound assignment. // For simple assignment, pass both expressions and a null converted type. // For compound assignment, pass both expressions and the converted type. QualType CheckAssignmentOperands( // C99 6.5.16.[1,2] Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType); ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opcode, Expr *Op); ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opcode, Expr *LHS, Expr *RHS); ExprResult checkPseudoObjectRValue(Expr *E); Expr *recreateSyntacticForm(PseudoObjectExpr *E); QualType CheckConditionalOperands( // C99 6.5.15 ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc); QualType CXXCheckConditionalOperands( // C++ 5.16 ExprResult &cond, ExprResult &lhs, ExprResult &rhs, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc); QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2, bool ConvertArgs = true); QualType FindCompositePointerType(SourceLocation Loc, ExprResult &E1, ExprResult &E2, bool ConvertArgs = true) { Expr *E1Tmp = E1.get(), *E2Tmp = E2.get(); QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs); E1 = E1Tmp; E2 = E2Tmp; return Composite; } QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr, SourceLocation QuestionLoc); void DiagnoseAlwaysNonNullPointer(Expr *E, Expr::NullPointerConstantKind NullType, bool IsEqual, SourceRange Range); /// type checking for vector binary operators. 
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool AllowBothBool, bool AllowBoolConversion); QualType GetSignedVectorType(QualType V); QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc); bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType); bool isLaxVectorConversion(QualType srcType, QualType destType); /// type checking declaration initializers (C99 6.7.8) bool CheckForConstantInitializer(Expr *e, QualType t); // type checking C++ declaration initializers (C++ [dcl.init]). /// ReferenceCompareResult - Expresses the result of comparing two /// types (cv1 T1 and cv2 T2) to determine their compatibility for the /// purposes of initialization by reference (C++ [dcl.init.ref]p4). enum ReferenceCompareResult { /// Ref_Incompatible - The two types are incompatible, so direct /// reference binding is not possible. Ref_Incompatible = 0, /// Ref_Related - The two types are reference-related, which means /// that their unqualified forms (T1 and T2) are either the same /// or T1 is a base class of T2. Ref_Related, /// Ref_Compatible - The two types are reference-compatible. Ref_Compatible }; ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2, bool &DerivedToBase, bool &ObjCConversion, bool &ObjCLifetimeConversion); ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType, Expr *CastExpr, CastKind &CastKind, ExprValueKind &VK, CXXCastPath &Path); /// Force an expression with unknown-type to an expression of the /// given type. ExprResult forceUnknownAnyToType(Expr *E, QualType ToType); /// Type-check an expression that's being passed to an /// __unknown_anytype parameter. ExprResult checkUnknownAnyArg(SourceLocation callLoc, Expr *result, QualType &paramType); // CheckVectorCast - check type constraints for vectors. // Since vectors are an extension, there are no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size. // returns true if the cast is invalid bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty, CastKind &Kind); /// Prepare `SplattedExpr` for a vector splat operation, adding /// implicit casts if necessary. ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr); // CheckExtVectorCast - check type constraints for extended vectors. // Since vectors are an extension, there are no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size, // or vectors and the element type of that vector. // returns the cast expr ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr, CastKind &Kind); ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type, SourceLocation LParenLoc, Expr *CastExpr, SourceLocation RParenLoc); enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error }; /// Checks for invalid conversions and casts between /// retainable pointers and other pointer kinds for ARC and Weak. 
ARCConversionResult CheckObjCConversion(SourceRange castRange, QualType castType, Expr *&op, CheckedConversionKind CCK, bool Diagnose = true, bool DiagnoseCFAudited = false, BinaryOperatorKind Opc = BO_PtrMemD ); Expr *stripARCUnbridgedCast(Expr *e); void diagnoseARCUnbridgedCast(Expr *e); bool CheckObjCARCUnavailableWeakConversion(QualType castType, QualType ExprType); /// checkRetainCycles - Check whether an Objective-C message send /// might create an obvious retain cycle. void checkRetainCycles(ObjCMessageExpr *msg); void checkRetainCycles(Expr *receiver, Expr *argument); void checkRetainCycles(VarDecl *Var, Expr *Init); /// checkUnsafeAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained type. bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS); /// checkUnsafeExprAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained expression. void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS); /// CheckMessageArgumentTypes - Check types in an Obj-C message send. /// \param Method - May be null. /// \param [out] ReturnType - The return type of the send. /// \return true iff there were any incompatible types. bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType, MultiExprArg Args, Selector Sel, ArrayRef<SourceLocation> SelectorLocs, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage, SourceLocation lbrac, SourceLocation rbrac, SourceRange RecRange, QualType &ReturnType, ExprValueKind &VK); /// Determine the result of a message send expression based on /// the type of the receiver, the method expected to receive the message, /// and the form of the message send. QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage); /// If the given expression involves a message send to a method /// with a related result type, emit a note describing what happened. void EmitRelatedResultTypeNote(const Expr *E); /// Given that we had incompatible pointer types in a return /// statement, check whether we're in a method with a related result /// type, and if so, emit a note describing what happened. void EmitRelatedResultTypeNoteForReturn(QualType destType); class ConditionResult { Decl *ConditionVar; FullExprArg Condition; bool Invalid; bool HasKnownValue; bool KnownValue; friend class Sema; ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition, bool IsConstexpr) : ConditionVar(ConditionVar), Condition(Condition), Invalid(false), HasKnownValue(IsConstexpr && Condition.get() && !Condition.get()->isValueDependent()), KnownValue(HasKnownValue && !!Condition.get()->EvaluateKnownConstInt(S.Context)) {} explicit ConditionResult(bool Invalid) : ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid), HasKnownValue(false), KnownValue(false) {} public: ConditionResult() : ConditionResult(false) {} bool isInvalid() const { return Invalid; } std::pair<VarDecl *, Expr *> get() const { return std::make_pair(cast_or_null<VarDecl>(ConditionVar), Condition.get()); } llvm::Optional<bool> getKnownValue() const { if (!HasKnownValue) return None; return KnownValue; } }; static ConditionResult ConditionError() { return ConditionResult(true); } enum class ConditionKind { Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'. ConstexprIf, ///< A constant boolean condition from 'if constexpr'. Switch ///< An integral condition for a 'switch' statement. 
}; ConditionResult ActOnCondition(Scope *S, SourceLocation Loc, Expr *SubExpr, ConditionKind CK); ConditionResult ActOnConditionVariable(Decl *ConditionVar, SourceLocation StmtLoc, ConditionKind CK); DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D); ExprResult CheckConditionVariable(VarDecl *ConditionVar, SourceLocation StmtLoc, ConditionKind CK); ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond); /// CheckBooleanCondition - Diagnose problems involving the use of /// the given expression as a boolean condition (e.g. in an if /// statement). Also performs the standard function and array /// decays, possibly changing the input variable. /// /// \param Loc - A location associated with the condition, e.g. the /// 'if' keyword. /// \return true iff there were any errors ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E, bool IsConstexpr = false); /// DiagnoseAssignmentAsCondition - Given that an expression is /// being used as a boolean condition, warn if it's an assignment. void DiagnoseAssignmentAsCondition(Expr *E); /// Redundant parentheses over an equality comparison can indicate /// that the user intended an assignment used as condition. void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE); /// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid. ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false); /// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have /// the specified width and sign. If an overflow occurs, detect it and emit /// the specified diagnostic. void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal, unsigned NewWidth, bool NewSign, SourceLocation Loc, unsigned DiagID); /// Checks that the Objective-C declaration is declared in the global scope. /// Emits an error and marks the declaration as invalid if it's not declared /// in the global scope. bool CheckObjCDeclScope(Decl *D); /// Abstract base class used for diagnosing integer constant /// expression violations. class VerifyICEDiagnoser { public: bool Suppress; VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { } virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0; virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR); virtual ~VerifyICEDiagnoser() { } }; /// VerifyIntegerConstantExpression - Verifies that an expression is an ICE, /// and reports the appropriate diagnostics. Returns false on success. /// Can optionally return the value of the expression. ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, VerifyICEDiagnoser &Diagnoser, bool AllowFold = true); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, unsigned DiagID, bool AllowFold = true); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result = nullptr); /// VerifyBitField - verifies that a bit field expression is an ICE and has /// the correct width, and that the field type is valid. /// Returns false on success. /// Can optionally return whether the bit-field is of width 0 ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName, QualType FieldTy, bool IsMsStruct, Expr *BitWidth, bool *ZeroWidth = nullptr); private: unsigned ForceCUDAHostDeviceDepth = 0; public: /// Increments our count of the number of times we've seen a pragma forcing /// functions to be __host__ __device__. So long as this count is greater /// than zero, all functions encountered will be __host__ __device__. 
void PushForceCUDAHostDevice(); /// Decrements our count of the number of times we've seen a pragma forcing /// functions to be __host__ __device__. Returns false if the count is 0 /// before incrementing, so you can emit an error. bool PopForceCUDAHostDevice(); /// Diagnostics that are emitted only if we discover that the given function /// must be codegen'ed. Because handling these correctly adds overhead to /// compilation, this is currently only enabled for CUDA compilations. llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>, std::vector<PartialDiagnosticAt>> DeviceDeferredDiags; /// A pair of a canonical FunctionDecl and a SourceLocation. When used as the /// key in a hashtable, both the FD and location are hashed. struct FunctionDeclAndLoc { CanonicalDeclPtr<FunctionDecl> FD; SourceLocation Loc; }; /// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a /// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the /// same deferred diag twice. llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags; /// An inverse call graph, mapping known-emitted functions to one of their /// known-emitted callers (plus the location of the call). /// /// Functions that we can tell a priori must be emitted aren't added to this /// map. llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>, /* Caller = */ FunctionDeclAndLoc> DeviceKnownEmittedFns; /// A partial call graph maintained during CUDA/OpenMP device code compilation /// to support deferred diagnostics. /// /// Functions are only added here if, at the time they're considered, they are /// not known-emitted. As soon as we discover that a function is /// known-emitted, we remove it and everything it transitively calls from this /// set and add those functions to DeviceKnownEmittedFns. llvm::DenseMap</* Caller = */ CanonicalDeclPtr<FunctionDecl>, /* Callees = */ llvm::MapVector<CanonicalDeclPtr<FunctionDecl>, SourceLocation>> DeviceCallGraph; /// Diagnostic builder for CUDA/OpenMP devices errors which may or may not be /// deferred. /// /// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch) /// which are not allowed to appear inside __device__ functions and are /// allowed to appear in __host__ __device__ functions only if the host+device /// function is never codegen'ed. /// /// To handle this, we use the notion of "deferred diagnostics", where we /// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed. /// /// This class lets you emit either a regular diagnostic, a deferred /// diagnostic, or no diagnostic at all, according to an argument you pass to /// its constructor, thus simplifying the process of creating these "maybe /// deferred" diagnostics. class DeviceDiagBuilder { public: enum Kind { /// Emit no diagnostics. K_Nop, /// Emit the diagnostic immediately (i.e., behave like Sema::Diag()). K_Immediate, /// Emit the diagnostic immediately, and, if it's a warning or error, also /// emit a call stack showing how this function can be reached by an a /// priori known-emitted function. K_ImmediateWithCallStack, /// Create a deferred diagnostic, which is emitted only if the function /// it's attached to is codegen'ed. Also emit a call stack as with /// K_ImmediateWithCallStack. 
K_Deferred }; DeviceDiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID, FunctionDecl *Fn, Sema &S); DeviceDiagBuilder(DeviceDiagBuilder &&D); DeviceDiagBuilder(const DeviceDiagBuilder &) = default; ~DeviceDiagBuilder(); /// Convertible to bool: True if we immediately emitted an error, false if /// we didn't emit an error or we created a deferred error. /// /// Example usage: /// /// if (DeviceDiagBuilder(...) << foo << bar) /// return ExprError(); /// /// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably /// want to use these instead of creating a DeviceDiagBuilder yourself. operator bool() const { return ImmediateDiag.hasValue(); } template <typename T> friend const DeviceDiagBuilder &operator<<(const DeviceDiagBuilder &Diag, const T &Value) { if (Diag.ImmediateDiag.hasValue()) *Diag.ImmediateDiag << Value; else if (Diag.PartialDiagId.hasValue()) Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second << Value; return Diag; } private: Sema &S; SourceLocation Loc; unsigned DiagID; FunctionDecl *Fn; bool ShowCallStack; // Invariant: At most one of these Optionals has a value. // FIXME: Switch these to a Variant once that exists. llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag; llvm::Optional<unsigned> PartialDiagId; }; /// Indicate that this function (and thus everything it transtively calls) /// will be codegen'ed, and emit any deferred diagnostics on this function and /// its (transitive) callees. void markKnownEmitted( Sema &S, FunctionDecl *OrigCaller, FunctionDecl *OrigCallee, SourceLocation OrigLoc, const llvm::function_ref<bool(Sema &, FunctionDecl *)> IsKnownEmitted); /// Creates a DeviceDiagBuilder that emits the diagnostic if the current context /// is "used as device code". /// /// - If CurContext is a __host__ function, does not emit any diagnostics. /// - If CurContext is a __device__ or __global__ function, emits the /// diagnostics immediately. /// - If CurContext is a __host__ __device__ function and we are compiling for /// the device, creates a diagnostic which is emitted if and when we realize /// that the function will be codegen'ed. /// /// Example usage: /// /// // Variable-length arrays are not allowed in CUDA device code. /// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget()) /// return ExprError(); /// // Otherwise, continue parsing as normal. DeviceDiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID); /// Creates a DeviceDiagBuilder that emits the diagnostic if the current context /// is "used as host code". /// /// Same as CUDADiagIfDeviceCode, with "host" and "device" switched. DeviceDiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID); /// Creates a DeviceDiagBuilder that emits the diagnostic if the current /// context is "used as device code". /// /// - If CurContext is a `declare target` function or it is known that the /// function is emitted for the device, emits the diagnostics immediately. /// - If CurContext is a non-`declare target` function and we are compiling /// for the device, creates a diagnostic which is emitted if and when we /// realize that the function will be codegen'ed. /// /// Example usage: /// /// // Variable-length arrays are not allowed in NVPTX device code. /// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported)) /// return ExprError(); /// // Otherwise, continue parsing as normal. 
DeviceDiagBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID); DeviceDiagBuilder targetDiag(SourceLocation Loc, unsigned DiagID); enum CUDAFunctionTarget { CFT_Device, CFT_Global, CFT_Host, CFT_HostDevice, CFT_InvalidTarget }; /// Determines whether the given function is a CUDA device/host/kernel/etc. /// function. /// /// Use this rather than examining the function's attributes yourself -- you /// will get it wrong. Returns CFT_Host if D is null. CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D, bool IgnoreImplicitHDAttr = false); CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs); /// Gets the CUDA target for the current context. CUDAFunctionTarget CurrentCUDATarget() { return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext)); } // CUDA function call preference. Must be ordered numerically from // worst to best. enum CUDAFunctionPreference { CFP_Never, // Invalid caller/callee combination. CFP_WrongSide, // Calls from host-device to host or device // function that do not match current compilation // mode. CFP_HostDevice, // Any calls to host/device functions. CFP_SameSide, // Calls from host-device to host or device // function matching current compilation mode. CFP_Native, // host-to-host or device-to-device calls. }; /// Identifies relative preference of a given Caller/Callee /// combination, based on their host/device attributes. /// \param Caller function which needs address of \p Callee. /// nullptr in case of global context. /// \param Callee target function /// /// \returns preference value for particular Caller/Callee combination. CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller, const FunctionDecl *Callee); /// Determines whether Caller may invoke Callee, based on their CUDA /// host/device attributes. Returns false if the call is not allowed. /// /// Note: Will return true for CFP_WrongSide calls. These may appear in /// semantically correct CUDA programs, but only if they're never codegen'ed. bool IsAllowedCUDACall(const FunctionDecl *Caller, const FunctionDecl *Callee) { return IdentifyCUDAPreference(Caller, Callee) != CFP_Never; } /// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD, /// depending on FD and the current compilation settings. void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD, const LookupResult &Previous); public: /// Check whether we're allowed to call Callee from the current context. /// /// - If the call is never allowed in a semantically-correct program /// (CFP_Never), emits an error and returns false. /// /// - If the call is allowed in semantically-correct programs, but only if /// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to /// be emitted if and when the caller is codegen'ed, and returns true. /// /// Will only create deferred diagnostics for a given SourceLocation once, /// so you can safely call this multiple times without generating duplicate /// deferred errors. /// /// - Otherwise, returns true without emitting any diagnostics. bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee); /// Set __device__ or __host__ __device__ attributes on the given lambda /// operator() method. /// /// CUDA lambdas declared inside __device__ or __global__ functions inherit /// the __device__ attribute. Similarly, lambdas inside __host__ __device__ /// functions become __host__ __device__ themselves. 
void CUDASetLambdaAttrs(CXXMethodDecl *Method); /// Finds a function in \p Matches with highest calling priority /// from \p Caller context and erases all functions with lower /// calling priority. void EraseUnwantedCUDAMatches( const FunctionDecl *Caller, SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches); /// Given a implicit special member, infer its CUDA target from the /// calls it needs to make to underlying base/field special members. /// \param ClassDecl the class for which the member is being created. /// \param CSM the kind of special member. /// \param MemberDecl the special member itself. /// \param ConstRHS true if this is a copy operation with a const object on /// its RHS. /// \param Diagnose true if this call should emit diagnostics. /// \return true if there was an error inferring. /// The result of this call is implicit CUDA target attribute(s) attached to /// the member declaration. bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl, CXXSpecialMember CSM, CXXMethodDecl *MemberDecl, bool ConstRHS, bool Diagnose); /// \return true if \p CD can be considered empty according to CUDA /// (E.2.3.1 in CUDA 7.5 Programming guide). bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD); bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD); // \brief Checks that initializers of \p Var satisfy CUDA restrictions. In // case of error emits appropriate diagnostic and invalidates \p Var. // // \details CUDA allows only empty constructors as initializers for global // variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all // __shared__ variables whether they are local or not (they all are implicitly // static in CUDA). One exception is that CUDA allows constant initializers // for __constant__ and __device__ variables. void checkAllowedCUDAInitializer(VarDecl *VD); /// Check whether NewFD is a valid overload for CUDA. Emits /// diagnostics and invalidates NewFD if not. void checkCUDATargetOverload(FunctionDecl *NewFD, const LookupResult &Previous); /// Copies target attributes from the template TD to the function FD. void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD); /// Returns the name of the launch configuration function. This is the name /// of the function that will be called to configure kernel call, with the /// parameters specified via <<<>>>. std::string getCudaConfigureFuncName() const; /// \name Code completion //@{ /// Describes the context in which code completion occurs. enum ParserCompletionContext { /// Code completion occurs at top-level or namespace context. PCC_Namespace, /// Code completion occurs within a class, struct, or union. PCC_Class, /// Code completion occurs within an Objective-C interface, protocol, /// or category. PCC_ObjCInterface, /// Code completion occurs within an Objective-C implementation or /// category implementation PCC_ObjCImplementation, /// Code completion occurs within the list of instance variables /// in an Objective-C interface, protocol, category, or implementation. PCC_ObjCInstanceVariableList, /// Code completion occurs following one or more template /// headers. PCC_Template, /// Code completion occurs following one or more template /// headers within a class. PCC_MemberTemplate, /// Code completion occurs within an expression. PCC_Expression, /// Code completion occurs within a statement, which may /// also be an expression or a declaration. 
PCC_Statement, /// Code completion occurs at the beginning of the /// initialization statement (or expression) in a for loop. PCC_ForInit, /// Code completion occurs within the condition of an if, /// while, switch, or for statement. PCC_Condition, /// Code completion occurs within the body of a function on a /// recovery path, where we do not have a specific handle on our position /// in the grammar. PCC_RecoveryInFunction, /// Code completion occurs where only a type is permitted. PCC_Type, /// Code completion occurs in a parenthesized expression, which /// might also be a type cast. PCC_ParenthesizedExpression, /// Code completion occurs within a sequence of declaration /// specifiers within a function, method, or block. PCC_LocalDeclarationSpecifiers }; void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path); void CodeCompleteOrdinaryName(Scope *S, ParserCompletionContext CompletionContext); void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers, bool AllowNestedNameSpecifiers); struct CodeCompleteExpressionData; void CodeCompleteExpression(Scope *S, const CodeCompleteExpressionData &Data); void CodeCompleteExpression(Scope *S, QualType PreferredType, bool IsParenthesized = false); void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase, SourceLocation OpLoc, bool IsArrow, bool IsBaseExprStatement, QualType PreferredType); void CodeCompletePostfixExpression(Scope *S, ExprResult LHS, QualType PreferredType); void CodeCompleteTag(Scope *S, unsigned TagSpec); void CodeCompleteTypeQualifiers(DeclSpec &DS); void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D, const VirtSpecifiers *VS = nullptr); void CodeCompleteBracketDeclarator(Scope *S); void CodeCompleteCase(Scope *S); /// Reports signatures for a call to CodeCompleteConsumer and returns the /// preferred type for the current argument. Returned type can be null. 
QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type, SourceLocation Loc, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl, CXXScopeSpec SS, ParsedType TemplateTypeTy, ArrayRef<Expr *> ArgExprs, IdentifierInfo *II, SourceLocation OpenParLoc); void CodeCompleteInitializer(Scope *S, Decl *D); void CodeCompleteAfterIf(Scope *S); void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext, QualType BaseType); void CodeCompleteUsing(Scope *S); void CodeCompleteUsingDirective(Scope *S); void CodeCompleteNamespaceDecl(Scope *S); void CodeCompleteNamespaceAliasDecl(Scope *S); void CodeCompleteOperatorName(Scope *S); void CodeCompleteConstructorInitializer( Decl *Constructor, ArrayRef<CXXCtorInitializer *> Initializers); void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro, bool AfterAmpersand); void CodeCompleteObjCAtDirective(Scope *S); void CodeCompleteObjCAtVisibility(Scope *S); void CodeCompleteObjCAtStatement(Scope *S); void CodeCompleteObjCAtExpression(Scope *S); void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS); void CodeCompleteObjCPropertyGetter(Scope *S); void CodeCompleteObjCPropertySetter(Scope *S); void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS, bool IsParameter); void CodeCompleteObjCMessageReceiver(Scope *S); void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression); void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, bool IsSuper = false); void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, ObjCInterfaceDecl *Super = nullptr); void CodeCompleteObjCForCollection(Scope *S, DeclGroupPtrTy IterationVar); void CodeCompleteObjCSelector(Scope *S, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCProtocolReferences( ArrayRef<IdentifierLocPair> Protocols); void CodeCompleteObjCProtocolDecl(Scope *S); void CodeCompleteObjCInterfaceDecl(Scope *S); void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationDecl(Scope *S); void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCPropertyDefinition(Scope *S); void CodeCompleteObjCPropertySynthesizeIvar(Scope *S, IdentifierInfo *PropertyName); void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod, ParsedType ReturnType); void CodeCompleteObjCMethodDeclSelector(Scope *S, bool IsInstanceMethod, bool AtParameterName, ParsedType ReturnType, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName, SourceLocation ClassNameLoc, bool IsBaseExprStatement); void CodeCompletePreprocessorDirective(bool InConditional); void CodeCompleteInPreprocessorConditionalExclusion(Scope *S); void CodeCompletePreprocessorMacroName(bool IsDefinition); void CodeCompletePreprocessorExpression(); void CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned Argument); void CodeCompleteIncludedFile(llvm::StringRef 
Dir, bool IsAngled); void CodeCompleteNaturalLanguage(); void CodeCompleteAvailabilityPlatformName(); void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo, SmallVectorImpl<CodeCompletionResult> &Results); //@} //===--------------------------------------------------------------------===// // Extra semantic analysis beyond the C type system public: SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL, unsigned ByteNo) const; private: void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, const ArraySubscriptExpr *ASE=nullptr, bool AllowOnePastEnd=true, bool IndexNegated=false); void CheckArrayAccess(const Expr *E); // Used to grab the relevant information from a FormatAttr and a // FunctionDeclaration. struct FormatStringInfo { unsigned FormatIdx; unsigned FirstDataArg; bool HasVAListArg; }; static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, FormatStringInfo *FSI); bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc, ArrayRef<const Expr *> Args); bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto); void CheckConstructorCall(FunctionDecl *FDecl, ArrayRef<const Expr *> Args, const FunctionProtoType *Proto, SourceLocation Loc); void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, const Expr *ThisArg, ArrayRef<const Expr *> Args, bool IsMemberFunction, SourceLocation Loc, SourceRange Range, VariadicCallType CallType); bool CheckObjCString(Expr *Arg); ExprResult CheckOSLogFormatStringArg(Expr *Arg); ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, CallExpr *TheCall); bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, unsigned MaxWidth); bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call); bool SemaBuiltinUnorderedCompare(CallExpr *TheCall); bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs); bool SemaBuiltinVSX(CallExpr *TheCall); bool SemaBuiltinOSLogFormat(CallExpr *TheCall); public: // Used by C++ template instantiation. 
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall); ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, SourceLocation BuiltinLoc, SourceLocation RParenLoc); private: bool SemaBuiltinPrefetch(CallExpr *TheCall); bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall); bool SemaBuiltinAssume(CallExpr *TheCall); bool SemaBuiltinAssumeAligned(CallExpr *TheCall); bool SemaBuiltinLongjmp(CallExpr *TheCall); bool SemaBuiltinSetjmp(CallExpr *TheCall); ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult); ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult); ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op); ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult, bool IsDelete); bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result); bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low, int High, bool RangeIsError = true); bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum, unsigned Multiple); bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, int ArgNum, unsigned ExpectedFieldNum, bool AllowName); public: enum FormatStringType { FST_Scanf, FST_Printf, FST_NSString, FST_Strftime, FST_Strfmon, FST_Kprintf, FST_FreeBSDKPrintf, FST_OSTrace, FST_OSLog, FST_Unknown }; static FormatStringType GetFormatStringType(const FormatAttr *Format); bool FormatStringHasSArg(const StringLiteral *FExpr); static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx); private: bool CheckFormatArguments(const FormatAttr *Format, ArrayRef<const Expr *> Args, bool IsCXXMember, VariadicCallType CallType, SourceLocation Loc, SourceRange Range, llvm::SmallBitVector &CheckedVarArgs); bool CheckFormatArguments(ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, VariadicCallType CallType, SourceLocation Loc, SourceRange range, llvm::SmallBitVector &CheckedVarArgs); void CheckAbsoluteValueFunction(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMemaccessArguments(const CallExpr *Call, unsigned BId, IdentifierInfo *FnName); void CheckStrlcpycatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckStrncatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckReturnValExpr(Expr *RetValExp, QualType lhsType, SourceLocation ReturnLoc, bool isObjCMethod = false, const AttrVec *Attrs = nullptr, const FunctionDecl *FD = nullptr); public: void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS); private: void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation()); void CheckBoolLikeConversion(Expr *E, SourceLocation CC); void CheckForIntOverflow(Expr *E); void CheckUnsequencedOperations(Expr *E); /// Perform semantic checks on a completed expression. This will either /// be a full-expression or a default argument expression. void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(), bool IsConstexpr = false); void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field, Expr *Init); /// Check if there is a field shadowing. void CheckShadowInheritedFields(const SourceLocation &Loc, DeclarationName FieldName, const CXXRecordDecl *RD, bool DeclIsField = true); /// Check if the given expression contains 'break' or 'continue' /// statement that produces control flow different from GCC. 
void CheckBreakContinueBinding(Expr *E); /// Check whether receiver is mutable ObjC container which /// attempts to add itself into the container void CheckObjCCircularContainer(ObjCMessageExpr *Message); void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE); void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc, bool DeleteWasArrayForm); public: /// Register a magic integral constant to be used as a type tag. void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind, uint64_t MagicValue, QualType Type, bool LayoutCompatible, bool MustBeNull); struct TypeTagData { TypeTagData() {} TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) : Type(Type), LayoutCompatible(LayoutCompatible), MustBeNull(MustBeNull) {} QualType Type; /// If true, \c Type should be compared with other expression's types for /// layout-compatibility. unsigned LayoutCompatible : 1; unsigned MustBeNull : 1; }; /// A pair of ArgumentKind identifier and magic value. This uniquely /// identifies the magic value. typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue; private: /// A map from magic value to type information. std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>> TypeTagForDatatypeMagicValues; /// Peform checks on a call of a function with argument_with_type_tag /// or pointer_with_type_tag attributes. void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr, const ArrayRef<const Expr *> ExprArgs, SourceLocation CallSiteLoc); /// Check if we are taking the address of a packed field /// as this may be a problem if the pointer value is dereferenced. void CheckAddressOfPackedMember(Expr *rhs); /// The parser's current scope. /// /// The parser maintains this state here. Scope *CurScope; mutable IdentifierInfo *Ident_super; mutable IdentifierInfo *Ident___float128; /// Nullability type specifiers. IdentifierInfo *Ident__Nonnull = nullptr; IdentifierInfo *Ident__Nullable = nullptr; IdentifierInfo *Ident__Null_unspecified = nullptr; IdentifierInfo *Ident_NSError = nullptr; /// The handler for the FileChanged preprocessor events. /// /// Used for diagnostics that implement custom semantic analysis for #include /// directives, like -Wpragma-pack. sema::SemaPPCallbacks *SemaPPCallbackHandler; protected: friend class Parser; friend class InitializationSequence; friend class ASTReader; friend class ASTDeclReader; friend class ASTWriter; public: /// Retrieve the keyword associated IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability); /// The struct behind the CFErrorRef pointer. RecordDecl *CFError = nullptr; /// Retrieve the identifier "NSError". IdentifierInfo *getNSErrorIdent(); /// Retrieve the parser's current scope. /// /// This routine must only be used when it is certain that semantic analysis /// and the parser are in precisely the same context, which is not the case /// when, e.g., we are performing any kind of template instantiation. /// Therefore, the only safe places to use this scope are in the parser /// itself and in routines directly invoked from the parser and *never* from /// template substitution or instantiation. Scope *getCurScope() const { return CurScope; } void incrementMSManglingNumber() const { return CurScope->incrementMSManglingNumber(); } IdentifierInfo *getSuperIdentifier() const; IdentifierInfo *getFloat128Identifier() const; Decl *getObjCDeclContext() const; DeclContext *getCurLexicalContext() const { return OriginalLexicalContext ? 
OriginalLexicalContext : CurContext; } const DeclContext *getCurObjCLexicalContext() const { const DeclContext *DC = getCurLexicalContext(); // A category implicitly has the attribute of the interface. if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC)) DC = CatD->getClassInterface(); return DC; } /// To be used for checking whether the arguments being passed to /// function exceeds the number of parameters expected for it. static bool TooManyArguments(size_t NumParams, size_t NumArgs, bool PartialOverloading = false) { // We check whether we're just after a comma in code-completion. if (NumArgs > 0 && PartialOverloading) return NumArgs + 1 > NumParams; // If so, we view as an extra argument. return NumArgs > NumParams; } // Emitting members of dllexported classes is delayed until the class // (including field initializers) is fully parsed. SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses; private: class SavePendingParsedClassStateRAII { public: SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); } ~SavePendingParsedClassStateRAII() { assert(S.DelayedOverridingExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); assert(S.DelayedEquivalentExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); assert(S.DelayedDefaultedMemberExceptionSpecs.empty() && "there shouldn't be any pending delayed defaulted member " "exception specs"); assert(S.DelayedDllExportClasses.empty() && "there shouldn't be any pending delayed DLL export classes"); swapSavedState(); } private: Sema &S; decltype(DelayedOverridingExceptionSpecChecks) SavedOverridingExceptionSpecChecks; decltype(DelayedEquivalentExceptionSpecChecks) SavedEquivalentExceptionSpecChecks; decltype(DelayedDefaultedMemberExceptionSpecs) SavedDefaultedMemberExceptionSpecs; decltype(DelayedDllExportClasses) SavedDllExportClasses; void swapSavedState() { SavedOverridingExceptionSpecChecks.swap( S.DelayedOverridingExceptionSpecChecks); SavedEquivalentExceptionSpecChecks.swap( S.DelayedEquivalentExceptionSpecChecks); SavedDefaultedMemberExceptionSpecs.swap( S.DelayedDefaultedMemberExceptionSpecs); SavedDllExportClasses.swap(S.DelayedDllExportClasses); } }; /// Helper class that collects misaligned member designations and /// their location info for delayed diagnostics. struct MisalignedMember { Expr *E; RecordDecl *RD; ValueDecl *MD; CharUnits Alignment; MisalignedMember() : E(), RD(), MD(), Alignment() {} MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment) : E(E), RD(RD), MD(MD), Alignment(Alignment) {} explicit MisalignedMember(Expr *E) : MisalignedMember(E, nullptr, nullptr, CharUnits()) {} bool operator==(const MisalignedMember &m) { return this->E == m.E; } }; /// Small set of gathered accesses to potentially misaligned members /// due to the packed attribute. SmallVector<MisalignedMember, 4> MisalignedMembers; /// Adds an expression to the set of gathered misaligned members. void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment); public: /// Diagnoses the current set of gathered accesses. This typically /// happens at full expression level. The set is cleared after emitting the /// diagnostics. void DiagnoseMisalignedMembers(); /// This function checks if the expression is in the sef of potentially /// misaligned members and it is converted to some pointer type T with lower /// or equal alignment requirements. If so it removes it. 
This is used when /// we do not want to diagnose such misaligned access (e.g. in conversions to /// void*). void DiscardMisalignedMemberAddress(const Type *T, Expr *E); /// This function calls Action when it determines that E designates a /// misaligned member due to the packed attribute. This is used to emit /// local diagnostics like in reference binding. void RefersToMemberWithReducedAlignment( Expr *E, llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)> Action); }; /// RAII object that enters a new expression evaluation context. class EnterExpressionEvaluationContext { Sema &Actions; bool Entered = true; public: EnterExpressionEvaluationContext( Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext = Sema::ExpressionEvaluationContextRecord::EK_Other, bool ShouldEnter = true) : Actions(Actions), Entered(ShouldEnter) { if (Entered) Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl, ExprContext); } EnterExpressionEvaluationContext( Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Sema::ReuseLambdaContextDecl_t, Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext = Sema::ExpressionEvaluationContextRecord::EK_Other) : Actions(Actions) { Actions.PushExpressionEvaluationContext( NewContext, Sema::ReuseLambdaContextDecl, ExprContext); } enum InitListTag { InitList }; EnterExpressionEvaluationContext(Sema &Actions, InitListTag, bool ShouldEnter = true) : Actions(Actions), Entered(false) { // In C++11 onwards, narrowing checks are performed on the contents of // braced-init-lists, even when they occur within unevaluated operands. // Therefore we still need to instantiate constexpr functions used in such // a context. if (ShouldEnter && Actions.isUnevaluatedContext() && Actions.getLangOpts().CPlusPlus11) { Actions.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::UnevaluatedList); Entered = true; } } ~EnterExpressionEvaluationContext() { if (Entered) Actions.PopExpressionEvaluationContext(); } }; DeductionFailureInfo MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK, sema::TemplateDeductionInfo &Info); /// Contains a late templated function. /// Will be parsed at the end of the translation unit, used by Sema & Parser. struct LateParsedTemplate { CachedTokens Toks; /// The template function declaration to be late parsed. Decl *D; }; } // end namespace clang namespace llvm { // Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its // SourceLocation. template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> { using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc; using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>; static FunctionDeclAndLoc getEmptyKey() { return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()}; } static FunctionDeclAndLoc getTombstoneKey() { return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()}; } static unsigned getHashValue(const FunctionDeclAndLoc &FDL) { return hash_combine(FDBaseInfo::getHashValue(FDL.FD), FDL.Loc.getRawEncoding()); } static bool isEqual(const FunctionDeclAndLoc &LHS, const FunctionDeclAndLoc &RHS) { return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc; } }; } // namespace llvm #endif
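// Illustrative note (editorial, not part of the original Sema.h): the
// DenseMapInfo<FunctionDeclAndLoc> specialization above is what lets the type
// be used directly as a hash key, e.g. for LocsWithCUDACallDiags, which is how
// duplicate deferred "bad call" diagnostics are avoided. A minimal usage
// sketch, assuming a FunctionDecl *FD and a SourceLocation CallLoc are at hand
// (both names are hypothetical):
//
//   llvm::DenseSet<clang::Sema::FunctionDeclAndLoc> Seen;
//   if (!Seen.insert({FD, CallLoc}).second) {
//     // The same function/location pair was already recorded, so another
//     // deferred diagnostic for this call site can be skipped.
//   }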
otbSampleAugmentation.h
/*
 * Copyright (C) 2005-2019 Centre National d'Etudes Spatiales (CNES)
 *
 * This file is part of Orfeo Toolbox
 *
 * https://www.orfeo-toolbox.org/
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef otbSampleAugmentation_h
#define otbSampleAugmentation_h

#ifdef _OPENMP
#include <omp.h>
#endif

#include <vector>
#include <algorithm>
#include <random>
#include <ctime>
#include <cassert>
#include <cmath>

namespace otb
{
namespace sampleAugmentation
{
using SampleType       = std::vector<double>;
using SampleVectorType = std::vector<SampleType>;

/** Estimate standard deviations of the components in one pass using
 *  Welford's algorithm */
SampleType EstimateStds(const SampleVectorType& samples)
{
  const auto nbSamples    = samples.size();
  const long nbComponents = static_cast<long>(samples[0].size());
  SampleType stds(nbComponents, 0.0);
  SampleType means(nbComponents, 0.0);
  for (size_t i = 0; i < nbSamples; ++i)
  {
    auto norm_factor = 1.0 / (i + 1);
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (long j = 0; j < nbComponents; ++j)
    {
      const auto mu    = means[j];
      const auto x     = samples[i][j];
      auto       muNew = mu + (x - mu) * norm_factor;
      stds[j] += (x - mu) * (x - muNew);
      means[j] = muNew;
    }
  }
#ifdef _OPENMP
#pragma omp parallel for
#endif
  for (long j = 0; j < nbComponents; ++j)
  {
    stds[j] = std::sqrt(stds[j] / nbSamples);
  }
  return stds;
}

/** Create new samples by replicating input samples. We loop through
 *  the input samples and add them to the new data set until nbSamples
 *  are added. The elements of newSamples are removed before proceeding.
 */
void ReplicateSamples(const SampleVectorType& inSamples, const size_t nbSamples, SampleVectorType& newSamples)
{
  newSamples.resize(nbSamples);
  const long long nbSamplesLL = static_cast<long long>(nbSamples);
#ifdef _OPENMP
#pragma omp parallel for
#endif
  for (long long i = 0; i < nbSamplesLL; ++i)
  {
    // The source index is derived from i so every iteration is independent
    // and the loop remains correct when run in parallel.
    newSamples[i] = inSamples[i % inSamples.size()];
  }
}

/** Create new samples by adding noise to existing samples. Gaussian
 *  noise is added to randomly selected samples. The standard deviation
 *  of the noise added to each component is the same as the one of the
 *  input variables divided by stdFactor (defaults to 10). The
 *  elements of newSamples are removed before proceeding.
 */
void JitterSamples(const SampleVectorType& inSamples, const size_t nbSamples, SampleVectorType& newSamples,
                   float stdFactor = 10, const int seed = std::time(nullptr))
{
  newSamples.resize(nbSamples);
  const long nbComponents = static_cast<long>(inSamples[0].size());
  std::random_device rd;
  std::mt19937       gen(rd());
  // The input samples are selected randomly with replacement
  std::srand(seed);
  // We use one gaussian distribution per component since they may
  // have different stds
  auto stds = EstimateStds(inSamples);
  std::vector<std::normal_distribution<double>> gaussDis(nbComponents);
#ifdef _OPENMP
#pragma omp parallel for
#endif
  for (long i = 0; i < nbComponents; ++i)
    gaussDis[i] = std::normal_distribution<double>{0.0, stds[i] / stdFactor};

  for (size_t i = 0; i < nbSamples; ++i)
  {
    newSamples[i] = inSamples[std::rand() % inSamples.size()];
    // The generator gen is shared by all components, so this loop runs serially.
    for (long j = 0; j < nbComponents; ++j)
      newSamples[i][j] += gaussDis[j](gen);
  }
}

struct NeighborType
{
  size_t index;
  double distance;
};

struct NeighborSorter
{
  constexpr bool operator()(const NeighborType& a, const NeighborType& b) const
  {
    return b.distance > a.distance;
  }
};

double ComputeSquareDistance(const SampleType& x, const SampleType& y)
{
  assert(x.size() == y.size());
  double dist{0};
  for (size_t i = 0; i < x.size(); ++i)
  {
    dist += (x[i] - y[i]) * (x[i] - y[i]);
  }
  return dist / (x.size() * x.size());
}

using NNIndicesType = std::vector<NeighborType>;
using NNVectorType  = std::vector<NNIndicesType>;

/** Returns the indices of the nearest neighbors for each input sample */
void FindKNNIndices(const SampleVectorType& inSamples, const size_t nbNeighbors, NNVectorType& nnVector)
{
  const long long nbSamples = static_cast<long long>(inSamples.size());
  nnVector.resize(nbSamples);
#ifdef _OPENMP
#pragma omp parallel for
#endif
  for (long long sampleIdx = 0; sampleIdx < nbSamples; ++sampleIdx)
  {
    NNIndicesType nns;
    for (long long neighborIdx = 0; neighborIdx < nbSamples; ++neighborIdx)
    {
      if (sampleIdx != neighborIdx)
        nns.push_back({static_cast<size_t>(neighborIdx),
                       ComputeSquareDistance(inSamples[sampleIdx], inSamples[neighborIdx])});
    }
    std::partial_sort(nns.begin(), nns.begin() + nbNeighbors, nns.end(), NeighborSorter{});
    nns.resize(nbNeighbors);
    nnVector[sampleIdx] = std::move(nns);
  }
}

/** Generate the new sample on the line linking s1 and s2 */
SampleType SmoteCombine(const SampleType& s1, const SampleType& s2, double position)
{
  auto result = s1;
  for (size_t i = 0; i < s1.size(); ++i)
    result[i] = s1[i] + (s2[i] - s1[i]) * position;
  return result;
}

/** Create new samples using the SMOTE algorithm
 *  Chawla, N. V., Bowyer, K. W., Hall, L. O., & Kegelmeyer, W. P.,
 *  Smote: synthetic minority over-sampling technique, Journal of
 *  Artificial Intelligence Research, 16, 321-357 (2002).
 *  http://dx.doi.org/10.1613/jair.953
 */
void Smote(const SampleVectorType& inSamples, const size_t nbSamples, SampleVectorType& newSamples,
           const int nbNeighbors, const int seed = std::time(nullptr))
{
  newSamples.resize(nbSamples);
  const long long nbSamplesLL = static_cast<long long>(nbSamples);
  NNVectorType    nnVector;
  FindKNNIndices(inSamples, nbNeighbors, nnVector);
  // The input samples are selected randomly with replacement
  std::srand(seed);
#ifdef _OPENMP
#pragma omp parallel for
#endif
  for (long long i = 0; i < nbSamplesLL; ++i)
  {
    const auto sampleIdx   = std::rand() % (inSamples.size());
    const auto sample      = inSamples[sampleIdx];
    const auto neighborIdx = nnVector[sampleIdx][std::rand() % nbNeighbors].index;
    const auto neighbor    = inSamples[neighborIdx];
    newSamples[i]          = SmoteCombine(sample, neighbor, std::rand() / double{RAND_MAX});
  }
}
} // end namespace sampleAugmentation
} // end namespace otb
#endif
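A short usage sketch for the helpers above (not part of the header): it assumes otbSampleAugmentation.h is on the include path, and the sample values and counts are made up purely for illustration.

// Usage sketch: run the three augmentation strategies on a tiny 2-component set.
#include <iostream>
#include "otbSampleAugmentation.h"

int main()
{
  using namespace otb::sampleAugmentation;
  // Four 2-component samples; values are illustrative only.
  SampleVectorType samples = {{1.0, 10.0}, {2.0, 12.0}, {3.0, 11.0}, {4.0, 13.0}};

  SampleVectorType replicated, jittered, smoted;
  ReplicateSamples(samples, 10, replicated);       // cycle through the inputs until 10 samples exist
  JitterSamples(samples, 10, jittered, 10.f, 42);  // per-component Gaussian noise; 42 seeds the sample selection
  Smote(samples, 10, smoted, 2, 42);               // interpolate towards one of the 2 nearest neighbours

  const auto stds = EstimateStds(samples);         // per-component standard deviations (Welford)
  std::cout << "std[0]=" << stds[0] << ", std[1]=" << stds[1] << '\n'
            << "generated " << smoted.size() << " SMOTE samples\n";
  return 0;
}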
ZQ_CNN_MTCNN_AspectRatio.h
#ifndef _ZQ_CNN_MTCNN_ASPECT_RATIO_H_ #define _ZQ_CNN_MTCNN_ASPECT_RATIO_H_ #pragma once #include "ZQ_CNN_Net.h" #include "ZQ_CNN_BBoxUtils.h" #include <omp.h> namespace ZQ { class ZQ_CNN_MTCNN_AspectRatio { public: using string = std::string; ZQ_CNN_MTCNN_AspectRatio() { min_size = 60; thresh[0] = 0.6; thresh[1] = 0.7; thresh[2] = 0.8; nms_thresh[0] = 0.4; nms_thresh[1] = 0.5; nms_thresh[2] = 0.5; width = 0; height = 0; width_half = 0; height_half = 0; factor = 0.709; pnet_overlap_thresh_count = 3; pnet_size = 20; pnet_stride = 4; special_handle_very_big_face = false; force_run_pnet_multithread = false; show_debug_info = false; limit_r_num = 0; limit_o_num = 0; } ~ZQ_CNN_MTCNN_AspectRatio() { } private: #if __ARM_NEON const int BATCH_SIZE = 16; #else const int BATCH_SIZE = 64; #endif std::vector<ZQ_CNN_Net> pnet, rnet, onet; int thread_num; float thresh[3], nms_thresh[3]; int min_size; int width, height; int width_half, height_half; float factor; int pnet_overlap_thresh_count; int pnet_size; int pnet_stride; int rnet_size; int onet_size; int lnet_size; bool special_handle_very_big_face; float nms_thresh_per_scale; bool force_run_pnet_multithread; std::vector<float> scales, scales_xhalf, scales_yhalf; std::vector<ZQ_CNN_Tensor4D_NHW_C_Align128bit> pnet_images, pnet_images_xhalf, pnet_images_yhalf; ZQ_CNN_Tensor4D_NHW_C_Align128bit input, input_xhalf, input_yhalf; ZQ_CNN_Tensor4D_NHW_C_Align128bit rnet_image, onet_image; bool show_debug_info; int limit_r_num; int limit_o_num; public: void TurnOnShowDebugInfo() { show_debug_info = true; } void TurnOffShowDebugInfo() { show_debug_info = false; } void SetLimit(int limit_r = 0, int limit_o = 0) { limit_r_num = limit_r; limit_o_num = limit_o; } bool Init(const string& pnet_param, const string& pnet_model, const string& rnet_param, const string& rnet_model, const string& onet_param, const string& onet_model, int thread_num = 1) { if (thread_num < 1) force_run_pnet_multithread = true; else force_run_pnet_multithread = false; thread_num = __max(1, thread_num); pnet.resize(thread_num); rnet.resize(thread_num); onet.resize(thread_num); bool ret = true; for (int i = 0; i < thread_num; i++) { ret = pnet[i].LoadFrom(pnet_param, pnet_model, true, 1e-9, true) && rnet[i].LoadFrom(rnet_param, rnet_model, true, 1e-9, true) && onet[i].LoadFrom(onet_param, onet_model, true, 1e-9, true); if (!ret) break; } if (!ret) { pnet.clear(); rnet.clear(); onet.clear(); this->thread_num = 0; } else this->thread_num = thread_num; if (show_debug_info) { printf("rnet = %.1f M, onet = %.1f M\n", rnet[0].GetNumOfMulAdd() / (1024.0*1024.0), onet[0].GetNumOfMulAdd() / (1024.0*1024.0)); } int C, H, W; rnet[0].GetInputDim(C, H, W); rnet_size = H; onet[0].GetInputDim(C, H, W); onet_size = H; return ret; } bool InitFromBuffer( const char* pnet_param, __int64 pnet_param_len, const char* pnet_model, __int64 pnet_model_len, const char* rnet_param, __int64 rnet_param_len, const char* rnet_model, __int64 rnet_model_len, const char* onet_param, __int64 onet_param_len, const char* onet_model, __int64 onet_model_len, int thread_num = 1) { if (thread_num < 1) force_run_pnet_multithread = true; else force_run_pnet_multithread = false; thread_num = __max(1, thread_num); pnet.resize(thread_num); rnet.resize(thread_num); onet.resize(thread_num); bool ret = true; for (int i = 0; i < thread_num; i++) { ret = pnet[i].LoadFromBuffer(pnet_param, pnet_param_len, pnet_model, pnet_model_len, true, 1e-9, true) && rnet[i].LoadFromBuffer(rnet_param, rnet_param_len, rnet_model, rnet_model_len, true, 1e-9, 
true) && onet[i].LoadFromBuffer(onet_param, onet_param_len, onet_model, onet_model_len, true, 1e-9, true); if (!ret) break; } if (!ret) { pnet.clear(); rnet.clear(); onet.clear(); this->thread_num = 0; } else this->thread_num = thread_num; if (show_debug_info) { printf("rnet = %.1f M, onet = %.1f M\n", rnet[0].GetNumOfMulAdd() / (1024.0*1024.0), onet[0].GetNumOfMulAdd() / (1024.0*1024.0)); } int C, H, W; rnet[0].GetInputDim(C, H, W); rnet_size = H; onet[0].GetInputDim(C, H, W); onet_size = H; return ret; } void SetPara(int w, int h, int min_face_size = 60, float pthresh = 0.6, float rthresh = 0.7, float othresh = 0.7, float nms_pthresh = 0.4, float nms_rthresh = 0.5, float nms_othresh = 0.5, float scale_factor = 0.709, int pnet_overlap_thresh_count = 3, int pnet_size = 20, int pnet_stride = 4, bool special_handle_very_big_face = false) { min_size = __max(pnet_size, min_face_size); thresh[0] = __max(0.1, pthresh); thresh[1] = __max(0.1, rthresh); thresh[2] = __max(0.1, othresh); nms_thresh[0] = __max(0.1, nms_pthresh); nms_thresh[1] = __max(0.1, nms_rthresh); nms_thresh[2] = __max(0.1, nms_othresh); scale_factor = __max(0.5, __min(0.97, scale_factor)); this->pnet_overlap_thresh_count = __max(0, pnet_overlap_thresh_count); this->pnet_size = pnet_size; this->pnet_stride = pnet_stride; this->special_handle_very_big_face = special_handle_very_big_face; if (pnet_size == 20 && pnet_stride == 4) nms_thresh_per_scale = 0.45; else nms_thresh_per_scale = 0.495; if (width != w || height != h || factor != scale_factor) { scales.clear(); pnet_images.clear(); width = w; height = h; float minside = __min(width, height); int MIN_DET_SIZE = pnet_size; float m = (float)MIN_DET_SIZE / min_size; minside *= m; while (minside > MIN_DET_SIZE) { scales.push_back(m); minside *= factor; m *= factor; } minside = __min(width, height); int count = scales.size(); for (int i = scales.size() - 1; i >= 0; i--) { if (ceil(scales[i] * minside) <= pnet_size) { count--; } } if (special_handle_very_big_face) { if (count > 2) count--; scales.resize(count); if (count > 0) { float last_size = ceil(scales[count - 1] * minside); for (int tmp_size = last_size - 1; tmp_size >= pnet_size + 1; tmp_size -= 2) { scales.push_back((float)tmp_size / minside); count++; } } scales.push_back((float)pnet_size / minside); count++; } else { scales.push_back((float)pnet_size / minside); count++; } pnet_images.resize(count); } if (width_half != w/2) { scales_xhalf.clear(); pnet_images_xhalf.clear(); width_half = w / 2; float minside = __min(width_half, height); int MIN_DET_SIZE = pnet_size; float m = (float)MIN_DET_SIZE / min_size; minside *= m; while (minside > MIN_DET_SIZE) { scales_xhalf.push_back(m); minside *= factor; m *= factor; } minside = __min(width_half, height); int count = scales_xhalf.size(); for (int i = scales_xhalf.size() - 1; i >= 0; i--) { if (ceil(scales_xhalf[i] * minside) <= pnet_size) { count--; } } if (special_handle_very_big_face) { if (count > 2) count--; scales_xhalf.resize(count); if (count > 0) { float last_size = ceil(scales_xhalf[count - 1] * minside); for (int tmp_size = last_size - 1; tmp_size >= pnet_size + 1; tmp_size -= 2) { scales_xhalf.push_back((float)tmp_size / minside); count++; } } scales_xhalf.push_back((float)pnet_size / minside); count++; } else { scales_xhalf.push_back((float)pnet_size / minside); count++; } pnet_images_xhalf.resize(count); } if (height_half != h / 2) { scales_yhalf.clear(); pnet_images_yhalf.clear(); height_half = h / 2; float minside = __min(width, height_half); int MIN_DET_SIZE = 
pnet_size; float m = (float)MIN_DET_SIZE / min_size; minside *= m; while (minside > MIN_DET_SIZE) { scales_yhalf.push_back(m); minside *= factor; m *= factor; } minside = __min(width, height_half); int count = scales_yhalf.size(); for (int i = scales_yhalf.size() - 1; i >= 0; i--) { if (ceil(scales_yhalf[i] * minside) <= pnet_size) { count--; } } if (special_handle_very_big_face) { if (count > 2) count--; scales_yhalf.resize(count); if (count > 0) { float last_size = ceil(scales_yhalf[count - 1] * minside); for (int tmp_size = last_size - 1; tmp_size >= pnet_size + 1; tmp_size -= 2) { scales_yhalf.push_back((float)tmp_size / minside); count++; } } scales_yhalf.push_back((float)pnet_size / minside); count++; } else { scales_yhalf.push_back((float)pnet_size / minside); count++; } pnet_images_yhalf.resize(count); } } bool Find(const unsigned char* bgr_img, int _width, int _height, int _widthStep, std::vector<ZQ_CNN_BBox>& results) { double t1 = omp_get_wtime(); std::vector<ZQ_CNN_BBox> firstBbox, secondBbox, thirdBbox; if (!_Pnet_stage(bgr_img, _width, _height, _widthStep, firstBbox)) return false; //results = firstBbox; //return true; if (limit_r_num > 0) { _select(firstBbox, limit_r_num, _width, _height); } double t2 = omp_get_wtime(); if (!_Rnet_stage(firstBbox, secondBbox)) return false; //results = secondBbox; //return true; if (limit_o_num > 0) { _select(secondBbox, limit_o_num, _width, _height); } double t3 = omp_get_wtime(); if (!_Onet_stage(secondBbox, results)) return false; double t4 = omp_get_wtime(); if (show_debug_info) { printf("final found num: %d\n", (int)results.size()); printf("total cost: %.3f ms (P: %.3f ms, R: %.3f ms, O: %.3f ms)\n", 1000 * (t4 - t1), 1000 * (t2 - t1), 1000 * (t3 - t2), 1000 * (t4 - t3)); } return true; } private: void _compute_Pnet_single_thread(std::vector<std::vector<float> >& maps, std::vector<int>& mapH, std::vector<int>& mapW, int& ori_num, int& xhalf_num, int& yhalf_num) { int scale_num = 0; for (int i = 0; i < scales.size(); i++) { int changedH = (int)ceil(height*scales[i]); int changedW = (int)ceil(width*scales[i]); if (changedH < pnet_size || changedW < pnet_size) continue; scale_num++; mapH.push_back((changedH - pnet_size) / pnet_stride + 1); mapW.push_back((changedW - pnet_size) / pnet_stride + 1); } ori_num = scale_num; scale_num = 0; for (int i = 0; i < scales_xhalf.size(); i++) { int changedH = (int)ceil(height*scales_xhalf[i]); int changedW = (int)ceil(width_half*scales_xhalf[i]); if (changedH < pnet_size || changedW < pnet_size) continue; scale_num++; mapH.push_back((changedH - pnet_size) / pnet_stride + 1); mapW.push_back((changedW - pnet_size) / pnet_stride + 1); } xhalf_num = scale_num; scale_num = 0; for (int i = 0; i < scales_yhalf.size(); i++) { int changedH = (int)ceil(height_half*scales_yhalf[i]); int changedW = (int)ceil(width*scales_yhalf[i]); if (changedH < pnet_size || changedW < pnet_size) continue; scale_num++; mapH.push_back((changedH - pnet_size) / pnet_stride + 1); mapW.push_back((changedW - pnet_size) / pnet_stride + 1); } yhalf_num = scale_num; int total_scale_num = ori_num + xhalf_num + yhalf_num; maps.resize(total_scale_num); for (int i = 0; i < total_scale_num; i++) { maps[i].resize(mapH[i] * mapW[i]); } for (int i = 0; i < total_scale_num; i++) { if (i < ori_num) { int changedH = (int)ceil(height*scales[i]); int changedW = (int)ceil(width*scales[i]); float cur_scale_x = (float)width / changedW; float cur_scale_y = (float)height / changedH; double t10 = omp_get_wtime(); if (scales[i] != 1) { 
input.ResizeBilinear(pnet_images[i], changedW, changedH, 0, 0); } double t11 = omp_get_wtime(); if (scales[i] != 1) pnet[0].Forward(pnet_images[i]); else pnet[0].Forward(input); double t12 = omp_get_wtime(); if (show_debug_info) printf("Pnet [%d]: resolution [%dx%d], resize:%.3f ms, cost:%.3f ms\n", i, changedW, changedH, 1000 * (t11 - t10), 1000 * (t12 - t11)); const ZQ_CNN_Tensor4D* score = pnet[0].GetBlobByName("prob1"); //score p int scoreH = score->GetH(); int scoreW = score->GetW(); int scorePixStep = score->GetPixelStep(); const float *p = score->GetFirstPixelPtr() + 1; for (int row = 0; row < scoreH; row++) { for (int col = 0; col < scoreW; col++) { if (row < mapH[i] && col < mapW[i]) maps[i][row*mapW[i] + col] = *p; p += scorePixStep; } } } else if (i < ori_num + xhalf_num) { int j = i - ori_num; int changedH = (int)ceil(height*scales_xhalf[j]); int changedW = (int)ceil(width_half*scales_xhalf[j]); float cur_scale_x = (float)width_half / changedW; float cur_scale_y = (float)height / changedH; double t10 = omp_get_wtime(); if (scales_xhalf[j] != 1) { input_xhalf.ResizeBilinear(pnet_images_xhalf[j], changedW, changedH, 0, 0); } double t11 = omp_get_wtime(); if (scales_xhalf[j] != 1) pnet[0].Forward(pnet_images_xhalf[j]); else pnet[0].Forward(input_xhalf); double t12 = omp_get_wtime(); if (show_debug_info) printf("Pnet [%d]: resolution [%dx%d], resize:%.3f ms, cost:%.3f ms\n", i, changedW, changedH, 1000 * (t11 - t10), 1000 * (t12 - t11)); const ZQ_CNN_Tensor4D* score = pnet[0].GetBlobByName("prob1"); //score p int scoreH = score->GetH(); int scoreW = score->GetW(); int scorePixStep = score->GetPixelStep(); const float *p = score->GetFirstPixelPtr() + 1; for (int row = 0; row < scoreH; row++) { for (int col = 0; col < scoreW; col++) { if (row < mapH[i] && col < mapW[i]) maps[i][row*mapW[i] + col] = *p; p += scorePixStep; } } } else { int k = i - ori_num - xhalf_num; int changedH = (int)ceil(height*scales_xhalf[k]); int changedW = (int)ceil(width_half*scales_xhalf[k]); float cur_scale_x = (float)width_half / changedW; float cur_scale_y = (float)height / changedH; double t10 = omp_get_wtime(); if (scales_yhalf[k] != 1) { input_yhalf.ResizeBilinear(pnet_images_yhalf[k], changedW, changedH, 0, 0); } double t11 = omp_get_wtime(); if (scales_yhalf[k] != 1) pnet[0].Forward(pnet_images_yhalf[k]); else pnet[0].Forward(input_yhalf); double t12 = omp_get_wtime(); if (show_debug_info) printf("Pnet [%d]: resolution [%dx%d], resize:%.3f ms, cost:%.3f ms\n", i, changedW, changedH, 1000 * (t11 - t10), 1000 * (t12 - t11)); const ZQ_CNN_Tensor4D* score = pnet[0].GetBlobByName("prob1"); //score p int scoreH = score->GetH(); int scoreW = score->GetW(); int scorePixStep = score->GetPixelStep(); const float *p = score->GetFirstPixelPtr() + 1; for (int row = 0; row < scoreH; row++) { for (int col = 0; col < scoreW; col++) { if (row < mapH[i] && col < mapW[i]) maps[i][row*mapW[i] + col] = *p; p += scorePixStep; } } } } } void _compute_Pnet_multi_thread(std::vector<std::vector<float> >& maps, std::vector<int>& mapH, std::vector<int>& mapW, int& ori_num, int& xhalf_num, int& yhalf_num) { if (thread_num <= 1) { for (int i = 0; i < scales.size(); i++) { int changedH = (int)ceil(height*scales[i]); int changedW = (int)ceil(width*scales[i]); if (changedH < pnet_size || changedW < pnet_size) continue; if (scales[i] != 1) { input.ResizeBilinear(pnet_images[i], changedW, changedH, 0, 0); } } for (int i = 0; i < scales_xhalf.size(); i++) { int changedH = (int)ceil(height*scales_xhalf[i]); int changedW = 
(int)ceil(width_half*scales_xhalf[i]); if (changedH < pnet_size || changedW < pnet_size) continue; if (scales_xhalf[i] != 1) { input_xhalf.ResizeBilinear(pnet_images_xhalf[i], changedW, changedH, 0, 0); } } for (int i = 0; i < scales_yhalf.size(); i++) { int changedH = (int)ceil(height_half*scales_yhalf[i]); int changedW = (int)ceil(width*scales_yhalf[i]); if (changedH < pnet_size || changedW < pnet_size) continue; if (scales_yhalf[i] != 1) { input_yhalf.ResizeBilinear(pnet_images_yhalf[i], changedW, changedH, 0, 0); } } } else { ori_num = scales.size(); xhalf_num = scales_xhalf.size(); yhalf_num = scales_yhalf.size(); int total_scale_num = ori_num + xhalf_num + yhalf_num; #pragma omp parallel for num_threads(thread_num) schedule(dynamic, 1) for (int i = 0; i < total_scale_num; i++) { if (i < ori_num) { int changedH = (int)ceil(height*scales[i]); int changedW = (int)ceil(width*scales[i]); if (changedH < pnet_size || changedW < pnet_size) continue; if (scales[i] != 1) { input.ResizeBilinear(pnet_images[i], changedW, changedH, 0, 0); } } else if (i < ori_num + xhalf_num) { int j = i - ori_num; int changedH = (int)ceil(height*scales_xhalf[j]); int changedW = (int)ceil(width_half*scales_xhalf[j]); if (changedH < pnet_size || changedW < pnet_size) continue; if (scales_xhalf[j] != 1) { input_xhalf.ResizeBilinear(pnet_images_xhalf[j], changedW, changedH, 0, 0); } } else { int k = i - ori_num - xhalf_num; int changedH = (int)ceil(height_half*scales_yhalf[k]); int changedW = (int)ceil(width*scales_yhalf[k]); if (changedH < pnet_size || changedW < pnet_size) continue; if (scales_yhalf[k] != 1) { input_yhalf.ResizeBilinear(pnet_images_yhalf[k], changedW, changedH, 0, 0); } } } } int scale_num = 0; for (int i = 0; i < scales.size(); i++) { int changedH = (int)ceil(height*scales[i]); int changedW = (int)ceil(width*scales[i]); if (changedH < pnet_size || changedW < pnet_size) continue; scale_num++; mapH.push_back((changedH - pnet_size) / pnet_stride + 1); mapW.push_back((changedW - pnet_size) / pnet_stride + 1); } ori_num = scale_num; scale_num = 0; for (int i = 0; i < scales_xhalf.size(); i++) { int changedH = (int)ceil(height*scales_xhalf[i]); int changedW = (int)ceil(width_half*scales_xhalf[i]); if (changedH < pnet_size || changedW < pnet_size) continue; scale_num++; mapH.push_back((changedH - pnet_size) / pnet_stride + 1); mapW.push_back((changedW - pnet_size) / pnet_stride + 1); } xhalf_num = scale_num; scale_num = 0; for (int i = 0; i < scales_yhalf.size(); i++) { int changedH = (int)ceil(height_half*scales_yhalf[i]); int changedW = (int)ceil(width*scales_yhalf[i]); if (changedH < pnet_size || changedW < pnet_size) continue; scale_num++; mapH.push_back((changedH - pnet_size) / pnet_stride + 1); mapW.push_back((changedW - pnet_size) / pnet_stride + 1); } yhalf_num = scale_num; int total_scale_num = ori_num + xhalf_num + yhalf_num; maps.resize(total_scale_num); for (int i = 0; i < total_scale_num; i++) { maps[i].resize(mapH[i] * mapW[i]); } std::vector<int> task_rect_off_x; std::vector<int> task_rect_off_y; std::vector<int> task_rect_width; std::vector<int> task_rect_height; std::vector<float> task_scale; std::vector<int> task_scale_id; int stride = pnet_stride; const int block_size = 64 * stride; int cellsize = pnet_size; int border_size = cellsize - stride; int overlap_border_size = cellsize / stride; int jump_size = block_size - border_size; for (int i = 0; i < total_scale_num; i++) { if (i < ori_num) { int changeH = (int)ceil(height*scales[i]); int changeW = (int)ceil(width*scales[i]); if 
(changeH < pnet_size || changeW < pnet_size) continue; int block_H_num = 0; int block_W_num = 0; int start = 0; while (start < changeH) { block_H_num++; if (start + block_size >= changeH) break; start += jump_size; } start = 0; while (start < changeW) { block_W_num++; if (start + block_size >= changeW) break; start += jump_size; } for (int s = 0; s < block_H_num; s++) { for (int t = 0; t < block_W_num; t++) { int rect_off_x = t * jump_size; int rect_off_y = s * jump_size; int rect_width = __min(changeW, rect_off_x + block_size) - rect_off_x; int rect_height = __min(changeH, rect_off_y + block_size) - rect_off_y; if (rect_width >= cellsize && rect_height >= cellsize) { task_rect_off_x.push_back(rect_off_x); task_rect_off_y.push_back(rect_off_y); task_rect_width.push_back(rect_width); task_rect_height.push_back(rect_height); task_scale.push_back(scales[i]); task_scale_id.push_back(i); } } } } else if (i < ori_num + xhalf_num) { int j = i - ori_num; int changeH = (int)ceil(height*scales_xhalf[j]); int changeW = (int)ceil(width_half*scales_xhalf[j]); if (changeH < pnet_size || changeW < pnet_size) continue; int block_H_num = 0; int block_W_num = 0; int start = 0; while (start < changeH) { block_H_num++; if (start + block_size >= changeH) break; start += jump_size; } start = 0; while (start < changeW) { block_W_num++; if (start + block_size >= changeW) break; start += jump_size; } for (int s = 0; s < block_H_num; s++) { for (int t = 0; t < block_W_num; t++) { int rect_off_x = t * jump_size; int rect_off_y = s * jump_size; int rect_width = __min(changeW, rect_off_x + block_size) - rect_off_x; int rect_height = __min(changeH, rect_off_y + block_size) - rect_off_y; if (rect_width >= cellsize && rect_height >= cellsize) { task_rect_off_x.push_back(rect_off_x); task_rect_off_y.push_back(rect_off_y); task_rect_width.push_back(rect_width); task_rect_height.push_back(rect_height); task_scale.push_back(scales_xhalf[j]); task_scale_id.push_back(i); } } } } else { int k = i - ori_num - xhalf_num; int changeH = (int)ceil(height_half*scales_yhalf[k]); int changeW = (int)ceil(width*scales_yhalf[k]); if (changeH < pnet_size || changeW < pnet_size) continue; int block_H_num = 0; int block_W_num = 0; int start = 0; while (start < changeH) { block_H_num++; if (start + block_size >= changeH) break; start += jump_size; } start = 0; while (start < changeW) { block_W_num++; if (start + block_size >= changeW) break; start += jump_size; } for (int s = 0; s < block_H_num; s++) { for (int t = 0; t < block_W_num; t++) { int rect_off_x = t * jump_size; int rect_off_y = s * jump_size; int rect_width = __min(changeW, rect_off_x + block_size) - rect_off_x; int rect_height = __min(changeH, rect_off_y + block_size) - rect_off_y; if (rect_width >= cellsize && rect_height >= cellsize) { task_rect_off_x.push_back(rect_off_x); task_rect_off_y.push_back(rect_off_y); task_rect_width.push_back(rect_width); task_rect_height.push_back(rect_height); task_scale.push_back(scales_yhalf[k]); task_scale_id.push_back(i); } } } } } // int task_num = task_scale.size(); std::vector<ZQ_CNN_Tensor4D_NHW_C_Align128bit> task_pnet_images(thread_num); if (thread_num <= 1) { for (int i = 0; i < task_num; i++) { int thread_id = omp_get_thread_num(); int scale_id = task_scale_id[i]; float cur_scale = task_scale[i]; int i_rect_off_x = task_rect_off_x[i]; int i_rect_off_y = task_rect_off_y[i]; int i_rect_width = task_rect_width[i]; int i_rect_height = task_rect_height[i]; if (scale_id < ori_num) { if (scale_id == 0 && scales[0] == 1) { if 
(!input.ROI(task_pnet_images[thread_id], i_rect_off_x, i_rect_off_y, i_rect_width, i_rect_height, 0, 0)) continue; } else { if (!pnet_images[scale_id].ROI(task_pnet_images[thread_id], i_rect_off_x, i_rect_off_y, i_rect_width, i_rect_height, 0, 0)) continue; } } else if (scale_id < ori_num + xhalf_num) { int j = scale_id - ori_num; if (j == 0 && scales_xhalf[0] == 1) { if (!input_xhalf.ROI(task_pnet_images[thread_id], i_rect_off_x, i_rect_off_y, i_rect_width, i_rect_height, 0, 0)) continue; } else { if (!pnet_images_xhalf[j].ROI(task_pnet_images[thread_id], i_rect_off_x, i_rect_off_y, i_rect_width, i_rect_height, 0, 0)) continue; } } else { int k = scale_id - ori_num - xhalf_num; if (k == 0 && scales_yhalf[0] == 1) { if (!input_yhalf.ROI(task_pnet_images[thread_id], i_rect_off_x, i_rect_off_y, i_rect_width, i_rect_height, 0, 0)) continue; } else { if (!pnet_images_yhalf[k].ROI(task_pnet_images[thread_id], i_rect_off_x, i_rect_off_y, i_rect_width, i_rect_height, 0, 0)) continue; } } if (!pnet[thread_id].Forward(task_pnet_images[thread_id])) continue; const ZQ_CNN_Tensor4D* score = pnet[thread_id].GetBlobByName("prob1"); int task_count = 0; //score p int scoreH = score->GetH(); int scoreW = score->GetW(); int scorePixStep = score->GetPixelStep(); const float *p = score->GetFirstPixelPtr() + 1; ZQ_CNN_BBox bbox; ZQ_CNN_OrderScore order; for (int row = 0; row < scoreH; row++) { for (int col = 0; col < scoreW; col++) { int real_row = row + i_rect_off_y / stride; int real_col = col + i_rect_off_x / stride; if (real_row < mapH[scale_id] && real_col < mapW[scale_id]) maps[scale_id][real_row*mapW[scale_id] + real_col] = *p; p += scorePixStep; } } } } else { #pragma omp parallel for num_threads(thread_num) for (int i = 0; i < task_num; i++) { int thread_id = omp_get_thread_num(); int scale_id = task_scale_id[i]; float cur_scale = task_scale[i]; int i_rect_off_x = task_rect_off_x[i]; int i_rect_off_y = task_rect_off_y[i]; int i_rect_width = task_rect_width[i]; int i_rect_height = task_rect_height[i]; if (i < ori_num) { if (scale_id == 0 && scales[0] == 1) { if (!input.ROI(task_pnet_images[thread_id], i_rect_off_x, i_rect_off_y, i_rect_width, i_rect_height, 0, 0)) continue; } else { if (!pnet_images[scale_id].ROI(task_pnet_images[thread_id], i_rect_off_x, i_rect_off_y, i_rect_width, i_rect_height, 0, 0)) continue; } } else if (i < ori_num + xhalf_num) { int j = i - ori_num; if (j == 0 && scales_xhalf[0] == 1) { if (!input_xhalf.ROI(task_pnet_images[thread_id], i_rect_off_x, i_rect_off_y, i_rect_width, i_rect_height, 0, 0)) continue; } else { if (!pnet_images_xhalf[j].ROI(task_pnet_images[thread_id], i_rect_off_x, i_rect_off_y, i_rect_width, i_rect_height, 0, 0)) continue; } } else { int k = i - ori_num - xhalf_num; if (k == 0 && scales_yhalf[0] == 1) { if (!input_yhalf.ROI(task_pnet_images[thread_id], i_rect_off_x, i_rect_off_y, i_rect_width, i_rect_height, 0, 0)) continue; } else { if (!pnet_images_yhalf[k].ROI(task_pnet_images[thread_id], i_rect_off_x, i_rect_off_y, i_rect_width, i_rect_height, 0, 0)) continue; } } if (!pnet[thread_id].Forward(task_pnet_images[thread_id])) continue; const ZQ_CNN_Tensor4D* score = pnet[thread_id].GetBlobByName("prob1"); int task_count = 0; //score p int scoreH = score->GetH(); int scoreW = score->GetW(); int scorePixStep = score->GetPixelStep(); const float *p = score->GetFirstPixelPtr() + 1; ZQ_CNN_BBox bbox; ZQ_CNN_OrderScore order; for (int row = 0; row < scoreH; row++) { for (int col = 0; col < scoreW; col++) { int real_row = row + i_rect_off_y / stride; int 
real_col = col + i_rect_off_x / stride; if (real_row < mapH[scale_id] && real_col < mapW[scale_id]) maps[scale_id][real_row*mapW[scale_id] + real_col] = *p; p += scorePixStep; } } } } } bool _Pnet_stage(const unsigned char* bgr_img, int _width, int _height, int _widthStep, std::vector<ZQ_CNN_BBox>& firstBbox) { if (thread_num <= 0) return false; double t1 = omp_get_wtime(); firstBbox.clear(); if (width != _width || height != _height) return false; if (!input.ConvertFromBGR(bgr_img, width, height, _widthStep)) return false; if (!input_yhalf.ConvertFromBGR(bgr_img, width, height / 2, _widthStep * 2)) return false; std::vector<unsigned char> bgr_img_xhalf(width_half*_height * 3); int widthStep_half = width_half * 3; for (int i = 0; i < _height; i++) { for (int j = 0; j < width_half; j++) { bgr_img_xhalf[i*widthStep_half + j * 3 + 0] = bgr_img[i*_widthStep + j * 6 + 0]; bgr_img_xhalf[i*widthStep_half + j * 3 + 1] = bgr_img[i*_widthStep + j * 6 + 1]; bgr_img_xhalf[i*widthStep_half + j * 3 + 2] = bgr_img[i*_widthStep + j * 6 + 2]; } } if (!input_xhalf.ConvertFromBGR(&bgr_img_xhalf[0], width_half, height, widthStep_half)) return false; double t2 = omp_get_wtime(); if (show_debug_info) printf("convert cost: %.3f ms\n", 1000 * (t2 - t1)); std::vector<std::vector<float> > maps; std::vector<int> mapH; std::vector<int> mapW; int ori_num, xhalf_num, yhalf_num; if (thread_num == 1 && !force_run_pnet_multithread) { pnet[0].TurnOffShowDebugInfo(); //pnet[0].TurnOnShowDebugInfo(); _compute_Pnet_single_thread(maps, mapH, mapW, ori_num, xhalf_num, yhalf_num); } else { _compute_Pnet_multi_thread(maps, mapH, mapW, ori_num, xhalf_num, yhalf_num); } int total_scale_num = ori_num + xhalf_num + yhalf_num; ZQ_CNN_OrderScore order; std::vector<std::vector<ZQ_CNN_BBox> > bounding_boxes(total_scale_num); std::vector<std::vector<ZQ_CNN_OrderScore> > bounding_scores(total_scale_num); const int block_size = 32; int stride = pnet_stride; int cellsize = pnet_size; int border_size = cellsize / stride; for (int i = 0; i < total_scale_num; i++) { double t13 = omp_get_wtime(); int changedH, changedW; if (i < ori_num) { changedH = (int)ceil(height*scales[i]); changedW = (int)ceil(width*scales[i]); } else if (i < ori_num + xhalf_num) { int j = i - ori_num; changedH = (int)ceil(height*scales_xhalf[j]); changedW = (int)ceil(width_half*scales_xhalf[j]); } else { int k = i - ori_num - xhalf_num; changedH = (int)ceil(height_half*scales_yhalf[k]); changedW = (int)ceil(width*scales_yhalf[k]); } if (changedH < pnet_size || changedW < pnet_size) continue; float cur_scale_x = (float)width / changedW; float cur_scale_y = (float)height / changedH; int count = 0; //score p int scoreH = mapH[i]; int scoreW = mapW[i]; const float *p = &maps[i][0]; if (scoreW <= block_size && scoreH < block_size) { ZQ_CNN_BBox bbox; ZQ_CNN_OrderScore order; for (int row = 0; row < scoreH; row++) { for (int col = 0; col < scoreW; col++) { if (*p > thresh[0]) { bbox.score = *p; order.score = *p; order.oriOrder = count; bbox.row1 = stride*row; bbox.col1 = stride*col; bbox.row2 = stride*row + cellsize; bbox.col2 = stride*col + cellsize; bbox.exist = true; bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1); bbox.need_check_overlap_count = (row >= border_size && row < scoreH - border_size) && (col >= border_size && col < scoreW - border_size); bounding_boxes[i].push_back(bbox); bounding_scores[i].push_back(order); count++; } p++; } } int before_count = bounding_boxes[i].size(); ZQ_CNN_BBoxUtils::_nms(bounding_boxes[i], bounding_scores[i], 
nms_thresh_per_scale, "Union", pnet_overlap_thresh_count); int after_count = bounding_boxes[i].size(); for (int j = 0; j < after_count; j++) { ZQ_CNN_BBox& bbox = bounding_boxes[i][j]; bbox.row1 = round(bbox.row1 *cur_scale_y); bbox.col1 = round(bbox.col1 *cur_scale_x); bbox.row2 = round(bbox.row2 *cur_scale_y); bbox.col2 = round(bbox.col2 *cur_scale_x); bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1); } double t14 = omp_get_wtime(); if (show_debug_info) printf("nms cost: %.3f ms, (%d-->%d)\n", 1000 * (t14 - t13), before_count, after_count); } else { int before_count = 0, after_count = 0; int block_H_num = __max(1, scoreH / block_size); int block_W_num = __max(1, scoreW / block_size); int block_num = block_H_num*block_W_num; int width_per_block = scoreW / block_W_num; int height_per_block = scoreH / block_H_num; std::vector<std::vector<ZQ_CNN_BBox> > tmp_bounding_boxes(block_num); std::vector<std::vector<ZQ_CNN_OrderScore> > tmp_bounding_scores(block_num); std::vector<int> block_start_w(block_num), block_end_w(block_num); std::vector<int> block_start_h(block_num), block_end_h(block_num); for (int bh = 0; bh < block_H_num; bh++) { for (int bw = 0; bw < block_W_num; bw++) { int bb = bh * block_W_num + bw; block_start_w[bb] = (bw == 0) ? 0 : (bw*width_per_block - border_size); block_end_w[bb] = (bw == block_num - 1) ? scoreW : ((bw + 1)*width_per_block); block_start_h[bb] = (bh == 0) ? 0 : (bh*height_per_block - border_size); block_end_h[bb] = (bh == block_num - 1) ? scoreH : ((bh + 1)*height_per_block); } } int chunk_size = 1;// ceil((float)block_num / thread_num); if (thread_num <= 1) { for (int bb = 0; bb < block_num; bb++) { ZQ_CNN_BBox bbox; ZQ_CNN_OrderScore order; int count = 0; for (int row = block_start_h[bb]; row < block_end_h[bb]; row++) { p = &maps[i][0] + row*scoreW + block_start_w[bb]; for (int col = block_start_w[bb]; col < block_end_w[bb]; col++) { if (*p > thresh[0]) { bbox.score = *p; order.score = *p; order.oriOrder = count; bbox.row1 = stride*row; bbox.col1 = stride*col; bbox.row2 = stride*row + cellsize; bbox.col2 = stride*col + cellsize; bbox.exist = true; bbox.need_check_overlap_count = (row >= border_size && row < scoreH - border_size) && (col >= border_size && col < scoreW - border_size); bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1); tmp_bounding_boxes[bb].push_back(bbox); tmp_bounding_scores[bb].push_back(order); count++; } p++; } } int tmp_before_count = tmp_bounding_boxes[bb].size(); ZQ_CNN_BBoxUtils::_nms(tmp_bounding_boxes[bb], tmp_bounding_scores[bb], nms_thresh_per_scale, "Union", pnet_overlap_thresh_count); int tmp_after_count = tmp_bounding_boxes[bb].size(); before_count += tmp_before_count; after_count += tmp_after_count; } } else { #pragma omp parallel for schedule(dynamic, chunk_size) num_threads(thread_num) for (int bb = 0; bb < block_num; bb++) { ZQ_CNN_BBox bbox; ZQ_CNN_OrderScore order; int count = 0; for (int row = block_start_h[bb]; row < block_end_h[bb]; row++) { const float* p = &maps[i][0] + row*scoreW + block_start_w[bb]; for (int col = block_start_w[bb]; col < block_end_w[bb]; col++) { if (*p > thresh[0]) { bbox.score = *p; order.score = *p; order.oriOrder = count; bbox.row1 = stride*row; bbox.col1 = stride*col; bbox.row2 = stride*row + cellsize; bbox.col2 = stride*col + cellsize; bbox.exist = true; bbox.need_check_overlap_count = (row >= border_size && row < scoreH - border_size) && (col >= border_size && col < scoreW - border_size); bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1); 
tmp_bounding_boxes[bb].push_back(bbox); tmp_bounding_scores[bb].push_back(order); count++; } p++; } } int tmp_before_count = tmp_bounding_boxes[bb].size(); ZQ_CNN_BBoxUtils::_nms(tmp_bounding_boxes[bb], tmp_bounding_scores[bb], nms_thresh_per_scale, "Union", pnet_overlap_thresh_count); int tmp_after_count = tmp_bounding_boxes[bb].size(); before_count += tmp_before_count; after_count += tmp_after_count; } } count = 0; for (int bb = 0; bb < block_num; bb++) { std::vector<ZQ_CNN_BBox>::iterator it = tmp_bounding_boxes[bb].begin(); for (; it != tmp_bounding_boxes[bb].end(); it++) { if ((*it).exist) { bounding_boxes[i].push_back(*it); order.score = (*it).score; order.oriOrder = count; bounding_scores[i].push_back(order); count++; } } } //ZQ_CNN_BBoxUtils::_nms(bounding_boxes[i], bounding_scores[i], nms_thresh_per_scale, "Union", 0); after_count = bounding_boxes[i].size(); for (int j = 0; j < after_count; j++) { ZQ_CNN_BBox& bbox = bounding_boxes[i][j]; bbox.row1 = round(bbox.row1 *cur_scale_y); bbox.col1 = round(bbox.col1 *cur_scale_x); bbox.row2 = round(bbox.row2 *cur_scale_y); bbox.col2 = round(bbox.col2 *cur_scale_x); bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1); } double t14 = omp_get_wtime(); if (show_debug_info) printf("nms cost: %.3f ms, (%d-->%d)\n", 1000 * (t14 - t13), before_count, after_count); } } std::vector<ZQ_CNN_OrderScore> firstOrderScore; int count = 0; for (int i = 0; i < total_scale_num; i++) { std::vector<ZQ_CNN_BBox>::iterator it = bounding_boxes[i].begin(); for (; it != bounding_boxes[i].end(); it++) { if ((*it).exist) { if (i < ori_num) { it->scale_x = 1; it->scale_y = 1; } else if(i < ori_num + xhalf_num) { it->scale_x = 0.5; it->scale_y = 1; } else { it->scale_x = 1; it->scale_y = 0.5; } firstBbox.push_back(*it); order.score = (*it).score; order.oriOrder = count; firstOrderScore.push_back(order); count++; } } } //the first stage's nms if (count < 1) return false; double t15 = omp_get_wtime(); ZQ_CNN_BBoxUtils::_nms(firstBbox, firstOrderScore, nms_thresh[0], "Union", 0, 1); ZQ_CNN_BBoxUtils::_refine_and_square_bbox(firstBbox, width, height, true); double t16 = omp_get_wtime(); if (show_debug_info) printf("nms cost: %.3f ms\n", 1000 * (t16 - t15)); if (show_debug_info) printf("first stage candidate count: %d\n", count); double t3 = omp_get_wtime(); if (show_debug_info) printf("stage 1: cost %.3f ms\n", 1000 * (t3 - t2)); return true; } bool _Rnet_stage(std::vector<ZQ_CNN_BBox>& firstBbox, std::vector<ZQ_CNN_BBox>& secondBbox) { double t3 = omp_get_wtime(); secondBbox.clear(); std::vector<ZQ_CNN_BBox>::iterator it = firstBbox.begin(); std::vector<ZQ_CNN_OrderScore> secondScore; std::vector<int> src_off_x, src_off_y, src_rect_w, src_rect_h; int r_count = 0; for (; it != firstBbox.end(); it++) { if ((*it).exist) { int off_x = it->col1; int off_y = it->row1; int rect_w = it->col2 - off_x; int rect_h = it->row2 - off_y; if (/*off_x < 0 || off_x + rect_w > width || off_y < 0 || off_y + rect_h > height ||*/ rect_w <= 0.5*min_size || rect_h <= 0.5*min_size) { (*it).exist = false; continue; } else { src_off_x.push_back(off_x); src_off_y.push_back(off_y); src_rect_w.push_back(rect_w); src_rect_h.push_back(rect_h); r_count++; secondBbox.push_back(*it); } } } int batch_size = BATCH_SIZE; int per_num = ceil((float)r_count / thread_num); int need_thread_num = thread_num; if (per_num > batch_size) { need_thread_num = ceil((float)r_count / batch_size); per_num = batch_size; } std::vector<ZQ_CNN_Tensor4D_NHW_C_Align128bit> task_rnet_images(need_thread_num); 
std::vector<std::vector<int> > task_src_off_x(need_thread_num); std::vector<std::vector<int> > task_src_off_y(need_thread_num); std::vector<std::vector<int> > task_src_rect_w(need_thread_num); std::vector<std::vector<int> > task_src_rect_h(need_thread_num); std::vector<std::vector<ZQ_CNN_BBox> > task_secondBbox(need_thread_num); for (int i = 0; i < need_thread_num; i++) { int st_id = per_num*i; int end_id = __min(r_count, per_num*(i + 1)); int cur_num = end_id - st_id; if (cur_num > 0) { task_src_off_x[i].resize(cur_num); task_src_off_y[i].resize(cur_num); task_src_rect_w[i].resize(cur_num); task_src_rect_h[i].resize(cur_num); task_secondBbox[i].resize(cur_num); for (int j = 0; j < cur_num; j++) { task_src_off_x[i][j] = src_off_x[st_id + j]; task_src_off_y[i][j] = src_off_y[st_id + j]; task_src_rect_w[i][j] = src_rect_w[st_id + j]; task_src_rect_h[i][j] = src_rect_h[st_id + j]; task_secondBbox[i][j] = secondBbox[st_id + j]; } } } if (thread_num <= 1) { for (int pp = 0; pp < need_thread_num; pp++) { if (task_src_off_x.size() == 0) continue; if (!input.ResizeBilinearRect(task_rnet_images[pp], rnet_size, rnet_size, 0, 0, task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp])) { continue; } rnet[0].Forward(task_rnet_images[pp]); const ZQ_CNN_Tensor4D* score = rnet[0].GetBlobByName("prob1"); const ZQ_CNN_Tensor4D* location = rnet[0].GetBlobByName("conv5-2"); const float* score_ptr = score->GetFirstPixelPtr(); const float* location_ptr = location->GetFirstPixelPtr(); int score_sliceStep = score->GetSliceStep(); int location_sliceStep = location->GetSliceStep(); int task_count = 0; for (int i = 0; i < task_secondBbox[pp].size(); i++) { if (score_ptr[i*score_sliceStep + 1] > thresh[1]) { for (int j = 0; j < 4; j++) task_secondBbox[pp][i].regreCoord[j] = location_ptr[i*location_sliceStep + j]; task_secondBbox[pp][i].area = task_src_rect_w[pp][i] * task_src_rect_h[pp][i]; task_secondBbox[pp][i].score = score_ptr[i*score_sliceStep + 1]; task_count++; } else { task_secondBbox[pp][i].exist = false; } } if (task_count < 1) { task_secondBbox[pp].clear(); continue; } for (int i = task_secondBbox[pp].size() - 1; i >= 0; i--) { if (!task_secondBbox[pp][i].exist) task_secondBbox[pp].erase(task_secondBbox[pp].begin() + i); } } } else { #pragma omp parallel for num_threads(thread_num) schedule(dynamic,1) for (int pp = 0; pp < need_thread_num; pp++) { int thread_id = omp_get_thread_num(); if (task_src_off_x.size() == 0) continue; if (!input.ResizeBilinearRect(task_rnet_images[pp], rnet_size, rnet_size, 0, 0, task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp])) { continue; } rnet[thread_id].Forward(task_rnet_images[pp]); const ZQ_CNN_Tensor4D* score = rnet[thread_id].GetBlobByName("prob1"); const ZQ_CNN_Tensor4D* location = rnet[thread_id].GetBlobByName("conv5-2"); const float* score_ptr = score->GetFirstPixelPtr(); const float* location_ptr = location->GetFirstPixelPtr(); int score_sliceStep = score->GetSliceStep(); int location_sliceStep = location->GetSliceStep(); int task_count = 0; for (int i = 0; i < task_secondBbox[pp].size(); i++) { if (score_ptr[i*score_sliceStep + 1] > thresh[1]) { for (int j = 0; j < 4; j++) task_secondBbox[pp][i].regreCoord[j] = location_ptr[i*location_sliceStep + j]; task_secondBbox[pp][i].area = task_src_rect_w[pp][i] * task_src_rect_h[pp][i]; task_secondBbox[pp][i].score = score_ptr[i*score_sliceStep + 1]; task_count++; } else { task_secondBbox[pp][i].exist = false; } } if (task_count < 1) { task_secondBbox[pp].clear(); 
continue; } for (int i = task_secondBbox[pp].size() - 1; i >= 0; i--) { if (!task_secondBbox[pp][i].exist) task_secondBbox[pp].erase(task_secondBbox[pp].begin() + i); } } } int count = 0; for (int i = 0; i < need_thread_num; i++) { count += task_secondBbox[i].size(); } secondBbox.resize(count); secondScore.resize(count); int id = 0; for (int i = 0; i < need_thread_num; i++) { for (int j = 0; j < task_secondBbox[i].size(); j++) { secondBbox[id] = task_secondBbox[i][j]; secondScore[id].score = secondBbox[id].score; secondScore[id].oriOrder = id; id++; } } ZQ_CNN_BBoxUtils::_nms(secondBbox, secondScore, nms_thresh[1], "Union"); //ZQ_CNN_BBoxUtils::_nms(secondBbox, secondScore, nms_thresh[1], "Min"); for (int i = 0; i < secondBbox.size(); i++) { float h = secondBbox[i].row2 - secondBbox[i].row1 + 1; float w = secondBbox[i].col2 - secondBbox[i].col1 + 1; float ratio = h / w; if (ratio > 1.5) { secondBbox[i].scale_x = 1; secondBbox[i].scale_y = 0.5; } else if (ratio < 1.0 / 1.5) { secondBbox[i].scale_x = 0.5; secondBbox[i].scale_y = 1; } else { secondBbox[i].scale_x = 1; secondBbox[i].scale_y = 1; } } ZQ_CNN_BBoxUtils::_refine_and_square_bbox(secondBbox, width, height, true); count = secondBbox.size(); double t4 = omp_get_wtime(); if (show_debug_info) printf("run Rnet [%d] times, candidate after nms: %d \n", r_count, count); if (show_debug_info) printf("stage 2: cost %.3f ms\n", 1000 * (t4 - t3)); return true; } bool _Onet_stage(std::vector<ZQ_CNN_BBox>& secondBbox, std::vector<ZQ_CNN_BBox>& thirdBbox) { double t4 = omp_get_wtime(); thirdBbox.clear(); std::vector<ZQ_CNN_BBox>::iterator it = secondBbox.begin(); std::vector<ZQ_CNN_OrderScore> thirdScore; std::vector<ZQ_CNN_BBox> early_accept_thirdBbox; std::vector<int> src_off_x, src_off_y, src_rect_w, src_rect_h; int o_count = 0; for (; it != secondBbox.end(); it++) { if ((*it).exist) { int off_x = it->col1; int off_y = it->row1; int rect_w = it->col2 - off_x; int rect_h = it->row2 - off_y; if (/*off_x < 0 || off_x + rect_w > width || off_y < 0 || off_y + rect_h > height ||*/ rect_w <= 0.5*min_size || rect_h <= 0.5*min_size) { (*it).exist = false; continue; } else { src_off_x.push_back(off_x); src_off_y.push_back(off_y); src_rect_w.push_back(rect_w); src_rect_h.push_back(rect_h); o_count++; thirdBbox.push_back(*it); } } } int batch_size = BATCH_SIZE; int per_num = ceil((float)o_count / thread_num); int need_thread_num = thread_num; if (per_num > batch_size) { need_thread_num = ceil((float)o_count / batch_size); per_num = batch_size; } std::vector<ZQ_CNN_Tensor4D_NHW_C_Align128bit> task_onet_images(need_thread_num); std::vector<std::vector<int> > task_src_off_x(need_thread_num); std::vector<std::vector<int> > task_src_off_y(need_thread_num); std::vector<std::vector<int> > task_src_rect_w(need_thread_num); std::vector<std::vector<int> > task_src_rect_h(need_thread_num); std::vector<std::vector<ZQ_CNN_BBox> > task_thirdBbox(need_thread_num); for (int i = 0; i < need_thread_num; i++) { int st_id = per_num*i; int end_id = __min(o_count, per_num*(i + 1)); int cur_num = end_id - st_id; if (cur_num > 0) { task_src_off_x[i].resize(cur_num); task_src_off_y[i].resize(cur_num); task_src_rect_w[i].resize(cur_num); task_src_rect_h[i].resize(cur_num); task_thirdBbox[i].resize(cur_num); for (int j = 0; j < cur_num; j++) { task_src_off_x[i][j] = src_off_x[st_id + j]; task_src_off_y[i][j] = src_off_y[st_id + j]; task_src_rect_w[i][j] = src_rect_w[st_id + j]; task_src_rect_h[i][j] = src_rect_h[st_id + j]; task_thirdBbox[i][j] = thirdBbox[st_id + j]; } } } if 
(thread_num <= 1) { for (int pp = 0; pp < need_thread_num; pp++) { if (task_src_off_x.size() == 0) continue; if (!input.ResizeBilinearRect(task_onet_images[pp], onet_size, onet_size, 0, 0, task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp])) { continue; } double t31 = omp_get_wtime(); onet[0].Forward(task_onet_images[pp]); double t32 = omp_get_wtime(); const ZQ_CNN_Tensor4D* score = onet[0].GetBlobByName("prob1"); const ZQ_CNN_Tensor4D* location = onet[0].GetBlobByName("conv6-2"); const ZQ_CNN_Tensor4D* keyPoint = onet[0].GetBlobByName("conv6-3"); const float* score_ptr = score->GetFirstPixelPtr(); const float* location_ptr = location->GetFirstPixelPtr(); const float* keyPoint_ptr = 0; if (keyPoint != 0) keyPoint_ptr = keyPoint->GetFirstPixelPtr(); int score_sliceStep = score->GetSliceStep(); int location_sliceStep = location->GetSliceStep(); int keyPoint_sliceStep = 0; if (keyPoint != 0) keyPoint_sliceStep = keyPoint->GetSliceStep(); int task_count = 0; ZQ_CNN_OrderScore order; for (int i = 0; i < task_thirdBbox[pp].size(); i++) { if (score_ptr[i*score_sliceStep + 1] > thresh[2]) { for (int j = 0; j < 4; j++) task_thirdBbox[pp][i].regreCoord[j] = location_ptr[i*location_sliceStep + j]; if (keyPoint != 0) { for (int num = 0; num < 5; num++) { task_thirdBbox[pp][i].ppoint[num] = task_thirdBbox[pp][i].col1 + (task_thirdBbox[pp][i].col2 - task_thirdBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num]; task_thirdBbox[pp][i].ppoint[num + 5] = task_thirdBbox[pp][i].row1 + (task_thirdBbox[pp][i].row2 - task_thirdBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num + 5]; } } task_thirdBbox[pp][i].area = task_src_rect_w[pp][i] * task_src_rect_h[pp][i]; task_thirdBbox[pp][i].score = score_ptr[i*score_sliceStep + 1]; task_count++; } else { task_thirdBbox[pp][i].exist = false; } } if (task_count < 1) { task_thirdBbox[pp].clear(); continue; } for (int i = task_thirdBbox[pp].size() - 1; i >= 0; i--) { if (!task_thirdBbox[pp][i].exist) task_thirdBbox[pp].erase(task_thirdBbox[pp].begin() + i); } } } else { #pragma omp parallel for num_threads(thread_num) schedule(dynamic,1) for (int pp = 0; pp < need_thread_num; pp++) { int thread_id = omp_get_thread_num(); if (task_src_off_x.size() == 0) continue; if (!input.ResizeBilinearRect(task_onet_images[pp], onet_size, onet_size, 0, 0, task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp])) { continue; } double t31 = omp_get_wtime(); onet[thread_id].Forward(task_onet_images[pp]); double t32 = omp_get_wtime(); const ZQ_CNN_Tensor4D* score = onet[thread_id].GetBlobByName("prob1"); const ZQ_CNN_Tensor4D* location = onet[thread_id].GetBlobByName("conv6-2"); const ZQ_CNN_Tensor4D* keyPoint = onet[thread_id].GetBlobByName("conv6-3"); const float* score_ptr = score->GetFirstPixelPtr(); const float* location_ptr = location->GetFirstPixelPtr(); const float* keyPoint_ptr = 0; if (keyPoint != 0) keyPoint_ptr = keyPoint->GetFirstPixelPtr(); int score_sliceStep = score->GetSliceStep(); int location_sliceStep = location->GetSliceStep(); int keyPoint_sliceStep = 0; if (keyPoint != 0) keyPoint_sliceStep = keyPoint->GetSliceStep(); int task_count = 0; ZQ_CNN_OrderScore order; for (int i = 0; i < task_thirdBbox[pp].size(); i++) { if (score_ptr[i*score_sliceStep + 1] > thresh[2]) { for (int j = 0; j < 4; j++) task_thirdBbox[pp][i].regreCoord[j] = location_ptr[i*location_sliceStep + j]; if (keyPoint != 0) { for (int num = 0; num < 5; num++) { task_thirdBbox[pp][i].ppoint[num] = task_thirdBbox[pp][i].col1 + 
(task_thirdBbox[pp][i].col2 - task_thirdBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num]; task_thirdBbox[pp][i].ppoint[num + 5] = task_thirdBbox[pp][i].row1 + (task_thirdBbox[pp][i].row2 - task_thirdBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num + 5]; } } task_thirdBbox[pp][i].area = task_src_rect_w[pp][i] * task_src_rect_h[pp][i]; task_thirdBbox[pp][i].score = score_ptr[i*score_sliceStep + 1]; task_count++; } else { task_thirdBbox[pp][i].exist = false; } } if (task_count < 1) { task_thirdBbox[pp].clear(); continue; } for (int i = task_thirdBbox[pp].size() - 1; i >= 0; i--) { if (!task_thirdBbox[pp][i].exist) task_thirdBbox[pp].erase(task_thirdBbox[pp].begin() + i); } } } int count = 0; for (int i = 0; i < need_thread_num; i++) { count += task_thirdBbox[i].size(); } thirdBbox.resize(count); thirdScore.resize(count); int id = 0; for (int i = 0; i < need_thread_num; i++) { for (int j = 0; j < task_thirdBbox[i].size(); j++) { thirdBbox[id] = task_thirdBbox[i][j]; thirdScore[id].score = task_thirdBbox[i][j].score; thirdScore[id].oriOrder = id; id++; } } ZQ_CNN_OrderScore order; for (int i = 0; i < early_accept_thirdBbox.size(); i++) { order.score = early_accept_thirdBbox[i].score; order.oriOrder = count++; thirdScore.push_back(order); thirdBbox.push_back(early_accept_thirdBbox[i]); } for (int i = 0; i < secondBbox.size(); i++) { float h = secondBbox[i].row2 - secondBbox[i].row1 + 1; float w = secondBbox[i].col2 - secondBbox[i].col1 + 1; float ratio = h / w; if (ratio > 1.5) { secondBbox[i].scale_x = 1; secondBbox[i].scale_y = 0.5; } else if (ratio < 1.0 / 1.5) { secondBbox[i].scale_x = 0.5; secondBbox[i].scale_y = 1; } else { secondBbox[i].scale_x = 1; secondBbox[i].scale_y = 1; } } ZQ_CNN_BBoxUtils::_refine_and_square_bbox(thirdBbox, width, height, false); ZQ_CNN_BBoxUtils::_nms(thirdBbox, thirdScore, nms_thresh[2], "Min"); double t5 = omp_get_wtime(); if (show_debug_info) printf("run Onet [%d] times, candidate before nms: %d \n", o_count, count); if (show_debug_info) printf("stage 3: cost %.3f ms\n", 1000 * (t5 - t4)); return true; } void _select(std::vector<ZQ_CNN_BBox>& bbox, int limit_num, int width, int height) { int in_num = bbox.size(); if (limit_num >= in_num) return; bbox.resize(limit_num); } }; } #endif
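For reference, the image-pyramid construction buried in SetPara above reduces to the small loop below: the shortest image side is scaled so that the smallest face of interest maps onto the pnet_size detection window, then repeatedly shrunk by factor until the window no longer fits. BuildPnetScales and the numeric values are illustrative stand-ins, not part of the class; the class additionally drops scales that fall at or below the window size and can append extra scales when special_handle_very_big_face is set.

// Standalone sketch of the basic P-net scale pyramid computed in SetPara.
#include <cstdio>
#include <vector>

std::vector<float> BuildPnetScales(int width, int height, int min_face_size,
                                   int pnet_size, float factor)
{
  std::vector<float> scales;
  float minside = static_cast<float>(width < height ? width : height);
  float m = static_cast<float>(pnet_size) / min_face_size; // first scale: min face -> pnet window
  minside *= m;
  while (minside > pnet_size) // keep shrinking until the window no longer fits
  {
    scales.push_back(m);
    minside *= factor;
    m *= factor;
  }
  return scales;
}

int main()
{
  // e.g. a 1280x720 frame, 60-pixel minimum face, 20-pixel P-net window, factor 0.709
  for (float s : BuildPnetScales(1280, 720, 60, 20, 0.709f))
    std::printf("scale %.4f -> resized short side %.1f px\n", s, 720 * s);
  return 0;
}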
MD5_fmt.c
/* * This file is part of John the Ripper password cracker, * Copyright (c) 1996-2001,2008,2010-2012 by Solar Designer * * ...with changes in the jumbo patch, by bartavelle and magnum. * * Redistribution and use in source and binary forms, with or without * modification, are permitted. * * There's ABSOLUTELY NO WARRANTY, express or implied. */ #include <string.h> #include "arch.h" #include "misc.h" #include "simd-intrinsics.h" #include "MD5_std.h" #include "common.h" #include "formats.h" #include "cryptmd5_common.h" #if defined(_OPENMP) && defined(SIMD_PARA_MD5) #ifndef OMP_SCALE #define OMP_SCALE 4 #endif #include <omp.h> #endif #include "memdbg.h" #define FORMAT_LABEL "md5crypt" #define FORMAT_NAME "crypt(3) $1$" #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define PLAINTEXT_LENGTH 15 #define CIPHERTEXT_LENGTH 22 #ifdef SIMD_PARA_MD5 #define BINARY_SIZE 16 #else #define BINARY_SIZE 4 #endif #define BINARY_ALIGN 4 #define SALT_SIZE 9 #define SALT_ALIGN 1 #define MIN_KEYS_PER_CRYPT MD5_N #define MAX_KEYS_PER_CRYPT MD5_N static struct fmt_tests tests[] = { {"$1$12345678$aIccj83HRDBo6ux1bVx7D1", "0123456789ABCDE"}, {"$apr1$Q6ZYh...$RV6ft2bZ8j.NGrxLYaJt9.", "test"}, {"$1$12345678$f8QoJuo0DpBRfQSD0vglc1", "12345678"}, {"$1$$qRPK7m23GJusamGpoGLby/", ""}, {"$apr1$a2Jqm...$grFrwEgiQleDr0zR4Jx1b.", "15 chars is max"}, {"$1$$AuJCr07mI7DSew03TmBIv/", "no salt"}, {"$1$`!@#%^&*$E6hD76/pKTS8qToBCkux30", "invalid salt"}, {"$1$12345678$xek.CpjQUVgdf/P2N9KQf/", ""}, {"$1$1234$BdIMOAWFOV2AQlLsrN/Sw.", "1234"}, {"$apr1$rBXqc...$NlXxN9myBOk95T0AyLAsJ0", "john"}, {"$apr1$Grpld/..$qp5GyjwM2dnA5Cdej9b411", "the"}, {"$apr1$GBx.D/..$yfVeeYFCIiEXInfRhBRpy/", "ripper"}, {"$1$bb$19smCEBG0Q1pVil0/HqK./", "aaaaa"}, {"$1$coin$rebm0t9KJ56mgGWJF5o5M0", "lapin"}, {"$1$pouet$/Ecz/vyk.zCYvrr6wB78h0", "canard"}, {"$1$test2$02MCIATVoxq3IhgK6XRkb1", "test1"}, {"$1$aussi$X67z3kXsWo92F15uChx1H1", "felicie"}, {"$1$boire$gf.YM2y3InYEu9.NbVr.v0", "manger"}, {"$1$bas$qvkmmWnVHRCSv/6LQ1doH/", "haut"}, {"$1$gauche$EPvd6LZlrgb0MMFPxUrJN1", "droite"}, /* following hashes are AIX non-standard smd5 hashes */ {"{smd5}s8/xSJ/v$uGam4GB8hOjTLQqvBfxJ2/", "password"}, {"{smd5}alRJaSLb$aKM3H1.h1ycXl5GEVDH1e1", "aixsucks?"}, {"{smd5}eLB0QWeS$Eg.YfWY8clZuCxF0xNrKg.", "0123456789ABCDE"}, /* following hashes are AIX standard smd5 hashes (with corrected tag) * lpa_options = std_hash=true */ {"$1$JVDbGx8K$T9h8HK4LZxeLPMTAxCfpc1", "password"}, {"$1$1Cu6fEvv$42kuaJ5fMEqyVStPuFG040", "0123456789ABCDE"}, {"$1$ql5x.xXL$vYVDhExol2xUBBpERRWcn1", "jtr>hashcat"}, {"$1$27iyq7Ya$miN09fW1Scj0DHVNyewoU/", ""}, {"$1$84Othc1n$v1cuReaa5lRdGuHaOa76n0", "a"}, {"$1$4zq0BsCR$U2ua9WZtDEhzy4gFSiLxN1", "aa"}, {"$1$DKwjKWxp$PY6PdlPZsXjOppPDoFOz4.", "aaa"}, {"$1$OKDV6ppN$viTVmH48bSePiCrMvXT/./", "aaaa"}, {"$1$QEWsCY0O$xrTTMKTepiHMp7Oxgz0pX/", "aaaaa"}, {"$1$5dfdk2dF$XiJBPNrfKcCgdQ/kcoB40/", "aaaaaa"}, {"$1$Ps6A1Cy6$WsvLg9cQhm9JU0rXkLEtz.", "aaaaaaa"}, {"$1$9IK7nZ4M$4nx7Mdj05KGPJX/mZaDrh.", "aaaaaaaa"}, {"$1$l3pNTqwT$GAc.dcRaxCvC20CFGCjp4/", "aaaaaaaaa"}, {"$1$jSAARhJR$6daQ/ekjAL0MgOUgGJyp10", "aaaaaaaaaa"}, {"$1$wk3Xwqqg$2AtdiucwJvJgbaVT1jWpb0", "aaaaaaaaaaa"}, {"$1$G6Fn69Ei$d7AKJUOIdz/gO4Utc0TQP1", "aaaaaaaaaaaa"}, {"$1$A7XJ7lGK$W5jTnH/4lW4XwZ.6F7n1N.", "aaaaaaaaaaaaa"}, {"$1$Rcm46RfA$LfdIK/OP16yHzMYHSlx/B.", "aaaaaaaaaaaaaa"}, {"$1$4bCSSJMN$TcYKTsukD4SFJE1n4MwMZ/", "aaaaaaaaaaaaaaa"}, #if PLAINTEXT_LENGTH > 15 {"$1$mJxBkkl8$u7OHfWCPmNxvf0um7hH89.", "aaaaaaaaaaaaaaaa"}, {"$1$Ub1gBUt4$TNaLxU7Pq5mk/MiDEb60b/", "aaaaaaaaaaaaaaaaa"}, 
{"$1$8ot7QScR$x.p4vjIgdFxxS83x29PkJ0", "aaaaaaaaaaaaaaaaaa"}, {"$1$wRi4OjD3$eJjKD2AwLMWfOTRYA30zn.", "aaaaaaaaaaaaaaaaaaa"}, {"$1$lmektrsg$2KSRY4EUFzsYNMg80fG4/0", "aaaaaaaaaaaaaaaaaaaa"}, {"$1$tgVBKBmE$YRvzsi7qHP2MC1Atg8VCV.", "aaaaaaaaaaaaaaaaaaaaa"}, {"$1$oTsk88YC$Eh435T1BQzmjQekfqkHof/", "aaaaaaaaaaaaaaaaaaaaaa"}, {"$1$ykxSZEfP$hJrFeGOFk049L.94Mgggj/", "aaaaaaaaaaaaaaaaaaaaaaa"}, {"$1$LBK4p5tD$5/gAIx8/7hpTVwDC/.KQv/", "aaaaaaaaaaaaaaaaaaaaaaaa"}, {"$1$fkEasaUI$G7CelOWHkol2nVHN8XQP40", "aaaaaaaaaaaaaaaaaaaaaaaaa"}, {"$1$gRevVzeY$eMMQrsl5OHL5dP1p/ktJc/", "aaaaaaaaaaaaaaaaaaaaaaaaaa"}, {"$1$164TNEjj$ppoV6Ju6Vu63j1OlM4zit/", "aaaaaaaaaaaaaaaaaaaaaaaaaaa"}, {"$1$ErPmhjp2$lZZstb2M455Xhk50eeH4i/", "aaaaaaaaaaaaaaaaaaaaaaaaaaaa"}, {"$1$NUssS5fT$QaS4Ywt0IwzxbE0FAGnXn0", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaa"}, {"$1$NxlTyiJ7$gxkXTEJdeTzY8P6tqKmcz.", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"}, {"$1$Cmy9x7gW$kamvHI42Kh1CH4Shy6g6S/", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"}, {"$1$IsuapfCX$4Yq0Adq5nNZgl0LwbSl5Y0", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"}, {"$1$rSZfNcKX$N4XPvGrfhKsyoEcRSaqmG0", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"}, #endif {NULL} }; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; #ifdef SIMD_PARA_MD5 static unsigned char cursalt[SALT_SIZE]; static int CryptType; static MD5_word (*sout); static int omp_para = 1; #endif static void init(struct fmt_main *self) { MD5_std_init(self); #if defined(_OPENMP) && defined(SIMD_PARA_MD5) omp_para = omp_get_max_threads(); if (omp_para < 1) omp_para = 1; self->params.min_keys_per_crypt = MD5_N * omp_para; omp_para *= OMP_SCALE; self->params.max_keys_per_crypt = MD5_N * omp_para; #elif MD5_std_mt self->params.min_keys_per_crypt = MD5_std_min_kpc; self->params.max_keys_per_crypt = MD5_std_max_kpc; #endif saved_key = mem_calloc_align(self->params.max_keys_per_crypt, sizeof(*saved_key), MEM_ALIGN_CACHE); #ifdef SIMD_PARA_MD5 sout = mem_calloc(self->params.max_keys_per_crypt, sizeof(*sout) * BINARY_SIZE); #endif } static void done(void) { #ifdef SIMD_PARA_MD5 MEM_FREE(sout); #endif MEM_FREE(saved_key); } static int get_hash_0(int index) { #ifdef SIMD_PARA_MD5 unsigned int x,y; x = index&(SIMD_COEF_32-1); y = (unsigned int)index/SIMD_COEF_32; return ((MD5_word *)sout)[x+y*SIMD_COEF_32*4] & PH_MASK_0; #else init_t(); return MD5_out[index][0] & PH_MASK_0; #endif } static int get_hash_1(int index) { #ifdef SIMD_PARA_MD5 unsigned int x,y; x = index&(SIMD_COEF_32-1); y = (unsigned int)index/SIMD_COEF_32; return ((MD5_word *)sout)[x+y*SIMD_COEF_32*4] & PH_MASK_1; #else init_t(); return MD5_out[index][0] & PH_MASK_1; #endif } static int get_hash_2(int index) { #ifdef SIMD_PARA_MD5 unsigned int x,y; x = index&(SIMD_COEF_32-1); y = (unsigned int)index/SIMD_COEF_32; return ((MD5_word *)sout)[x+y*SIMD_COEF_32*4] & PH_MASK_2; #else init_t(); return MD5_out[index][0] & PH_MASK_2; #endif } static int get_hash_3(int index) { #ifdef SIMD_PARA_MD5 unsigned int x,y; x = index&(SIMD_COEF_32-1); y = (unsigned int)index/SIMD_COEF_32; return ((MD5_word *)sout)[x+y*SIMD_COEF_32*4] & PH_MASK_3; #else init_t(); return MD5_out[index][0] & PH_MASK_3; #endif } static int get_hash_4(int index) { #ifdef SIMD_PARA_MD5 unsigned int x,y; x = index&(SIMD_COEF_32-1); y = (unsigned int)index/SIMD_COEF_32; return ((MD5_word *)sout)[x+y*SIMD_COEF_32*4] & PH_MASK_4; #else init_t(); return MD5_out[index][0] & PH_MASK_4; #endif } static int get_hash_5(int index) { #ifdef SIMD_PARA_MD5 unsigned int x,y; x = index&(SIMD_COEF_32-1); y = (unsigned int)index/SIMD_COEF_32; return ((MD5_word *)sout)[x+y*SIMD_COEF_32*4] & 
PH_MASK_5; #else init_t(); return MD5_out[index][0] & PH_MASK_5; #endif } static int get_hash_6(int index) { #ifdef SIMD_PARA_MD5 unsigned int x,y; x = index&(SIMD_COEF_32-1); y = (unsigned int)index/SIMD_COEF_32; return ((MD5_word *)sout)[x+y*SIMD_COEF_32*4] & PH_MASK_6; #else init_t(); return MD5_out[index][0] & PH_MASK_6; #endif } static int salt_hash(void *salt) { unsigned int i, h, retval; retval = 0; for (i = 0; i <= 6; i += 2) { h = (unsigned char)atoi64[ARCH_INDEX(((char *)salt)[i])]; h ^= ((unsigned char *)salt)[i + 1]; h <<= 6; h ^= (unsigned char)atoi64[ARCH_INDEX(((char *)salt)[i + 1])]; h ^= ((unsigned char *)salt)[i]; retval += h; } retval ^= retval >> SALT_HASH_LOG; retval &= SALT_HASH_SIZE - 1; return retval; } static void set_key(char *key, int index) { #ifndef SIMD_PARA_MD5 MD5_std_set_key(key, index); #endif strnfcpy(saved_key[index], key, PLAINTEXT_LENGTH); } static char *get_key(int index) { saved_key[index][PLAINTEXT_LENGTH] = 0; return saved_key[index]; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; #ifdef SIMD_PARA_MD5 #ifdef _OPENMP int t; #pragma omp parallel for for (t = 0; t < omp_para; t++) md5cryptsse((unsigned char *)(&saved_key[t*MD5_N]), cursalt, (char *)(&sout[t*MD5_N*BINARY_SIZE/sizeof(MD5_word)]), CryptType); #else md5cryptsse((unsigned char *)saved_key, cursalt, (char *)sout, CryptType); #endif #else MD5_std_crypt(count); #endif return count; } static int cmp_all(void *binary, int count) { #ifdef SIMD_PARA_MD5 unsigned int x,y; for(y=0;y<SIMD_PARA_MD5*omp_para;y++) for(x=0;x<SIMD_COEF_32;x++) { if( ((MD5_word *)binary)[0] == ((MD5_word *)sout)[x+y*SIMD_COEF_32*4] ) return 1; } return 0; #else #if MD5_std_mt int t, n = (count + (MD5_N - 1)) / MD5_N; #endif for_each_t(n) { #if MD5_X2 if (*(MD5_word *)binary == MD5_out[0][0] || *(MD5_word *)binary == MD5_out[1][0]) return 1; #else if (*(MD5_word *)binary == MD5_out[0][0]) return 1; #endif } return 0; #endif } static int cmp_one(void *binary, int index) { #ifdef SIMD_PARA_MD5 unsigned int x,y; x = index&(SIMD_COEF_32-1); y = (unsigned int)index/SIMD_COEF_32; if(((unsigned int*)binary)[0] != ((unsigned int*)sout)[x+y*SIMD_COEF_32*4+0*SIMD_COEF_32]) return 0; if(((unsigned int*)binary)[1] != ((unsigned int*)sout)[x+y*SIMD_COEF_32*4+1*SIMD_COEF_32]) return 0; if(((unsigned int*)binary)[2] != ((unsigned int*)sout)[x+y*SIMD_COEF_32*4+2*SIMD_COEF_32]) return 0; if(((unsigned int*)binary)[3] != ((unsigned int*)sout)[x+y*SIMD_COEF_32*4+3*SIMD_COEF_32]) return 0; return 1; #else init_t(); return *(MD5_word *)binary == MD5_out[index][0]; #endif } static int cmp_exact(char *source, int index) { #ifdef SIMD_PARA_MD5 return 1; #else init_t(); return !memcmp(MD5_std_get_binary(source), MD5_out[index], sizeof(MD5_binary)); #endif } static void set_salt(void *salt) { #ifdef SIMD_PARA_MD5 memcpy(cursalt, salt, SALT_SIZE); CryptType = cursalt[8]; cursalt[8] = 0; #endif MD5_std_set_salt(salt); } static void *get_salt(char *ciphertext) { return MD5_std_get_salt(ciphertext); } static void *get_binary(char *ciphertext) { return MD5_std_get_binary(ciphertext); } struct fmt_main fmt_MD5 = { { FORMAT_LABEL, FORMAT_NAME, "MD5 " MD5_ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, #if MD5_std_mt || defined(SIMD_PARA_MD5) FMT_OMP | #endif FMT_CASE | FMT_8_BIT, { NULL }, { md5_salt_prefix, apr1_salt_prefix, smd5_salt_prefix }, tests }, { init, done, fmt_default_reset, 
fmt_default_prepare, cryptmd5_common_valid, fmt_default_split, get_binary, get_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, salt_hash, NULL, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } };
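Editorial aside on the SIMD output layout used by get_hash_*() and cmp_one() above: each group of SIMD_COEF_32 candidates shares one interleaved block of four MD5 words, with all lanes of word 0 stored first, then all lanes of word 1, and so on. The sketch below only restates that offset arithmetic; SIMD_COEF_32_DEMO and demo_simd_offset are stand-in names, not John the Ripper symbols.

/* Illustrative only -- mirrors the indexing in cmp_one() above. */
#define SIMD_COEF_32_DEMO 4   /* stand-in for SIMD_COEF_32 (lanes of 32-bit words) */

static unsigned demo_simd_offset(unsigned index, unsigned word)
{
    unsigned x = index & (SIMD_COEF_32_DEMO - 1);  /* lane within its block */
    unsigned y = index / SIMD_COEF_32_DEMO;        /* which block of lanes  */
    /* Each block holds 4 words x SIMD_COEF_32 lanes; word slots are interleaved. */
    return x + y * SIMD_COEF_32_DEMO * 4 + word * SIMD_COEF_32_DEMO;
}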
convolution_gemm.c
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */ /** \file * jitconv "doRef" calculation - a simplified gemm impl. * * This should be correct, and may even be somewhat fast "in general". * Do not expect "fastest" performance. * * libvednn has a more sophisticated GEMM convolution. */ #include "vednn_helper.h" #include "convolution_gemm.h" #include <string.h> #include <stdlib.h> #include <cblas.h> #define LOCAL_FTRACE 1 #if LOCAL_FTRACE #include "conv_test_param.h" // just for FTRACE macros #define LFTRACE_BEGIN(...) FTRACE_BEGIN(__VA_ARGS__) #define LFTRACE_END(...) FTRACE_END(__VA_ARGS__) #define LFTRACE_IF(...) FTRACE_IF(__VA_ARGS__) #else #define LFTRACE_BEGIN(...) do{}while(0) #define LFTRACE_END(...) do{}while(0) #define LFTRACE_IF(...) do{}while(0) #endif // LOCAL_FTRACE // Scratchpad: none -- hardwired to malloc/free im2col buffers // slower, but safer. #define GEMM_PARA_THRESH 32768 // Around Jan. 2022, ncc-3.4.20, SGEMM began to segfault for M==1, // so a workaround writes out the trivial matrix multiply. /// 0 : SGEMM works great /// 1 : safe macros /// 2: dev code (generic) #define SGEMM_M1_SEGFAULTS 1 /// 0 : old code, bias via sgemm (w/ problems) /// 1 : just use cblas_saxpy alternate (simpler and avoids sgemm bug!) #define BIAS_SAXPY 0 #if SGEMM_M1_SEGFAULTS==0 // issues ncc-3.4.20 and M==1 ??? #define SGEMM sgemm_ #define SGEMM_A1B0 sgemm_ #define SGEMM_A1B1K1 sgemm_ #define SGEMM_A1B0t sgemm_ #define SGEMM_A1tB1 sgemm_ #else #define SGEMM sgemm_ // dangerous now - require bug workaround for M=1 #define SGEMM_A1B0 SGEMM_SAFE_A1B0 #define SGEMM_A1B1K1 SGEMM_SAFE_A1B1K1 #define SGEMM_A1B0t SGEMM_SAFE_A1B0t #define SGEMM_A1tB1 SGEMM_SAFE_A1tB1 #endif /// A workaround for SGEMM M==1 segfaults. (circa ncc-3.4.20, Jan 2022) /// For alpha=1, beta=0 (main gemm calculation) #define SGEMM_SAFE_A1B0(TRANSA,TRANSB, N,M,K, ALPHA,A,LDA, B,LDB, BETA,C,LDC) \ do { \ if(*(M) > 1){ \ sgemm_(TRANSA,TRANSB, N,M,K, ALPHA,A,LDA, B,LDB, BETA,C,LDC); \ }else{ \ int const NN = *(N); \ /*int const MM = *(M);*/ \ int const KK = *(K); \ if(*(M) == 1 && *(K) > 1){ /* using just M==1 */ \ _Pragma("omp parallel if(NN * KK > 32768)") /* C99 */ \ for (int n=0; n < (NN); ++n) { \ float acc = 0.0f; \ for (int k=0; k < (KK); ++k) { \ acc += (A)[k * (NN) + n] * (B)[k]; \ } \ (C)[n] = acc; /* M==1 && beta==0.0 : no accumulation into C */ \ } \ }else{ /* M=1, K=1 */ \ _Pragma("omp parallel if((NN) > 32768)") \ for (int n=0; n < *(N); ++n) { \ (C)[n] = (A)[n] * (B)[0]; \ } \ } \ } \ }while(0) /// Backward Data also has alpha=1, beta=0, but B[] is transposed // XXX test with jitconv -T BackwardData #define SGEMM_SAFE_A1B0t(TRANSA,TRANSB, N,M,K, ALPHA,A,LDA, B,LDB, BETA,C,LDC) \ do { \ if(*(M) > 1){ \ sgemm_(TRANSA,TRANSB, N,M,K, ALPHA,A,LDA, B,LDB, BETA,C,LDC); \ }else{ \ /* for M=1, B is K x 1, so vector ignoring the transpose is OK */ \ int const NN = *(N); \ /*int const MM = *(M);*/ \ int const KK = *(K); \ if(*(M) == 1 && *(K) > 1){ /* using just M==1 */ \ _Pragma("omp parallel if(NN * KK > 32768)") /* C99 */ \ for (int n=0; n < (NN); ++n) { \ float acc = 0.0f; \ for (int k=0; k < (KK); ++k) { \ acc += (A)[k * (NN) + n] * (B)[k]; \ } \ (C)[n] = acc; /* M==1 && beta==0.0 : no accumulation into C */ \ } \ }else{ /* M=1, K=1 */ \ _Pragma("omp parallel if((NN) > 32768)") \ for (int n=0; n < *(N); ++n) { \ (C)[n] = (A)[n] * (B)[0]; \ } \ } \ } \ }while(0) /// for BackwardFilter, alpha=1, beta=1 and A is transposed // XXX test with jitconv -T BackwardFilter // try only a single omp || ? 
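// Editorial note (not vednn source): with the (TRANSA, TRANSB, N, M, K)
// argument order used here, the M == 1 branch of SGEMM_SAFE_A1tB1 below
// degenerates to a matrix-vector product with beta == 1 accumulation,
//     C[n] += sum_{k=0..K-1} A[n*K + k] * B[k],   n = 0..N-1,
// which is exactly the scalar loop the macro writes out.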
// needs testing of ALL impls (update jitconv testBackwardFilter!!!) #define SGEMM_SAFE_A1tB1_0(TRANSA,TRANSB, N,M,K, ALPHA,A,LDA, B,LDB, BETA,C,LDC) \ do { \ /*printf(" A1tB1 N=%d M=%d K=%d\n",*(N),*(M),*(K)); fflush(stdout);*/ \ if(*(M) > 1){ \ sgemm_(TRANSA,TRANSB, N,M,K, ALPHA,A,LDA, B,LDB, BETA,C,LDC); \ }else{ \ /* for M=1, A is N x K, so need A transpose wrt. Forward impl */ \ int const NN = *(N); \ int const KK = *(K); \ int const NNKK = NN*KK; \ /* M==1, any K */ \ _Pragma("omp parallel if(NNKK > 32768)") /* C99 */ \ for (int n=0; n < (NN); ++n) { \ float acc = 0.0f; \ for (int k=0; k < (KK); ++k) { \ acc += (A)[n * (KK) + k] * (B)[k]; \ } \ (C)[n] += acc; /* beta=1 accumulation into C */ \ } \ } \ }while(0) #define SGEMM_SAFE_A1tB1(TRANSA,TRANSB, N,M,K, ALPHA,A,LDA, B,LDB, BETA,C,LDC) \ do { \ /*printf(" A1tB1 N=%d M=%d K=%d\n",*(N),*(M),*(K)); fflush(stdout);*/ \ if(*(M) > 1){ \ sgemm_(TRANSA,TRANSB, N,M,K, ALPHA,A,LDA, B,LDB, BETA,C,LDC); \ }else{ \ /* for M=1, A is N x K, so need A transpose wrt. Forward impl */ \ int const NN = *(N); \ int const KK = *(K); \ if(*(M) == 1 && *(K) > 1){ /* using just M==1 */ \ /*_Pragma("omp parallel if(NN * KK > 32768)")*/ /* C99 */ \ for (int n=0; n < (NN); ++n) { \ float acc = 0.0f; \ for (int k=0; k < (KK); ++k) { \ acc += (A)[n * (KK) + k] * (B)[k]; \ } \ (C)[n] += acc; /* beta=1 accumulation into C */ \ } \ }else{ /* M=1, K=1 */ \ /*_Pragma("omp parallel if((NN) > 32768)")*/ \ for (int n=0; n < (NN); ++n) { \ (C)[n] += (A)[n] * (B)[0]; /* beta=1 accum */ \ } \ } \ } \ }while(0) // using M==1 and K==1 // here A[N] is 1.0 /// workaround for M=1 bias segfault. /// Here K=1, alpha=1, beta=1, \b and A[] is all-1.0 (bias accumulation) #define SGEMM_SAFE_A1B1K1(TRANSA,TRANSB, N,M,K, ALPHA,A,LDA, B,LDB, BETA,C,LDC) \ do { \ if(1 && *(M) > 1) /* always elide? */ \ { \ sgemm_(TRANSA,TRANSB, N,M,K, ALPHA,A,LDA, B,LDB, BETA,C,LDC); \ }else{ \ int const MN = *(M) * *(N); \ float const B_0 = *(B);/* (B)[0] */ \ /* wrong output if try to parallelize? */ \ /* _Pragma("omp parallel if(MN > 32768)") */ \ for (int mn=0; mn < MN; ++mn) { \ (C)[mn] += B_0; \ } \ } \ }while(0) #if 0 #define DBG(...) do{printf(__VA_ARGS__);fflush(stdout);}while(0) #else #define DBG(...) 
#endif #if 1 void sgemm_(char *TRANSA, char *TRANSB, int *M, int *N, int *K, float *ALPHA, float *A, int *LDA, float *B, int *LDB, float *BETA, float *C, int *LDC ) ; //void cblas_saxpy(const int N, const float alpha, const float *X, // const int incX, float *Y, const int incY); #endif static char TRANS = 'T'; static char NOTRANS = 'N'; static float FONE = 1.0f; static float FZERO = 0.0f; static int IONE = 1; /* ----------------------------------------------------------------------- */ static inline int is_a_ge_zero_and_a_lt_b(int a, int b) { //return (unsigned)a < (unsigned)b; return a>=0 && a<b; // for ncc auto vectorization, this is better } static void #if 0 im2col_cpu(const float * restrict data_im, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int output_h, const int output_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, float * restrict data_col) #else im2col_cpu(const float * data_im, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int output_h, const int output_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, float * data_col) #endif { LFTRACE_BEGIN("im2col_cpu"); #if 0 const int output_h = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; const int output_w = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; #endif const int channel_size = height * width; int channel; if (0){ // Note: alloc is for ic * ih * kw, but probably only need (ic/g) * kh * kw here? DBG("im2col_cpu _alloc needs %llu floats\n",(long long)channels*channel_size); for(size_t i=0; i<(size_t)channels*(size_t)channel_size; ++i){ data_col[i] = 0.0f; } DBG("data_col / pColBuf accessible"); } //#pragma omp parallel for if(channels>=3) for (channel = 0 ; channel < channels; channel++) { // inChannel //printf(" i2c c%d/%d",channel,channels); fflush(stdout); int kernel_row, kernel_col, output_rows, output_cols, output_col; int inOffset = channel * channel_size; int outOffset = channel * output_h * output_w * kernel_h * kernel_w; for (kernel_row = 0; kernel_row < kernel_h; kernel_row++) { // kernHeight for (kernel_col = 0; kernel_col < kernel_w; kernel_col++) { // kernWidth int input_row = -pad_h + kernel_row * dilation_h; for (output_rows = output_h; output_rows; output_rows--) { // outHeight if (!is_a_ge_zero_and_a_lt_b(input_row, height)) { for (output_cols = output_w; output_cols; output_cols--) { // *(data_col++) = 0; data_col[outOffset++] = 0.f; } } else { int input_col = -pad_w + kernel_col * dilation_w; // following still bombed //#pragma _NEC novector for (output_col = output_w; output_col; output_col--) { // outWidth #if 1 // newer data_col[outOffset++] //*(data_col++) = (is_a_ge_zero_and_a_lt_b(input_col, width) ? 
data_im[inOffset + input_row * width + input_col] : 0.f); #else // older if (outOffset < 0 || outOffset >= channels*kernel_h*kernel_w*output_h*output_w){ printf("ERROR: outOffset"); fflush(stdout); exit(-1); } if (is_a_ge_zero_and_a_lt_b(input_col, width)) { // *(data_col++) = data_im[input_row * width + input_col]; data_col[outOffset] = data_im[inOffset + input_row * width + input_col]; } else { // *(data_col++) = 0; data_col[outOffset] = 0; } ++outOffset; #endif input_col += stride_w; } } input_row += stride_h; } } } } LFTRACE_END("im2col_cpu"); } static void col2im_cpu( const float* data_col, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int output_h, const int output_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, float* data_im) { LFTRACE_BEGIN("col2im_cpu"); memset(data_im, 0, sizeof(float)*height*width*channels) ; #if 0 const int output_h = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; const int output_w = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; #endif const int channel_size = height * width; int channel; #pragma omp parallel for if(channels>=3) for (channel = 0 ; channel < channels; channel++) { // inChannel int kernel_row, kernel_col, output_rows, output_cols, output_col; int inOffset = channel * channel_size; int outOffset = channel * output_h * output_w * kernel_h * kernel_w; for (kernel_row = 0; kernel_row < kernel_h; kernel_row++) { // kernHeight for (kernel_col = 0; kernel_col < kernel_w; kernel_col++) { // kernWidth int input_row = -pad_h + kernel_row * dilation_h; for (output_rows = output_h; output_rows; output_rows--) { // outHeight if (!is_a_ge_zero_and_a_lt_b(input_row, height)) { for (output_cols = output_w; output_cols; output_cols--) { data_col[outOffset++] ; } } else { int input_col = -pad_w + kernel_col * dilation_w; for (output_col = output_w; output_col; output_col--) { // outWidth if (is_a_ge_zero_and_a_lt_b(input_col, width)) { data_im[inOffset + input_row * width + input_col] += data_col[outOffset++] ; } else { outOffset++ ; } input_col += stride_w; } } input_row += stride_h; } } } } LFTRACE_END("col2im_cpu"); } // pOne is oh*ow of 1.0f // pColBuff is scratch of ic*kw*kh * ow*oh * iw*ih (huge, in this version) see conv_test_param.c vednnError_t convolution_forward_gemm( const vednnTensorParam_t * restrict pParamIn, const void * restrict pDataIn, const vednnFilterParam_t * restrict pParamKernel, const void * restrict pDataKernel, const vednnBiasParam_t * restrict pParamBias, const void * restrict pDataBias, const vednnTensorParam_t * restrict pParamOut, void * restrict pDataOut, //const float * restrict pOne, float * restrict pColBuff, const float * pOne, float * pColBuff, const vednnConvolutionParam_t * restrict pParamConv ) { LFTRACE_BEGIN("convolution_forward_gemm"); int batch = pParamIn->batch; int inChannel = pParamIn->channel; int inWidth = pParamIn->width; int inHeight = pParamIn->height; int outChannel = pParamOut->channel; int outWidth = pParamOut->width; int outHeight = pParamOut->height; int kernWidth = pParamKernel->width; int kernHeight = pParamKernel->height; int group = pParamConv->group; int strideWidth = pParamConv->strideWidth;; int strideHeight = pParamConv->strideHeight; int padWidth = pParamConv->padWidth; int padHeight = pParamConv->padHeight; int dilationWidth = pParamConv->dilationWidth; int dilationHeight = pParamConv->dilationHeight; int inChannelGroup 
= inChannel / group; // pParamKernel->inChannel int outChannelGroup = outChannel / group; // pParamKernel->outChannel int no_im2col = (kernWidth == 1 && kernHeight == 1 && strideWidth == 1 && strideHeight == 1 && padWidth == 0 && padHeight == 0); if (0){ printf("mb=%d g=%d ic=%d ic/g=%d noi2c=%d", batch,group,inChannel,inChannelGroup, no_im2col); fflush(stdout); float const* pk = (float const*)(pDataKernel); // assume float (debug) XXX int const ksz = group * outChannelGroup * inChannelGroup * kernHeight * kernWidth; size_t const ksz2 = getKernelSize(pParamKernel) * pParamConv->group; if (ksz != ksz2) { printf("ksz %d != ksz2 %d\n"); } for(int i=0; i<ksz; ++i){ if (isnan(pk[i])) { printf("generateRandomData --> nans!\n"); printf("ksz=%d i=%d\n"); exit(-1); } if (pk[i] < -5.0 || pk[i] > +5.0){ printf("generateRandomData --> outside [-5.0,5.0]\n"); printf("ksz=%d i=%d\n"); exit(-1); } } printf("input pDataKernel[0..%d - 1] looks good\n",ksz); printf("input pDataKernel[0..%lu - 1] looks good\n",(long unsigned)ksz2); } float * transformed_filter = NULL ; if( pParamKernel->layout == VEDNN_FILTER_LAYOUT_HWCN ) { // only support group=1 if (group!=1){ printf("Unsupported ref calc: HWCN wants group==1"); exit(-1); //return VEDNN_ERROR_INVALID_PARAM; } const int N = outChannel ; const int C = inChannel ; const int H = kernHeight ; const int W = kernWidth ; float * filter = (float *) pDataKernel ; transformed_filter = (float *) malloc(sizeof(float)*N*C*H*W) ; #pragma omp parallel for for(int n=0; n<N ; n++) { for(int c=0; c<C ; c++) { for(int hw=0; hw<H*W ; hw++) { transformed_filter[((n*C+c)*H)*W+hw] = filter[((hw)*C+c)*N+n] ; } } } } const float * restrict pIn = pDataIn; const float * restrict pBias = pDataBias; const float * restrict pKernel = transformed_filter == NULL ? pDataKernel : transformed_filter ; float * restrict pOut = pDataOut; for (int n = 0; n < batch; n++) { // this->num_ int inBatchOffset = n * inChannel * inWidth * inHeight; int outBatchOffset = n * outChannel * outWidth * outHeight; for (int g = 0; g < group; g++) { int inGroupOffset = g * inChannelGroup * inHeight * inWidth; int outGroupOffset = g * outChannelGroup * outHeight * outWidth; int kernGroupOffset = g * outChannelGroup * inChannelGroup * kernHeight * kernWidth; int biasGroupOffset = g * outChannelGroup; int inOffset = inBatchOffset + inGroupOffset; int outOffset = outBatchOffset + outGroupOffset; if (no_im2col) { int M = outChannelGroup; int N = outWidth * outHeight; int K = inChannelGroup; int LDA = inWidth * inHeight; //printf(" M=%d N=%d K=%d ",M,N,K); #if SGEMM_M1_SEGFAULTS==0 // issues ncc-3.4.20 and M==1 ??? SGEMM(&NOTRANS, &NOTRANS, &N, &M, &K, &FONE, (float *) &pIn[inOffset], &LDA, (float *) &pKernel[kernGroupOffset], &K, &FZERO, &pOut[outOffset], &N); #elif SGEMM_M1_SEGFAULTS==1 // issues ncc-3.4.20 and M==1 ??? SGEMM_A1B0(&NOTRANS, &NOTRANS, &N, &M, &K, &FONE, (float *) &pIn[inOffset], &LDA, (float *) &pKernel[kernGroupOffset], &K, &FZERO, &pOut[outOffset], &N); #else // ncc-3.4.20 fixup... Here I show general equivalent for the SGEMM... if (M>1) { // || K>1) { SGEMM(&NOTRANS, &NOTRANS, &N, &M, &K, &FONE, (float *) &pIn[inOffset], &LDA, // LDA=N for no_im2col (float *) &pKernel[kernGroupOffset], &K, // LDB=K for no_im2col &FZERO, &pOut[outOffset], &N); }else{ // M==1 has some BLAS segv !!! just write it out for now... 
// At -O3 and -O4, ncc should have -fassociative-math -fmatrix-multiply // and should emit matrix-multiply code for these // consulting sgemm docs // 3rd dim is the summation index float const* A = (float const*)&pIn[inOffset]; // size N x K float const* B = (float const*)&pKernel[kernGroupOffset]; // size K x M float * C = &pOut[outOffset]; // size N x M #if 0 // generic, long-hand matrix multiply if (M>1) { // actually this is NOT quite right yet :( for (int n=0; n<N; n++) { for (int m=0; m<M; m++) { C[m*N + n] = 0.0f; } } for (int n=0; n<N; n++) { for (int m=0; m<M; m++) { float acc = 0.0f; for (int k=0; k<K; k++) { acc += A[k*N + n] * B[m*K + k]; } C[m*N + n] += acc; // beta=0 } } }else #endif #if 1 if(M==1 && K>1){ // using just M==1 #pragma omp parallel if(N*K>GEMM_PARA_THRESH) for (int n=0; n<N; n++) { float acc = 0.0f; for (int k=0; k<K; k++) { acc += A[k*N+n] * B[k]; } C[n] = acc; // beta=0, no accumulation into C } }else #endif { // using M==1 and K==1 float b0 = B[0]; #pragma omp parallel if(N*K>GEMM_PARA_THRESH) for (int n=0; n<N; n++) { C[n] = A[n] * b0; } } } #endif if (pBias) { //printf("noi2c pBias M=%d N=%d K=%d ", M,N,K); #if SGEMM_M1_SEGFAULTS==0 // issues ncc-3.4.20 and M==1 ??? SGEMM(&NOTRANS, &NOTRANS, &N, &M, &IONE, &FONE, (float *) pOne, &N, (float *) &pBias[biasGroupOffset], &IONE, &FONE, &pOut[outOffset], &N); #elif SGEMM_M1_SEGFAULTS==1 // issues ncc-3.4.20 and M==1 ??? #if BIAS_SAXPY==0 SGEMM_A1B1K1(&NOTRANS, &NOTRANS, &N, &M, &IONE, &FONE, (float *) pOne, &N, // N x 1 (float *) &pBias[biasGroupOffset], &IONE, // 1 x M &FONE, &pOut[outOffset], &N); // N x M #else // note that this might be formulated as a // SAXPY( N=MN, // ALPHA=1.0, // X=&pBias[biasGroupOffset], // INCX=0, /* <-- "add constant" */ // Y=&pOut[outOffset], /* add to this */ // INCY=1 // ) // Unfortunately, this only handles for M=1 if (M>1) { SGEMM(&NOTRANS, &NOTRANS, &N, &M, &IONE, &FONE, (float *) pOne, &N, (float *) &pBias[biasGroupOffset], &IONE, &FONE, &pOut[outOffset], &N); }else{ //printf(" saxpy M=%d N=%d\n", M,N); cblas_saxpy( N, 1.0, &pBias[biasGroupOffset], 0, &pOut[outOffset], 1); } #endif #elif 0 // debug { // M==1 has some BLAS segv !!! just write it out for now... // K=1 summation index is a huge simplification // We might want to never fully call the SGEMM! //float const* A = (float const*)pOne; // size N x K float const* B = (float const*)&pBias[biasGroupOffset]; // size K x M float * C = &pOut[outOffset]; // size N x M if (1) { // further simplificcation only elides 1 scalar multiply //for (int n=0; n<N; n++) for (int m=0; m<M; m++) C[m*N + n] = 0.0f; #if 0 // version 0, all loops before simplification printf("x7"); int const K = 1; for (int n=0; n<N; n++) { for (int m=0; m<M; m++) { float acc = 0.0f; for (int k=0; k<K; k++) { acc += 1.0/* A[k*N + n] */ * B[m*K + k]; } C[m*N + n] += acc; // beta=0 } } #elif 0 // K=1 is a drastic simplification printf("x8"); for (int m=0; m<M; ++m) { for (int n=0; n<N; ++n) { C[m*N + n] += B[0]; // beta=0 } } #else // not working with omp parllel DBG("x9"); int const MN = M * N; //#pragma omp parallel if(MN > GEMM_PARA_THRESH) /* this cause wrong output */ for (int mn=0; mn < MN; ++mn) { C[mn] += B[0]; // beta=0 } } } #endif #else // dev code, summarized // maybe it's faster to always elide the SGEMM? 
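// Editorial note (not vednn source): with K == 1 and A == pOne (all 1.0f),
// this bias GEMM adds pBias[m] to all N pixels of output channel m. In the
// M == 1 workaround below that collapses to adding a single constant b0 to
// every output element -- the same thing the cblas_saxpy path expresses by
// passing incX = 0, so one bias value is re-read for each of the N outputs.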
if (M>1) { SGEMM(&NOTRANS, &NOTRANS, &N, &M, &IONE, &FONE, (float *) pOne, &N, (float *) &pBias[biasGroupOffset], &IONE, &FONE, &pOut[outOffset], &N); }else{ // workaround for M=1 segfault circa ncc 3.4.20 // using M==1 and K==1 // here A[N] is 1.0 float b0 = pBias[biasGroupOffset+0]; // B[0] int const MN = M * N; /* _Pragma("omp parallel if(MN > 32768)") //wrong output? */ for (int mn=0; mn < MN; ++mn) { pOut[outOffset+mn] += b0; // C[n] } } #endif }// if pBias } else { int M = outChannelGroup; int N = outWidth * outHeight; int K = inChannelGroup * kernWidth * kernHeight; im2col_cpu(&pIn[inOffset], inChannelGroup, inHeight, inWidth, kernHeight, kernWidth, outHeight, outWidth, padHeight, padWidth, strideHeight, strideWidth, dilationHeight, dilationWidth, pColBuff); #if SGEMM_M1_SEGFAULTS==0 // issues ncc-3.4.20 and M==1 ??? SGEMM(&NOTRANS, &NOTRANS, &N, &M, &K, &FONE, pColBuff, &N, (float *)&pKernel[kernGroupOffset], &K, &FZERO, &pOut[outOffset], &N); // segfault if M==1, at least w/ ncc 3.4.20 etc. #elif SGEMM_M1_SEGFAULTS==1 // issues ncc-3.4.20 and M==1 ??? SGEMM_A1B0(&NOTRANS, &NOTRANS, &N, &M, &K, &FONE, pColBuff, &N, (float *)&pKernel[kernGroupOffset], &K, &FZERO, &pOut[outOffset], &N); #else if (M>1) { SGEMM(&NOTRANS, &NOTRANS, &N, &M, &K, &FONE, pColBuff, &N, // N x K (float *)&pKernel[kernGroupOffset], &K, // K x M &FZERO, &pOut[outOffset], &N); // N x M }else{ // M==1 has some BLAS segv !!! just write it out for now... // At -O3 and -O4, ncc should have -fassociative-math -fmatrix-multiply // and should emit matrix-multiply code for these float const* A = pColBuff; // osz x icg*ksz (?) float const* B = (float const*)&pKernel[kernGroupOffset]; // icg*ksz x ocg float * C = &pOut[outOffset]; // osz x ocg (?) #if 0 if (M>1) { for (int n=0; n<N; n++) { for (int m=0; m<M; m++) { C[m*N + n] = 0.0f; } } for (int n=0; n<N; n++) { for (int m=0; m<M; m++) { float acc = 0.0f; for (int k=0; k<K; k++) { acc += A[k*N + n] * B[m*K + k]; } C[m*N + n] += acc; // beta=0, no accumulation into C } } }else #endif { // M==1 for (int n=0; n<N; n++) { float acc = 0.0f; for (int k=0; k<K; k++) { acc += A[k*N+n] * B[k]; } C[n] = acc; } } } #endif //printf(" back from SGEMM..."); fflush(stdout); if (pBias) { //printf("i2c bias...\n"); fflush(stdout); #if SGEMM_M1_SEGFAULTS==0 // issues ncc-3.4.20 and M==1 ??? SGEMM(&NOTRANS, &NOTRANS, &N, &M, &IONE, &FONE, (float *)pOne, &N, (float *) &pBias[biasGroupOffset], &IONE, &FONE, &pOut[outOffset], &N); #elif SGEMM_M1_SEGFAULTS==1 // issues ncc-3.4.20 and M==1 ??? #if 1 //BIAS_SAXPY==0 SGEMM_A1B1K1(&NOTRANS, &NOTRANS, &N, &M, &IONE, &FONE, (float *)pOne, &N, // size N x 1 (float *) &pBias[biasGroupOffset], &IONE, // size 1 x M &FONE, &pOut[outOffset], &N); // size N x M #else cblas_saxpy( M*N, 1.0, &pBias[biasGroupOffset], 0, &pOut[outOffset], 1); #endif #else if (M>1) { SGEMM(&NOTRANS, &NOTRANS, &N, &M, &IONE, &FONE, (float *) pOne, &N, (float *) &pBias[biasGroupOffset], &IONE, &FONE, &pOut[outOffset], &N); }else{ // workaround for M=1 segfault circa ncc 3.4.20 // using M==1 and K==1 // here A[N] is 1.0 float b0 = pBias[biasGroupOffset+0]; // B[0] for (int n=0; n<N; n++) { pOut[outOffset+n] += b0; // C[n] } } #endif } } // no_im2col? 
} // group } // batch if( transformed_filter != NULL ) free(transformed_filter) ; LFTRACE_END("convolution_forward_gemm"); return VEDNN_SUCCESS; } vednnError_t convolution_backward_data_gemm( const vednnTensorParam_t * restrict pParamGradOut, const void * restrict pDataGradOut, const vednnFilterParam_t * restrict pParamKernel, const void * restrict pDataKernel, const vednnTensorParam_t * restrict pParamGradIn, void * restrict pDataGradIn, float * restrict pColBuff, const vednnConvolutionParam_t * restrict pParamConv ) { LFTRACE_BEGIN("convolution_backward_data_gemm"); int n, g; int batch = pParamGradOut->batch; int gOutChannel = pParamGradOut->channel; int gOutWidth = pParamGradOut->width; int gOutHeight = pParamGradOut->height; int gInChannel = pParamGradIn->channel; int gInWidth = pParamGradIn->width; int gInHeight = pParamGradIn->height; int kernWidth = pParamKernel->width; int kernHeight = pParamKernel->height; int group = pParamConv->group; int strideWidth = pParamConv->strideWidth;; int strideHeight = pParamConv->strideHeight; int padWidth = pParamConv->padWidth; int padHeight = pParamConv->padHeight; int dilationWidth = pParamConv->dilationWidth; int dilationHeight = pParamConv->dilationHeight; int gOutChannelGroup = gOutChannel / group; int gInChannelGroup = gInChannel / group; int no_im2col = (kernWidth == 1 && kernHeight == 1 && strideWidth == 1 && strideHeight == 1 && padWidth == 0 && padHeight == 0); float * transformed_filter = NULL ; if( pParamKernel->layout == VEDNN_FILTER_LAYOUT_HWCN ) { // only support group=1 const int N = gOutChannel ; const int C = gInChannel ; const int H = kernHeight ; const int W = kernWidth ; float * filter = (float *) pDataKernel ; transformed_filter = (float *) malloc(sizeof(float)*N*C*H*W) ; #pragma omp parallel for for(int n=0; n<N ; n++) { for(int c=0; c<C ; c++) { for(int hw=0; hw<H*W ; hw++) { transformed_filter[((n*C+c)*H)*W+hw] = filter[((hw)*C+c)*N+n] ; } } } } const float * restrict pGradOut = pDataGradOut; const float * restrict pKernel = transformed_filter == NULL ? pDataKernel : transformed_filter ; float * restrict pGradIn = pDataGradIn; for (n = 0; n < batch; n++) { // this->num_ int gOutBatchOffset = n * gOutChannel * gOutWidth * gOutHeight; int gInBatchOffset = n * gInChannel * gInWidth * gInHeight; for (g = 0; g < group; g++) { int gOutGroupOffset = g * gOutChannelGroup * gOutHeight * gOutWidth; int gInGroupOffset = g * gInChannelGroup * gInHeight * gInWidth; int kernGroupOffset = g * gInChannelGroup * gOutChannelGroup * kernHeight * kernWidth; int gOutOffset = gOutBatchOffset + gOutGroupOffset; int gInOffset = gInBatchOffset + gInGroupOffset; int M = gInChannelGroup * kernWidth * kernHeight; int N = gOutWidth * gOutHeight; int K = gOutChannelGroup; if( no_im2col ) { SGEMM_A1B0t(&NOTRANS, &TRANS, &N, &M, &K, &FONE, (float *) &pGradOut[gOutOffset], &N, // N x K (float *) &pKernel[kernGroupOffset], &M, // M x K (trans!) 
&FZERO, &pGradIn[gInOffset], &N); // N x M } else { SGEMM_A1B0t(&NOTRANS, &TRANS, &N, &M, &K, &FONE, (float *) &pGradOut[gOutOffset], &N, (float *) &pKernel[kernGroupOffset], &M, &FZERO, pColBuff, &N); col2im_cpu(pColBuff, gInChannelGroup, gInHeight, gInWidth, kernHeight, kernWidth, gOutHeight, gOutWidth, padHeight, padWidth, strideHeight, strideWidth, dilationHeight, dilationWidth, &pGradIn[gInOffset]); } } // group } // batch if( transformed_filter != NULL ) free(transformed_filter) ; LFTRACE_END("convolution_backward_data_gemm"); return VEDNN_SUCCESS; } vednnError_t convolution_backward_filter_gemm( const vednnTensorParam_t * restrict pParamIn, const void * restrict pDataIn, const vednnTensorParam_t * restrict pParamGradOut, const void * restrict pDataGradOut, const vednnFilterParam_t * restrict pParamGradKernel, void * restrict pDataGradKernel, float * restrict pColBuff, const vednnConvolutionParam_t * restrict pParamConv ) { LFTRACE_BEGIN("convolution_backward_filter_gemm"); int n, g; int batch = pParamIn->batch; int inChannel = pParamIn->channel; int inWidth = pParamIn->width; int inHeight = pParamIn->height; int outChannel = pParamGradOut->channel; int outWidth = pParamGradOut->width; int outHeight = pParamGradOut->height; int kernWidth = pParamGradKernel->width; int kernHeight = pParamGradKernel->height; int group = pParamConv->group; int strideWidth = pParamConv->strideWidth;; int strideHeight = pParamConv->strideHeight; int padWidth = pParamConv->padWidth; int padHeight = pParamConv->padHeight; int dilationWidth = pParamConv->dilationWidth; int dilationHeight = pParamConv->dilationHeight; int inChannelGroup = inChannel / group; // pParamKernel->inChannel int outChannelGroup = outChannel / group; // pParamKernel->outChannel int no_im2col = (kernWidth == 1 && kernHeight == 1 && strideWidth == 1 && strideHeight == 1 && padWidth == 0 && padHeight == 0); float * transformed_filter = NULL ; if( pParamGradKernel->layout == VEDNN_FILTER_LAYOUT_HWCN ) { // only support group=1 const int N = outChannel ; const int C = inChannel ; const int H = kernHeight ; const int W = kernWidth ; transformed_filter = (float *) malloc(sizeof(float)*N*C*H*W) ; #pragma omp parallel for for(int i=0; i<N*C*H*W; i++) transformed_filter[i] = 0.f ; } const float * restrict pIn = pDataIn; const float * restrict pOut = pDataGradOut; float * restrict pKernel = transformed_filter == NULL ? 
pDataGradKernel : transformed_filter ; for (n = 0; n < batch; n++) { // this->num_ int inBatchOffset = n * inChannel * inWidth * inHeight; int outBatchOffset = n * outChannel * outWidth * outHeight; for (g = 0; g < group; g++) { int inGroupOffset = g * inChannelGroup * inHeight * inWidth; int outGroupOffset = g * outChannelGroup * outHeight * outWidth; int kernGroupOffset = g * outChannelGroup * inChannelGroup * kernHeight * kernWidth; int inOffset = inBatchOffset + inGroupOffset; int outOffset = outBatchOffset + outGroupOffset; if( no_im2col ) { int M = outChannelGroup; int N = inChannelGroup * kernWidth * kernHeight; int K = outWidth * outHeight; SGEMM_A1tB1(&TRANS, &NOTRANS, &N, &M, &K, &FONE, (float*)&pIn[inOffset], &K, (float*)&pOut[outOffset], &K, &FONE, &pKernel[kernGroupOffset], &N); } else { im2col_cpu(&pIn[inOffset], inChannelGroup, inHeight, inWidth, kernHeight, kernWidth, outHeight, outWidth, padHeight, padWidth, strideHeight, strideWidth, dilationHeight, dilationWidth, pColBuff); int M = outChannelGroup; int N = inChannelGroup * kernWidth * kernHeight; int K = outWidth * outHeight; SGEMM_A1tB1(&TRANS, &NOTRANS, &N, &M, &K, &FONE, pColBuff, &K, (float*)&pOut[outOffset], &K, &FONE, &pKernel[kernGroupOffset], &N); } } // group } // batch if( transformed_filter != NULL ) { const int N = outChannel ; const int C = inChannel ; const int H = kernHeight ; const int W = kernWidth ; float * filter = (float *) pDataGradKernel ; #pragma omp parallel for for(int n=0; n<N ; n++) { for(int c=0; c<C ; c++) { for(int hw=0; hw<H*W ; hw++) { filter[((hw)*C+c)*N+n] += transformed_filter[((n*C+c)*H)*W+hw] ; } } } free(transformed_filter) ; } LFTRACE_END("convolution_backward_filter_gemm"); return VEDNN_SUCCESS; } // vim: et ts=2 sw=2 cindent cino=^0,=0,l0,\:0,N-s syntax=cpp.doxygen
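Editorial aside on the GEMM bookkeeping used throughout convolution_forward_gemm above: per group, M = outChannelGroup, N = outHeight*outWidth, and K = inChannelGroup*kernHeight*kernWidth, and the M == 1 segfault workaround reduces the SGEMM to one dot product per output pixel. The function below is a minimal sketch of that scalar fallback for the beta == 0 case; demo_gemm_m1_beta0 is an illustrative name, not a vednn entry point, and it uses the same A[k*N + n] indexing as SGEMM_SAFE_A1B0.

/* Illustrative only -- the scalar M == 1 fallback, written out as a function. */
static void demo_gemm_m1_beta0(int N, int K,
                               const float *A,   /* im2col buffer, indexed A[k*N + n] */
                               const float *B,   /* single kernel row, length K       */
                               float *C)         /* one value per output pixel        */
{
    for (int n = 0; n < N; ++n) {
        float acc = 0.0f;
        for (int k = 0; k < K; ++k)
            acc += A[k * N + n] * B[k];
        C[n] = acc;   /* beta == 0: overwrite, no accumulation */
    }
}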
for_simd_misc_messages.c
// RUN: %clang_cc1 -fsyntax-only -fopenmp -fopenmp-version=45 -verify=expected,omp45 %s -Wuninitialized // RUN: %clang_cc1 -fsyntax-only -fopenmp -fopenmp-version=50 -verify=expected,omp50 %s -Wuninitialized // RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -fopenmp-version=45 -verify=expected,omp45 -verify %s -Wuninitialized // RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -fopenmp-version=50 -verify=expected,omp50 -verify %s -Wuninitialized void xxx(int argc) { int x; // expected-note {{initialize the variable 'x' to silence this warning}} #pragma omp for simd for (int i = 0; i < 10; ++i) argc = x; // expected-warning {{variable 'x' is uninitialized when used here}} } // expected-error@+1 {{unexpected OpenMP directive '#pragma omp for simd'}} #pragma omp for simd // expected-error@+1 {{unexpected OpenMP directive '#pragma omp for simd'}} #pragma omp for simd foo void test_no_clause() { int i; #pragma omp for simd for (i = 0; i < 16; ++i) ; // expected-error@+2 {{statement after '#pragma omp for simd' must be a for loop}} #pragma omp for simd ++i; } void test_branch_protected_scope() { int i = 0; L1: ++i; int x[24]; #pragma omp parallel #pragma omp for simd for (i = 0; i < 16; ++i) { if (i == 5) goto L1; // expected-error {{use of undeclared label 'L1'}} else if (i == 6) return; // expected-error {{cannot return from OpenMP region}} else if (i == 7) goto L2; else if (i == 8) { L2: x[i]++; } } if (x[0] == 0) goto L2; // expected-error {{use of undeclared label 'L2'}} else if (x[1] == 1) goto L1; } void test_invalid_clause() { int i; #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp for simd' are ignored}} #pragma omp for simd foo bar for (i = 0; i < 16; ++i) ; } void test_non_identifiers() { int i, x; #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp for simd' are ignored}} #pragma omp for simd; for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp for simd' are ignored}} #pragma omp for simd linear(x); for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp for simd' are ignored}} #pragma omp for simd private(x); for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp for simd' are ignored}} #pragma omp for simd, private(x); for (i = 0; i < 16; ++i) ; } extern int foo(); void test_safelen() { int i; // expected-error@+1 {{expected '('}} #pragma omp for simd safelen for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp for simd safelen( for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp for simd safelen() for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp for simd safelen(, for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp for simd safelen(, ) for (i = 0; i < 16; ++i) ; // expected-warning@+2 {{extra tokens at the end of '#pragma omp for simd' are ignored}} // expected-error@+1 {{expected '('}} #pragma omp for simd safelen 4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp for simd safelen(4 for (i = 0; i < 16; ++i) ; // expected-error@+2 
{{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp for simd safelen(4, for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp for simd safelen(4, ) for (i = 0; i < 16; ++i) ; #pragma omp for simd safelen(4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp for simd safelen(4 4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp for simd safelen(4, , 4) for (i = 0; i < 16; ++i) ; #pragma omp for simd safelen(4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp for simd safelen(4, 8) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp for simd safelen(2.5) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp for simd safelen(foo()) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}} #pragma omp for simd safelen(-5) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}} #pragma omp for simd safelen(0) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}} #pragma omp for simd safelen(5 - 5) for (i = 0; i < 16; ++i) ; } void test_simdlen() { int i; // expected-error@+1 {{expected '('}} #pragma omp for simd simdlen for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp for simd simdlen( for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp for simd simdlen() for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp for simd simdlen(, for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp for simd simdlen(, ) for (i = 0; i < 16; ++i) ; // expected-warning@+2 {{extra tokens at the end of '#pragma omp for simd' are ignored}} // expected-error@+1 {{expected '('}} #pragma omp for simd simdlen 4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp for simd simdlen(4 for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp for simd simdlen(4, for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp for simd simdlen(4, ) for (i = 0; i < 16; ++i) ; #pragma omp for simd simdlen(4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp for simd simdlen(4 4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp for simd simdlen(4, , 4) for (i = 0; i < 16; ++i) ; #pragma omp for simd simdlen(4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp for simd simdlen(4, 8) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp for simd 
simdlen(2.5) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp for simd simdlen(foo()) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}} #pragma omp for simd simdlen(-5) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}} #pragma omp for simd simdlen(0) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}} #pragma omp for simd simdlen(5 - 5) for (i = 0; i < 16; ++i) ; } void test_safelen_simdlen() { int i; // expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}} #pragma omp for simd simdlen(6) safelen(5) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}} #pragma omp for simd safelen(5) simdlen(6) for (i = 0; i < 16; ++i) ; } void test_collapse() { int i; #pragma omp parallel // expected-error@+1 {{expected '('}} #pragma omp for simd collapse for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp for simd collapse( for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp for simd collapse() for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp for simd collapse(, for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp for simd collapse(, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-warning@+2 {{extra tokens at the end of '#pragma omp for simd' are ignored}} // expected-error@+1 {{expected '('}} #pragma omp for simd collapse 4) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp for simd collapse(4 for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp for simd', but found only 1}} #pragma omp parallel // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp for simd collapse(4, for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp for simd', but found only 1}} #pragma omp parallel // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp for simd collapse(4, ) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp for simd', but found only 1}} #pragma omp parallel // expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp for simd collapse(4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp for simd', but found only 1}} #pragma omp parallel // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp for simd collapse(4 4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 
for loops after '#pragma omp for simd', but found only 1}} #pragma omp parallel // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp for simd collapse(4, , 4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp for simd', but found only 1}} #pragma omp parallel #pragma omp for simd collapse(4) for (int i1 = 0; i1 < 16; ++i1) for (int i2 = 0; i2 < 16; ++i2) for (int i3 = 0; i3 < 16; ++i3) for (int i4 = 0; i4 < 16; ++i4) foo(); #pragma omp parallel // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp for simd collapse(4, 8) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp for simd', but found only 1}} #pragma omp parallel // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp for simd collapse(2.5) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp for simd collapse(foo()) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp for simd collapse(-5) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp for simd collapse(0) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp for simd collapse(5 - 5) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp for simd collapse(2) for (i = 0; i < 16; ++i) // expected-note {{defined as lastprivate}} // expected-note@+1 {{variable with automatic storage duration is predetermined as private; perhaps you forget to enclose 'omp for simd' directive into a parallel or another task region?}} for (int j = 0; j < 16; ++j) // expected-error@+2 2 {{reduction variable must be shared}} // expected-error@+1 {{OpenMP constructs may not be nested inside a simd region}} #pragma omp for simd reduction(+ : i, j) for (int k = 0; k < 16; ++k) i += j; } void test_linear() { int i; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp for simd linear( for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp for simd linear(, for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} #pragma omp for simd linear(, ) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp for simd linear() for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp for simd linear(int) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected variable name}} #pragma omp for simd linear(0) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{use of undeclared identifier 'x'}} #pragma omp for simd linear(x) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{use of undeclared identifier 'x'}} // expected-error@+1 {{use of undeclared identifier 'y'}} #pragma omp for simd linear(x, y) for (i = 0; i < 16; ++i) ; // expected-error@+3 {{use of undeclared identifier 'x'}} // expected-error@+2 {{use of 
undeclared identifier 'y'}} // expected-error@+1 {{use of undeclared identifier 'z'}} #pragma omp for simd linear(x, y, z) for (i = 0; i < 16; ++i) ; int x, y; // expected-error@+1 {{expected expression}} #pragma omp for simd linear(x :) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp for simd linear(x :, ) for (i = 0; i < 16; ++i) ; #pragma omp for simd linear(x : 1) for (i = 0; i < 16; ++i) ; #pragma omp for simd linear(x : 2 * 2) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp for simd linear(x : 1, y) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp for simd linear(x : 1, y, z : 1) for (i = 0; i < 16; ++i) ; // expected-note@+2 {{defined as linear}} // expected-error@+1 {{linear variable cannot be linear}} #pragma omp for simd linear(x) linear(x) for (i = 0; i < 16; ++i) ; // expected-note@+2 {{defined as private}} // expected-error@+1 {{private variable cannot be linear}} #pragma omp for simd private(x) linear(x) for (i = 0; i < 16; ++i) ; // expected-note@+2 {{defined as linear}} // expected-error@+1 {{linear variable cannot be private}} #pragma omp for simd linear(x) private(x) for (i = 0; i < 16; ++i) ; // expected-warning@+1 {{zero linear step (x and other variables in clause should probably be const)}} #pragma omp for simd linear(x, y : 0) for (i = 0; i < 16; ++i) ; // expected-note@+2 {{defined as linear}} // expected-error@+1 {{linear variable cannot be lastprivate}} #pragma omp for simd linear(x) lastprivate(x) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-note@+2 {{defined as lastprivate}} // expected-error@+1 {{lastprivate variable cannot be linear}} #pragma omp for simd lastprivate(x) linear(x) for (i = 0; i < 16; ++i) ; } void test_aligned() { int i; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp for simd aligned( for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp for simd aligned(, for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} #pragma omp for simd aligned(, ) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp for simd aligned() for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp for simd aligned(int) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected variable name}} #pragma omp for simd aligned(0) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{use of undeclared identifier 'x'}} #pragma omp for simd aligned(x) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{use of undeclared identifier 'x'}} // expected-error@+1 {{use of undeclared identifier 'y'}} #pragma omp for simd aligned(x, y) for (i = 0; i < 16; ++i) ; // expected-error@+3 {{use of undeclared identifier 'x'}} // expected-error@+2 {{use of undeclared identifier 'y'}} // expected-error@+1 {{use of undeclared identifier 'z'}} #pragma omp for simd aligned(x, y, z) for (i = 0; i < 16; ++i) ; int *x, y, z[25]; // expected-note 4 {{'y' defined here}} #pragma omp for simd aligned(x) for (i = 0; i < 16; ++i) ; #pragma omp for simd aligned(z) for (i = 0; i < 16; ++i) ; // expected-error@+1 
{{expected expression}} #pragma omp for simd aligned(x :) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp for simd aligned(x :, ) for (i = 0; i < 16; ++i) ; #pragma omp for simd aligned(x : 1) for (i = 0; i < 16; ++i) ; #pragma omp for simd aligned(x : 2 * 2) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp for simd aligned(x : 1, y) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp for simd aligned(x : 1, y, z : 1) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}} #pragma omp for simd aligned(x, y) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}} #pragma omp for simd aligned(x, y, z) for (i = 0; i < 16; ++i) ; // expected-note@+2 {{defined as aligned}} // expected-error@+1 {{a variable cannot appear in more than one aligned clause}} #pragma omp for simd aligned(x) aligned(z, x) for (i = 0; i < 16; ++i) ; // expected-note@+3 {{defined as aligned}} // expected-error@+2 {{a variable cannot appear in more than one aligned clause}} // expected-error@+1 2 {{argument of aligned clause should be array or pointer, not 'int'}} #pragma omp for simd aligned(x, y, z) aligned(y, z) for (i = 0; i < 16; ++i) ; } void test_private() { int i; #pragma omp parallel // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp for simd private( for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp for simd private(, for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 2 {{expected expression}} #pragma omp for simd private(, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp for simd private() for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp for simd private(int) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected variable name}} #pragma omp for simd private(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp parallel #pragma omp for simd private(x) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp for simd private(x, y) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp for simd private(x, y, z) for (i = 0; i < 16; ++i) { x = y * i + z; } } void test_lastprivate() { int i; #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 {{expected expression}} #pragma omp for simd lastprivate( for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp for simd lastprivate(, for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 2 {{expected expression}} #pragma omp for simd lastprivate(, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp for simd lastprivate() for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp for simd lastprivate(int) for (i = 0; i < 16; ++i) ; 
#pragma omp parallel // expected-error@+1 {{expected variable name}} #pragma omp for simd lastprivate(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp parallel #pragma omp for simd lastprivate(x) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp for simd lastprivate(x, y) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp for simd lastprivate(x, y, z) for (i = 0; i < 16; ++i) ; } void test_firstprivate() { int i; #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 {{expected expression}} #pragma omp for simd firstprivate( for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp for simd firstprivate(, for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 2 {{expected expression}} #pragma omp for simd firstprivate(, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp for simd firstprivate() for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp for simd firstprivate(int) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected variable name}} #pragma omp for simd firstprivate(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp parallel #pragma omp for simd lastprivate(x) firstprivate(x) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp for simd lastprivate(x, y) firstprivate(x, y) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp for simd lastprivate(x, y, z) firstprivate(x, y, z) for (i = 0; i < 16; ++i) ; } void test_loop_messages() { float a[100], b[100], c[100]; #pragma omp parallel // expected-error@+2 {{variable must be of integer or pointer type}} #pragma omp for simd for (float fi = 0; fi < 10.0; fi++) { c[(int)fi] = a[(int)fi] + b[(int)fi]; } #pragma omp parallel // expected-error@+2 {{variable must be of integer or pointer type}} #pragma omp for simd for (double fi = 0; fi < 10.0; fi++) { c[(int)fi] = a[(int)fi] + b[(int)fi]; } } void test_nontemporal() { int i; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp for simd nontemporal( for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} expected-error@+1 2 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp for simd nontemporal(, for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} expected-error@+1 2 {{expected expression}} #pragma omp for simd nontemporal(, ) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} expected-error@+1 {{expected expression}} #pragma omp for simd nontemporal() for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} expected-error@+1 {{expected expression}} #pragma omp for simd nontemporal(int) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} omp50-error@+1 {{expected variable name}} #pragma omp for simd nontemporal(0) for (i = 0; i < 16; ++i) ; // 
omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} expected-error@+1 {{use of undeclared identifier 'x'}} #pragma omp for simd nontemporal(x) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{use of undeclared identifier 'x'}} // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} expected-error@+1 {{use of undeclared identifier 'y'}} #pragma omp for simd nontemporal(x, y) for (i = 0; i < 16; ++i) ; // expected-error@+3 {{use of undeclared identifier 'x'}} // expected-error@+2 {{use of undeclared identifier 'y'}} // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} expected-error@+1 {{use of undeclared identifier 'z'}} #pragma omp for simd nontemporal(x, y, z) for (i = 0; i < 16; ++i) ; int x, y; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp for simd nontemporal(x :) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} #pragma omp for simd nontemporal(x :, ) for (i = 0; i < 16; ++i) ; // omp50-note@+2 {{defined as nontemporal}} // omp45-error@+1 2 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} omp50-error@+1 {{a variable cannot appear in more than one nontemporal clause}} #pragma omp for simd nontemporal(x) nontemporal(x) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} #pragma omp for simd private(x) nontemporal(x) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} #pragma omp for simd nontemporal(x) private(x) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} expected-note@+1 {{to match this '('}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} expected-error@+1 {{expected ')'}} #pragma omp for simd nontemporal(x, y : 0) for (i = 0; i < 16; ++i) ; #pragma omp parallel // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} #pragma omp for simd nontemporal(x) lastprivate(x) for (i = 0; i < 16; ++i) ; #pragma omp parallel // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} #pragma omp for simd lastprivate(x) nontemporal(x) for (i = 0; i < 16; ++i) ; }
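// A minimal, standalone sketch (not part of the test above) showing well-formed
// counterparts of the clauses whose misuse the diagnostics exercise: linear with
// a step, aligned with an alignment, private, and lastprivate.  The function and
// variable names are illustrative only.
void valid_for_simd_clauses(double *a, int n) {
  int i, j = 0;        // j is linear: it advances by 2 each logical iteration
  double tmp, last;    // tmp is private per iteration; last survives the loop
#pragma omp parallel
#pragma omp for simd linear(j : 2) aligned(a : 32) private(tmp) lastprivate(last)
  for (i = 0; i < n; ++i) {
    tmp = a[i] + j;    // aligned(a : 32) asserts (does not create) 32-byte alignment
    last = tmp;
    a[i] = tmp;
  }
  (void)last;          // last now holds the value from the sequentially final iteration
}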
backward_binary_reduce_impl.h
/*! * Copyright (c) 2019 by Contributors * \file kernel/cuda/backward_binary_reduce_impl.h * \brief Minigun CPU UDFs for bacward binary reduce */ #ifndef DGL_KERNEL_CPU_BACKWARD_BINARY_REDUCE_IMPL_H_ #define DGL_KERNEL_CPU_BACKWARD_BINARY_REDUCE_IMPL_H_ #include <minigun/minigun.h> #include "../binary_reduce_impl_decl.h" #include "../utils.h" #include "./functor.h" #include "../csr_interface.h" namespace dgl { namespace kernel { namespace cpu { // Minigun UDF to compute backward binary reduce. template <int Mode, typename Idx, typename DType, typename Functors> struct BackwardBinaryReduce { static inline bool CondEdge( Idx src, Idx dst, Idx eid, BackwardGData<Idx, DType>* gdata) { return true; } static inline void ApplyEdge( Idx src, Idx dst, Idx eid, BackwardGData<Idx, DType>* gdata) { const int64_t D = gdata->x_length; const int64_t len = gdata->data_len; Idx lid = Functors::SelectLeft(src, eid, dst); Idx rid = Functors::SelectRight(src, eid, dst); Idx oid = Functors::SelectOut(src, eid, dst); if (gdata->lhs_mapping) { lid = Functors::GetId(lid, gdata->lhs_mapping); } if (gdata->rhs_mapping) { rid = Functors::GetId(rid, gdata->rhs_mapping); } if (gdata->out_mapping) { oid = Functors::GetId(oid, gdata->out_mapping); } DType* lhsoff = gdata->lhs_data + lid * D * len; DType* rhsoff = gdata->rhs_data + rid * D * len; DType* outoff = gdata->out_data + oid * D; DType* gradlhsoff = gdata->grad_lhs_data + lid * D * len; DType* gradrhsoff = gdata->grad_rhs_data + rid * D * len; DType* gradoutoff = gdata->grad_out_data + oid * D; for (int64_t tx = 0; tx < D; ++tx) { DType out = Functors::Read(outoff + tx); DType grad_out = Functors::Read(gradoutoff + tx); DType e = Functors::Op(lhsoff + tx * len, rhsoff + tx * len, len); DType grad_e = grad_out * Functors::BackwardWrite(e, out); if (0 == grad_e) continue; DType* lhs_base = lhsoff + tx * len; DType* rhs_base = rhsoff + tx * len; if (Mode == binary_op::kGradBoth) { for (int64_t i = 0; i < len; ++i) { DType lhs = Functors::Read(lhs_base + i); DType rhs = Functors::Read(rhs_base + i); DType grad_lhs = grad_e * Functors::BackwardOpLhs(lhs, rhs, e); DType grad_rhs = grad_e * Functors::BackwardOpRhs(lhs, rhs, e); DType grad = grad_lhs + grad_rhs; #pragma omp atomic gradlhsoff[tx * len + i] += grad; } } else if (Mode == binary_op::kGradLhs) { for (int64_t i = 0; i < len; ++i) { DType lhs = Functors::Read(lhs_base + i); DType rhs = Functors::Read(rhs_base + i); DType grad_lhs = grad_e * Functors::BackwardOpLhs(lhs, rhs, e); #pragma omp atomic gradlhsoff[tx * len + i] += grad_lhs; } } else if (Mode == binary_op::kGradRhs) { for (int64_t i = 0; i < len; ++i) { DType lhs = Functors::Read(lhs_base + i); DType rhs = Functors::Read(rhs_base + i); DType grad_rhs = grad_e * Functors::BackwardOpRhs(lhs, rhs, e); #pragma omp atomic gradrhsoff[tx * len + i] += grad_rhs; } } } } }; // Minigun UDF to compute backward binary reduce with broadcasting. 
template <int Mode, int NDim, typename Idx, typename DType, typename Functors> struct BackwardBinaryReduceBcast { static inline bool CondEdge( Idx src, Idx dst, Idx eid, BackwardBcastGData<NDim, Idx, DType>* gdata) { return true; } static inline void ApplyEdge( Idx src, Idx dst, Idx eid, BackwardBcastGData<NDim, Idx, DType>* gdata) { const int64_t len = gdata->data_len; Idx lid = Functors::SelectLeft(src, eid, dst); Idx rid = Functors::SelectRight(src, eid, dst); Idx oid = Functors::SelectOut(src, eid, dst); if (gdata->lhs_mapping) { lid = Functors::GetId(lid, gdata->lhs_mapping); } if (gdata->rhs_mapping) { rid = Functors::GetId(rid, gdata->rhs_mapping); } if (gdata->out_mapping) { oid = Functors::GetId(oid, gdata->out_mapping); } DType* lhsoff = gdata->lhs_data + lid * gdata->lhs_len * len; DType* rhsoff = gdata->rhs_data + rid * gdata->rhs_len * len; DType* outoff = gdata->out_data + oid * gdata->out_len; DType* gradlhsoff = gdata->grad_lhs_data + lid * gdata->out_len * len; DType* gradrhsoff = gdata->grad_rhs_data + rid * gdata->out_len * len; DType* gradoutoff = gdata->grad_out_data + oid * gdata->out_len; int64_t tmp[NDim]; // store unraveled idx. for (int64_t tx = 0; tx < gdata->out_len; ++tx) { Unravel(tx, gdata->ndim, gdata->out_shape, gdata->out_stride, tmp); DType out = Functors::Read(outoff + tx); DType grad_out = Functors::Read(gradoutoff + tx); DType e = Functors::Op( lhsoff + Ravel(tmp, gdata->ndim, gdata->lhs_shape, gdata->lhs_stride) * len, rhsoff + Ravel(tmp, gdata->ndim, gdata->rhs_shape, gdata->rhs_stride) * len, len); DType grad_e = grad_out * Functors::BackwardWrite(e, out); // (pawelpiotrowicz) Although we can technically add the same condition for // skipping atomic additions as in BackwardBinaryReduce, doing so made the // speed 2% slower in GCMC training on MovieLens-1M with 24 OpenMP threads. // For more details, see https://github.com/dmlc/dgl/pull/1527. // TODO(BarclayII): Needs further investigation and benchmarking. DType* lhs_base = lhsoff + Ravel(tmp, gdata->ndim, gdata->lhs_shape, gdata->lhs_stride) * len; DType* rhs_base = rhsoff + Ravel(tmp, gdata->ndim, gdata->rhs_shape, gdata->rhs_stride) * len; if (Mode == binary_op::kGradBoth) { for (int64_t i = 0; i < len; ++i) { DType lhs = Functors::Read(lhs_base + i); DType rhs = Functors::Read(rhs_base + i); DType grad_lhs = grad_e * Functors::BackwardOpLhs(lhs, rhs, e); DType grad_rhs = grad_e * Functors::BackwardOpRhs(lhs, rhs, e); DType grad = grad_lhs + grad_rhs; #pragma omp atomic gradlhsoff[tx * len + i] += grad; } } else if (Mode == binary_op::kGradLhs) { for (int64_t i = 0; i < len; ++i) { DType lhs = Functors::Read(lhs_base + i); DType rhs = Functors::Read(rhs_base + i); DType grad_lhs = grad_e * Functors::BackwardOpLhs(lhs, rhs, e); #pragma omp atomic gradlhsoff[tx * len + i] += grad_lhs; } } else if (Mode == binary_op::kGradRhs) { for (int64_t i = 0; i < len; ++i) { DType lhs = Functors::Read(lhs_base + i); DType rhs = Functors::Read(rhs_base + i); DType grad_rhs = grad_e * Functors::BackwardOpRhs(lhs, rhs, e); #pragma omp atomic gradrhsoff[tx * len + i] += grad_rhs; } } } } }; // Auxiliary template used in UDF. 
template <typename Idx, typename DType, typename LeftSelector, typename RightSelector, typename BinaryOp, typename Reducer> struct BackwardFunctorsTempl { static inline Idx SelectOut( Idx src, Idx edge, Idx dst) { typedef typename OutSelector<Reducer>::Type OutTarget; return SwitchSrcDst<OutTarget>::Type::Call(src, edge, dst); } static inline Idx SelectLeft( Idx src, Idx edge, Idx dst) { return LeftSelector::Call(src, edge, dst); } static inline Idx SelectRight( Idx src, Idx edge, Idx dst) { return RightSelector::Call(src, edge, dst); } static inline DType Op(DType* lhs, DType* rhs, int64_t len) { return BinaryOp::Call(lhs, rhs, len); } static inline DType Read(DType* addr) { return *addr; } static inline void Write(DType* addr, DType val) { Reducer::Call(addr, val); } static inline Idx GetId(Idx id, Idx* id_map) { return *(id_map + id); } static inline DType BackwardWrite(DType val, DType accum) { return Reducer::BackwardCall(val, accum); } static inline DType BackwardOpLhs(DType lhs, DType rhs, DType out) { return BinaryOp::BackwardLhs(lhs, rhs, out); } static inline DType BackwardOpRhs(DType lhs, DType rhs, DType out) { return BinaryOp::BackwardRhs(lhs, rhs, out); } }; typedef minigun::advance::Config<true, minigun::advance::kV2N> AdvanceConfig; } // namespace cpu // Template implementation of BackwardBinaryReduce operator. template <int XPU, int Mode, typename Idx, typename DType, typename LeftSelector, typename RightSelector, typename BinaryOp, typename Reducer> void CallBackwardBinaryReduce( const minigun::advance::RuntimeConfig& rtcfg, const CSRWrapper& graph, BackwardGData<Idx, DType>* gdata) { // For backward computation, we use reverse csr and switch dst and src. // This benefits the most common src_op_edge or copy_src case, because the // gradients of src are now aggregated into destination buffer to reduce // competition of atomic add. auto incsr = graph.GetInCSRMatrix(); minigun::Csr<Idx> csr = utils::CreateCsr<Idx>(incsr.indptr, incsr.indices); typedef cpu::BackwardFunctorsTempl<Idx, DType, typename SwitchSrcDst<LeftSelector>::Type, typename SwitchSrcDst<RightSelector>::Type, BinaryOp, Reducer> Functors; typedef cpu::BackwardBinaryReduce<Mode, Idx, DType, Functors> UDF; // If the user-given mapping is none and the target is edge data, we need to // replace the mapping by the edge ids in the csr graph so that the edge // data is correctly read/written. if (LeftSelector::target == binary_op::kEdge && gdata->lhs_mapping == nullptr) { gdata->lhs_mapping = static_cast<Idx*>(incsr.data->data); } if (RightSelector::target == binary_op::kEdge && gdata->rhs_mapping == nullptr) { gdata->rhs_mapping = static_cast<Idx*>(incsr.data->data); } if (OutSelector<Reducer>::Type::target == binary_op::kEdge && gdata->out_mapping == nullptr) { gdata->out_mapping = static_cast<Idx*>(incsr.data->data); } // TODO(minjie): allocator minigun::advance::Advance<XPU, Idx, cpu::AdvanceConfig, BackwardGData<Idx, DType>, UDF>( rtcfg, csr, gdata, minigun::IntArray1D<Idx>()); } // Following macro is used to generate explicit-specialization of the template // operator. #define GEN_BACKWARD_DEFINE(mode, dtype, lhs_tgt, rhs_tgt, op) \ template void CallBackwardBinaryReduce<XPU, \ mode, IDX, dtype, \ lhs_tgt, rhs_tgt, \ op<dtype>, REDUCER<XPU, dtype>>( \ const minigun::advance::RuntimeConfig& rtcfg, \ const CSRWrapper& graph, \ BackwardGData<IDX, dtype>* gdata); // Template implementation of BackwardBinaryReduce with broadcasting operator. 
template <int XPU, int Mode, int NDim, typename Idx, typename DType, typename LeftSelector, typename RightSelector, typename BinaryOp, typename Reducer> void CallBackwardBinaryReduceBcast( const minigun::advance::RuntimeConfig& rtcfg, const CSRWrapper& graph, BackwardBcastGData<NDim, Idx, DType>* gdata) { // For backward computation, we use reverse csr and switch dst and src. // This benefits the most common src_op_edge or copy_src case, because the // gradients of src are now aggregated into destination buffer to reduce // competition of atomic add. auto incsr = graph.GetInCSRMatrix(); minigun::Csr<Idx> csr = utils::CreateCsr<Idx>(incsr.indptr, incsr.indices); typedef cpu::BackwardFunctorsTempl<Idx, DType, typename SwitchSrcDst<LeftSelector>::Type, typename SwitchSrcDst<RightSelector>::Type, BinaryOp, Reducer> Functors; typedef cpu::BackwardBinaryReduceBcast<Mode, NDim, Idx, DType, Functors> UDF; // If the user-given mapping is none and the target is edge data, we need to // replace the mapping by the edge ids in the csr graph so that the edge // data is correctly read/written. if (LeftSelector::target == binary_op::kEdge && gdata->lhs_mapping == nullptr) { gdata->lhs_mapping = static_cast<Idx*>(incsr.data->data); } if (RightSelector::target == binary_op::kEdge && gdata->rhs_mapping == nullptr) { gdata->rhs_mapping = static_cast<Idx*>(incsr.data->data); } if (OutSelector<Reducer>::Type::target == binary_op::kEdge && gdata->out_mapping == nullptr) { gdata->out_mapping = static_cast<Idx*>(incsr.data->data); } // TODO(minjie): allocator minigun::advance::Advance<XPU, Idx, cpu::AdvanceConfig, BackwardBcastGData<NDim, Idx, DType>, UDF>( rtcfg, csr, gdata, minigun::IntArray1D<Idx>()); } // Following macro is used to generate explicit-specialization of the template // operator. #define GEN_BACKWARD_BCAST_DEFINE(mode, ndim, dtype, lhs_tgt, rhs_tgt, op) \ template void CallBackwardBinaryReduceBcast<XPU, \ mode, ndim, IDX, dtype, \ lhs_tgt, rhs_tgt, \ op<dtype>, REDUCER<XPU, dtype>>( \ const minigun::advance::RuntimeConfig& rtcfg, \ const CSRWrapper& graph, \ BackwardBcastGData<ndim, IDX, dtype>* gdata); } // namespace kernel } // namespace dgl #endif // DGL_KERNEL_CPU_BACKWARD_BINARY_REDUCE_IMPL_H_
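// A minimal sketch of the accumulation pattern both UDFs above rely on: several
// edges can map to the same lhs/rhs id, so per-edge gradient contributions are
// added into the shared gradient buffer under "#pragma omp atomic".  The graph
// layout and names here are illustrative, not DGL's real data structures.
void scatter_add_grad(const int* edge_src, const float* grad_edge,
                      float* grad_node, int num_edges) {
#pragma omp parallel for
  for (int e = 0; e < num_edges; ++e) {
    const int u = edge_src[e];       // distinct edges e may share the same u
#pragma omp atomic
    grad_node[u] += grad_edge[e];    // atomic update avoids the write-write race
  }
}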
jacobi-avx.c
#include <immintrin.h>

inline void kernel(double* v1, double * v2, int m) {
    __m256d alpha = _mm256_set1_pd(0.25);
    //
    __m256d phi_e = _mm256_loadu_pd (v1 + 1 );
    __m256d phi_w = _mm256_loadu_pd (v1 - 1 );
    __m256d phi_n = _mm256_loadu_pd (v1 + m);
    __m256d phi_s = _mm256_loadu_pd (v1 - m);
    //
    phi_e = _mm256_add_pd(phi_e, phi_s);
    phi_e = _mm256_add_pd(phi_e, phi_n);
    //phi_e = _mm_fmadd_pd(alpha, phi_e, phi_w);
    phi_e = _mm256_add_pd(phi_e, phi_w);
    phi_e = _mm256_mul_pd(alpha, phi_e);
    //
    _mm256_storeu_pd(v2, phi_e);
}

inline void kernel_sequential(double* v1, double * v2, int m) {
    double phi_e = *(v1 + 1);
    double phi_w = *(v1 - 1);
    double phi_n = *(v1 + m);
    double phi_s = *(v1 - m);
    double phi = 0.25*(phi_e + phi_w + phi_n + phi_s);
    *(v2) = phi;
}

void laplacian(double* v1, double* v2, int dim_m, int dim_n) {
    //
    //#pragma omp parallel
    #pragma omp parallel for schedule(static)
    for (int j = 1; j < dim_n - 1; ++j ) {
        __builtin_prefetch ((void *) v2 + j*dim_n + 256, 0, 1);
        int i;
        for (i = 1; i < dim_m - 1 - (dim_m - 1)%4; i = i + 4) {
            kernel(v1 + j*dim_n + i, v2 + j*dim_n + i, dim_n);
        }
        for (; i < dim_m - 1; ++i) {
            kernel_sequential(v1 + j*dim_n + i, v2 + j*dim_n + i, dim_n);
        }
    }
}
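// Hedged sketch: the commented-out fused-multiply-add in kernel() uses the
// 128-bit name _mm_fmadd_pd; with 256-bit __m256d vectors the matching
// intrinsic is _mm256_fmadd_pd (requires FMA support, e.g. -mavx2 -mfma).
// This variant is an illustration of that intrinsic, not a tested drop-in
// replacement for kernel().
static inline void kernel_fma(const double* v1, double* v2, int m) {
    __m256d alpha = _mm256_set1_pd(0.25);
    __m256d phi_e = _mm256_loadu_pd(v1 + 1);
    __m256d phi_w = _mm256_loadu_pd(v1 - 1);
    __m256d phi_n = _mm256_loadu_pd(v1 + m);
    __m256d phi_s = _mm256_loadu_pd(v1 - m);
    __m256d ens   = _mm256_add_pd(_mm256_add_pd(phi_e, phi_n), phi_s);
    __m256d part  = _mm256_mul_pd(alpha, phi_w);              // 0.25 * w
    _mm256_storeu_pd(v2, _mm256_fmadd_pd(alpha, ens, part));  // 0.25*(e+n+s) + 0.25*w
}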
rose_jacobi_seq.c
/* An example code * * */ #include <stdio.h> #include <math.h> #include "omp.h" void driver(); void initialize(); void jacobi(); void error_check(); #define MSIZE 200 int n; int m; int mits; double tol; double relax = 1.0; double alpha = 0.0543; double u[200][200]; double f[200][200]; double uold[200][200]; double dx; double dy; int main() { // float toler; /* printf("Input n,m (< %d) - grid dimension in x,y direction:\n",MSIZE); scanf ("%d",&n); scanf ("%d",&m); printf("Input tol - error tolerance for iterative solver\n"); scanf("%f",&toler); tol=(double)toler; printf("Input mits - Maximum iterations for solver\n"); scanf("%d",&mits); */ n = 200; m = 200; tol = 0.0000000001; mits = 1000; driver(); return 1; } /************************************************************* * Subroutine driver () * This is where the arrays are allocated and initialzed. * * Working varaibles/arrays * dx - grid spacing in x direction * dy - grid spacing in y direction *************************************************************/ void driver() { initialize(); /* Solve Helmholtz equation */ jacobi(); /* error_check (n,m,alpha,dx,dy,u,f) */ error_check(); } /* subroutine initialize (n,m,alpha,dx,dy,u,f) ****************************************************** * Initializes data * Assumes exact solution is u(x,y) = (1-x^2)*(1-y^2) * ******************************************************/ void initialize() { int i; int j; int xx; int yy; // double PI = 3.1415926; // -->dx@112:2 dx = 2.0 / (n - 1); //-->dy@113:2 dy = 2.0 / (m - 1); /* Initialize initial condition and RHS */ //#pragma omp parallel for private(i,j,xx,yy) #pragma omp parallel for private (xx,yy,i,j) firstprivate (n,m) for (i = 0; i <= n - 1; i += 1) { for (j = 0; j <= m - 1; j += 1) { /* -1 < x < 1 */ xx = ((int )(- 1.0 + dx * (i - 1))); /* -1 < y < 1 */ yy = ((int )(- 1.0 + dy * (j - 1))); u[i][j] = 0.0; f[i][0] = - 1.0 * alpha * (1.0 - (xx * xx)) * (1.0 - (yy * yy)) - 2.0 * (1.0 - (xx * xx)) - 2.0 * (1.0 - (yy * yy)); } } } /* subroutine jacobi (n,m,dx,dy,alpha,omega,u,f,tol,maxit) ****************************************************************** * Subroutine HelmholtzJ * Solves poisson equation on rectangular grid assuming : * (1) Uniform discretization in each direction, and * (2) Dirichlect boundary conditions * * Jacobi method is used in this routine * * Input : n,m Number of grid points in the X/Y directions * dx,dy Grid spacing in the X/Y directions * alpha Helmholtz eqn. 
coefficient * omega Relaxation factor * f(n,m) Right hand side function * u(n,m) Dependent variable/Solution * tol Tolerance for iterative solver * maxit Maximum number of iterations * * Output : u(n,m) - Solution *****************************************************************/ void jacobi() { double omega; int i; int j; int k; double error; double resid; double ax; double ay; double b; omega = relax; /* * Initialize coefficients */ /* X-direction coef */ ax = 1.0 / (dx * dx); /* Y-direction coef */ ay = 1.0 / (dy * dy); /* Central coeff */ b = - 2.0 / (dx * dx) - 2.0 / (dy * dy) - alpha; error = 10.0 * tol; k = 1; while(k <= mits && error > tol){ error = 0.0; /* Copy new solution into old */ //#pragma omp parallel { //#pragma omp for private(i,j) #pragma omp parallel for private (i,j) for (i = 0; i <= n - 1; i += 1) { #pragma omp parallel for private (j) for (j = 0; j <= m - 1; j += 1) { uold[i][j] = u[i][j]; } } //#pragma omp for private(i,j,resid) reduction(+:error) nowait #pragma omp parallel for private (resid,i,j) reduction (+:error) for (i = 1; i <= n - 1 - 1; i += 1) { #pragma omp parallel for private (resid,j) reduction (+:error) firstprivate (omega,ax,ay,b) for (j = 1; j <= m - 1 - 1; j += 1) { resid = (ax * (uold[i - 1][j] + uold[i + 1][j]) + ay * (uold[i][j - 1] + uold[i][j + 1]) + b * uold[i][j] - f[i][j]) / b; u[i][j] = uold[i][j] - omega * resid; error = error + resid * resid; } } } /* omp end parallel */ /* Error check */ // k = k + 1; error = sqrt(error) / (n * m); /* End iteration loop */ } printf("Total Number of Iterations:%d\n",k); printf("Residual:%E\n",error); } void error_check() { int i; int j; double xx; double yy; double temp; double error; dx = 2.0 / (n - 1); dy = 2.0 / (m - 1); error = 0.0; //#pragma omp parallel for private(i,j,xx,yy,temp) reduction(+:error) #pragma omp parallel for private (xx,yy,temp,i,j) reduction (+:error) for (i = 0; i <= n - 1; i += 1) { #pragma omp parallel for private (xx,yy,temp,j) reduction (+:error) firstprivate (dx,dy) for (j = 0; j <= m - 1; j += 1) { xx = - 1.0 + dx * (i - 1); yy = - 1.0 + dy * (j - 1); temp = u[i][j] - (1.0 - xx * xx) * (1.0 - yy * yy); error = error + temp * temp; } } error = sqrt(error) / (n * m); printf("Solution Error :%E \n",error); }
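// Hedged note on the generated pragmas above: placing "#pragma omp parallel for"
// on both the i and the j loops creates nested parallel regions (or inner teams
// of a single thread when nesting is disabled).  A common alternative, sketched
// here for the copy loop only and using the same globals (u, uold, n, m), is a
// single region that collapses both loops; this is an illustration, not the
// auto-parallelizer's intended output.
void copy_u_to_uold_collapsed()
{
  int i;
  int j;
#pragma omp parallel for private (i,j) collapse(2)
  for (i = 0; i <= n - 1; i += 1) {
    for (j = 0; j <= m - 1; j += 1) {
      uold[i][j] = u[i][j];
    }
  }
}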
sudoku_solver_parallel.c
#include <stdio.h> #include <string.h> #include <stdlib.h> #include <math.h> #include <omp.h> #define EMPTY 0 #define MAX_SIZE 25 int findEmptyLocation(int matrix[MAX_SIZE][MAX_SIZE], int *row, int *col, int box_size); int canBeFilled(int matrix[MAX_SIZE][MAX_SIZE], int row, int col, int num, int box_size, int grid_sz); void printMatrix(int matrix[MAX_SIZE][MAX_SIZE], int box_sz) { #pragma omp critical { printf("solution matrix\n"); int row, col; for (row = 0; row < box_sz; row++) { for (col = 0; col < box_sz; col++) printf("%d ", matrix[row][col]); printf("\n"); } } } int solveSudoku(int row, int col, int matrix[MAX_SIZE][MAX_SIZE], int box_sz, int grid_sz) { if(col > (box_sz - 1)) { col = 0; row++; } if(row > (box_sz - 1)) { return 1; } if(matrix[row][col] != EMPTY) { //#pragma omp task firstprivate(col, row) if (solveSudoku(row, col+1, matrix, box_sz, grid_sz)) { printMatrix(matrix, box_sz); } } else { int num; for (num = 1; num <= box_sz; num++) { if (canBeFilled(matrix, row, col, num, box_sz, grid_sz)) { #pragma omp task firstprivate(num, col, row) { int tempMatrix[MAX_SIZE][MAX_SIZE]; int i; int j; for(i=0; i<box_sz; i++) { for( j=0; j<box_sz; j++){ tempMatrix[i][j] = matrix[i][j]; } } tempMatrix[row][col] = num; if (solveSudoku(row, col+1, tempMatrix, box_sz, grid_sz)) printMatrix(tempMatrix, box_sz); } } } } return 0; } int existInRow(int matrix[MAX_SIZE][MAX_SIZE], int row, int num, int box_size) { int col; for (col = 0; col < box_size; col++) if (matrix[row][col] == num) return 1; return 0; } int existInColumn(int matrix[MAX_SIZE][MAX_SIZE], int col, int num, int box_size) { int row; for (row = 0; row < box_size; row++) if (matrix[row][col] == num) return 1; return 0; } int existInGrid(int matrix[MAX_SIZE][MAX_SIZE], int gridOffsetRow, int gridOffsetColumn, int num, int grid_sz) { int row, col; for (row = 0; row < grid_sz; row++) for (col = 0; col < grid_sz; col++) if (matrix[row+gridOffsetRow][col+gridOffsetColumn] == num) return 1; return 0; } int canBeFilled(int matrix[MAX_SIZE][MAX_SIZE], int row, int col, int num, int box_size, int grid_sz) { return !existInRow(matrix, row, num, box_size) && !existInColumn(matrix, col, num, box_size) && !existInGrid(matrix, row - row%grid_sz , col - col%grid_sz, num, grid_sz)&& matrix[row][col]==EMPTY; } void readCSV(int box_sz, char *filename, int matrix[MAX_SIZE][MAX_SIZE]){ FILE *file; file = fopen(filename, "r"); int i = 0; char line[4098]; while (fgets(line, 4098, file) && (i < box_sz)) { char* tmp = strdup(line); int j = 0; const char* tok; for (tok = strtok(line, ","); tok && *tok; j++, tok = strtok(NULL, ",\n")) { matrix[i][j] = atof(tok); } free(tmp); i++; } } int main(int argc, char const *argv[]) { double time1 = omp_get_wtime(); if (argc < 3){ printf("Please specify matrix size and the CSV file name as inputs.\n"); exit(0); } int box_sz = atoi(argv[1]); int grid_sz = sqrt(box_sz); char filename[256]; strcpy(filename, argv[2]); int matrix[MAX_SIZE][MAX_SIZE]; readCSV(box_sz, filename, matrix); #pragma omp parallel { #pragma omp single { solveSudoku(0, 0, matrix, box_sz, grid_sz); } } printf("Elapsed time: %0.2lf\n", omp_get_wtime() - time1); return 0; }
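// Hedged sketch: solveSudoku() above creates one task (plus a full board copy)
// per candidate digit at every recursion depth, which can flood the runtime with
// tiny tasks.  OpenMP's final clause can cap that by running tasks below a
// cutoff depth inline; the depth parameter and cutoff value are illustrative
// additions, not part of the original solver, and the recursive calls would
// also need to thread depth+1 through for the cutoff to take effect.
#define TASK_CUTOFF_DEPTH 8

void spawnCandidate(int matrix[MAX_SIZE][MAX_SIZE], int row, int col, int num,
                    int box_sz, int grid_sz, int depth) {
    #pragma omp task firstprivate(num, col, row, depth) final(depth > TASK_CUTOFF_DEPTH) mergeable
    {
        int tempMatrix[MAX_SIZE][MAX_SIZE];
        int i, j;
        for (i = 0; i < box_sz; i++)
            for (j = 0; j < box_sz; j++)
                tempMatrix[i][j] = matrix[i][j];
        tempMatrix[row][col] = num;
        if (solveSudoku(row, col + 1, tempMatrix, box_sz, grid_sz))
            printMatrix(tempMatrix, box_sz);
    }
}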
convolution_winograd_transform_pack4_bf16s.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv3x3s1_winograd64_transform_input_pack4_bf16s_neon(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt) { const int w = bottom_blob.w; const int h = bottom_blob.h; const int inch = bottom_blob.c; const int w_tiles = (w - 2) / 6; const int h_tiles = (h - 2) / 6; const int tiles = w_tiles * h_tiles; // const float itm[8][8] = { // {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f}, // // {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f}, // {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f}, // // {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f}, // {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f}, // // {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f}, // {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f}, // // {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f} // }; // 0 = r00 - r06 + (r04 - r02) * 5.25 // 7 = r07 - r01 + (r03 - r05) * 5.25 // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05) // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05) // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2) // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2) // reuse r04 * 1.25 // reuse r03 * 2.5 // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5) // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5) #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < inch; q++) { const Mat img0 = bottom_blob.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); float tmp[8][8][4]; // tile for (int i = 0; i < h_tiles; i++) { for (int j = 0; j < w_tiles; j++) { const unsigned short* r0 = img0.row<const unsigned short>(i * 6) + (j * 6) * 4; for (int m = 0; m < 8; m++) { float32x4_t _r00 = vcvt_f32_bf16(vld1_u16(r0)); float32x4_t _r01 = vcvt_f32_bf16(vld1_u16(r0 + 4)); float32x4_t _r02 = vcvt_f32_bf16(vld1_u16(r0 + 8)); float32x4_t _r03 = vcvt_f32_bf16(vld1_u16(r0 + 12)); float32x4_t _r04 = vcvt_f32_bf16(vld1_u16(r0 + 16)); float32x4_t _r05 = vcvt_f32_bf16(vld1_u16(r0 + 20)); float32x4_t _r06 = vcvt_f32_bf16(vld1_u16(r0 + 24)); float32x4_t _r07 = vcvt_f32_bf16(vld1_u16(r0 + 28)); float32x4_t _tmp0m = vmlaq_n_f32(vsubq_f32(_r00, _r06), vsubq_f32(_r04, _r02), 5.25f); float32x4_t _tmp7m = vmlaq_n_f32(vsubq_f32(_r07, _r01), vsubq_f32(_r03, _r05), 5.25f); vst1q_f32(tmp[0][m], _tmp0m); vst1q_f32(tmp[7][m], _tmp7m); float32x4_t _tmp12a = vmlsq_n_f32(vaddq_f32(_r02, _r06), _r04, 4.25f); float32x4_t _tmp12b = vmlsq_n_f32(vaddq_f32(_r01, _r05), _r03, 4.25f); float32x4_t _tmp1m = vaddq_f32(_tmp12a, _tmp12b); float32x4_t _tmp2m = vsubq_f32(_tmp12a, _tmp12b); vst1q_f32(tmp[1][m], _tmp1m); vst1q_f32(tmp[2][m], _tmp2m); float32x4_t _tmp34a = vmlsq_n_f32(vmlaq_n_f32(_r06, _r02, 0.25f), _r04, 1.25f); float32x4_t _tmp34b = 
vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_r01, 0.5f), _r03, 2.5f), _r05, 2.f); float32x4_t _tmp3m = vaddq_f32(_tmp34a, _tmp34b); float32x4_t _tmp4m = vsubq_f32(_tmp34a, _tmp34b); vst1q_f32(tmp[3][m], _tmp3m); vst1q_f32(tmp[4][m], _tmp4m); float32x4_t _tmp56a = vmlaq_n_f32(_r06, vmlsq_n_f32(_r02, _r04, 1.25f), 4.f); float32x4_t _tmp56b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_r01, 2.f), _r03, 2.5f), _r05, 0.5f); float32x4_t _tmp5m = vaddq_f32(_tmp56a, _tmp56b); float32x4_t _tmp6m = vsubq_f32(_tmp56a, _tmp56b); vst1q_f32(tmp[5][m], _tmp5m); vst1q_f32(tmp[6][m], _tmp6m); r0 += w * 4; } float* r0_tm_0 = (float*)img0_tm + (i * w_tiles + j) * 4; float* r0_tm_1 = r0_tm_0 + tiles * 4; float* r0_tm_2 = r0_tm_0 + tiles * 8; float* r0_tm_3 = r0_tm_0 + tiles * 12; float* r0_tm_4 = r0_tm_0 + tiles * 16; float* r0_tm_5 = r0_tm_0 + tiles * 20; float* r0_tm_6 = r0_tm_0 + tiles * 24; float* r0_tm_7 = r0_tm_0 + tiles * 28; for (int m = 0; m < 8; m++) { float32x4_t _tmp00 = vld1q_f32(tmp[m][0]); float32x4_t _tmp01 = vld1q_f32(tmp[m][1]); float32x4_t _tmp02 = vld1q_f32(tmp[m][2]); float32x4_t _tmp03 = vld1q_f32(tmp[m][3]); float32x4_t _tmp04 = vld1q_f32(tmp[m][4]); float32x4_t _tmp05 = vld1q_f32(tmp[m][5]); float32x4_t _tmp06 = vld1q_f32(tmp[m][6]); float32x4_t _tmp07 = vld1q_f32(tmp[m][7]); float32x4_t _r0tm0 = vmlaq_n_f32(vsubq_f32(_tmp00, _tmp06), vsubq_f32(_tmp04, _tmp02), 5.25f); float32x4_t _r0tm7 = vmlaq_n_f32(vsubq_f32(_tmp07, _tmp01), vsubq_f32(_tmp03, _tmp05), 5.25f); float32x4_t _tmp12a = vmlsq_n_f32(vaddq_f32(_tmp02, _tmp06), _tmp04, 4.25f); float32x4_t _tmp12b = vmlsq_n_f32(vaddq_f32(_tmp01, _tmp05), _tmp03, 4.25f); float32x4_t _r0tm1 = vaddq_f32(_tmp12a, _tmp12b); float32x4_t _r0tm2 = vsubq_f32(_tmp12a, _tmp12b); float32x4_t _tmp34a = vmlsq_n_f32(vmlaq_n_f32(_tmp06, _tmp02, 0.25f), _tmp04, 1.25f); float32x4_t _tmp34b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_tmp01, 0.5f), _tmp03, 2.5f), _tmp05, 2.f); float32x4_t _r0tm3 = vaddq_f32(_tmp34a, _tmp34b); float32x4_t _r0tm4 = vsubq_f32(_tmp34a, _tmp34b); float32x4_t _tmp56a = vmlaq_n_f32(_tmp06, vmlsq_n_f32(_tmp02, _tmp04, 1.25f), 4.f); float32x4_t _tmp56b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_tmp01, 2.f), _tmp03, 2.5f), _tmp05, 0.5f); float32x4_t _r0tm5 = vaddq_f32(_tmp56a, _tmp56b); float32x4_t _r0tm6 = vsubq_f32(_tmp56a, _tmp56b); vst1q_f32(r0_tm_0, _r0tm0); vst1q_f32(r0_tm_1, _r0tm1); vst1q_f32(r0_tm_2, _r0tm2); vst1q_f32(r0_tm_3, _r0tm3); vst1q_f32(r0_tm_4, _r0tm4); vst1q_f32(r0_tm_5, _r0tm5); vst1q_f32(r0_tm_6, _r0tm6); vst1q_f32(r0_tm_7, _r0tm7); r0_tm_0 += tiles * 32; r0_tm_1 += tiles * 32; r0_tm_2 += tiles * 32; r0_tm_3 += tiles * 32; r0_tm_4 += tiles * 32; r0_tm_5 += tiles * 32; r0_tm_6 += tiles * 32; r0_tm_7 += tiles * 32; } } } } } static void conv3x3s1_winograd64_transform_output_pack4_bf16s_neon(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt) { const int outw = top_blob.w; const int outh = top_blob.h; const int outch = top_blob.c; const int w_tiles = outw / 6; const int h_tiles = outh / 6; const int tiles = w_tiles * h_tiles; const float* biasptr = bias; // const float otm[6][8] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f} // }; // 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32 // 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16 // 2 
= (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8 // 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4 // 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2 // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6) #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob.channel(p); float32x4_t _bias0 = biasptr ? vld1q_f32(biasptr + p * 4) : vdupq_n_f32(0.f); float tmp[6][8][4]; // tile for (int i = 0; i < h_tiles; i++) { for (int j = 0; j < w_tiles; j++) { const float* output0_tm_0 = (const float*)out0_tm + (i * w_tiles + j) * 4; const float* output0_tm_1 = output0_tm_0 + tiles * 4; const float* output0_tm_2 = output0_tm_0 + tiles * 8; const float* output0_tm_3 = output0_tm_0 + tiles * 12; const float* output0_tm_4 = output0_tm_0 + tiles * 16; const float* output0_tm_5 = output0_tm_0 + tiles * 20; const float* output0_tm_6 = output0_tm_0 + tiles * 24; const float* output0_tm_7 = output0_tm_0 + tiles * 28; unsigned short* output0 = out0.row<unsigned short>(i * 6) + (j * 6) * 4; for (int m = 0; m < 8; m++) { float32x4_t _out0tm0 = vld1q_f32(output0_tm_0); float32x4_t _out0tm1 = vld1q_f32(output0_tm_1); float32x4_t _out0tm2 = vld1q_f32(output0_tm_2); float32x4_t _out0tm3 = vld1q_f32(output0_tm_3); float32x4_t _out0tm4 = vld1q_f32(output0_tm_4); float32x4_t _out0tm5 = vld1q_f32(output0_tm_5); float32x4_t _out0tm6 = vld1q_f32(output0_tm_6); float32x4_t _out0tm7 = vld1q_f32(output0_tm_7); float32x4_t _tmp024a = vaddq_f32(_out0tm1, _out0tm2); float32x4_t _tmp135a = vsubq_f32(_out0tm1, _out0tm2); float32x4_t _tmp024b = vaddq_f32(_out0tm3, _out0tm4); float32x4_t _tmp135b = vsubq_f32(_out0tm3, _out0tm4); float32x4_t _tmp024c = vaddq_f32(_out0tm5, _out0tm6); float32x4_t _tmp135c = vsubq_f32(_out0tm5, _out0tm6); float32x4_t _tmp0m = vaddq_f32(vaddq_f32(_out0tm0, _tmp024a), vmlaq_n_f32(_tmp024b, _tmp024c, 32.f)); float32x4_t _tmp2m = vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f); float32x4_t _tmp4m = vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f); vst1q_f32(tmp[0][m], _tmp0m); vst1q_f32(tmp[2][m], _tmp2m); vst1q_f32(tmp[4][m], _tmp4m); float32x4_t _tmp1m = vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f); float32x4_t _tmp3m = vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f); float32x4_t _tmp5m = vaddq_f32(vaddq_f32(_out0tm7, _tmp135a), vmlaq_n_f32(_tmp135c, _tmp135b, 32.f)); vst1q_f32(tmp[1][m], _tmp1m); vst1q_f32(tmp[3][m], _tmp3m); vst1q_f32(tmp[5][m], _tmp5m); output0_tm_0 += tiles * 32; output0_tm_1 += tiles * 32; output0_tm_2 += tiles * 32; output0_tm_3 += tiles * 32; output0_tm_4 += tiles * 32; output0_tm_5 += tiles * 32; output0_tm_6 += tiles * 32; output0_tm_7 += tiles * 32; } for (int m = 0; m < 6; m++) { float32x4_t _tmp00 = vld1q_f32(tmp[m][0]); float32x4_t _tmp01 = vld1q_f32(tmp[m][1]); float32x4_t _tmp02 = vld1q_f32(tmp[m][2]); float32x4_t _tmp03 = vld1q_f32(tmp[m][3]); float32x4_t _tmp04 = vld1q_f32(tmp[m][4]); float32x4_t _tmp05 = vld1q_f32(tmp[m][5]); float32x4_t _tmp06 = vld1q_f32(tmp[m][6]); float32x4_t _tmp07 = vld1q_f32(tmp[m][7]); float32x4_t _tmp024a = vaddq_f32(_tmp01, _tmp02); float32x4_t _tmp135a = vsubq_f32(_tmp01, _tmp02); float32x4_t _tmp024b = vaddq_f32(_tmp03, _tmp04); float32x4_t _tmp135b = vsubq_f32(_tmp03, _tmp04); float32x4_t _tmp024c = vaddq_f32(_tmp05, _tmp06); float32x4_t _tmp135c = vsubq_f32(_tmp05, _tmp06); float32x4_t _out00 = vaddq_f32(_bias0, vaddq_f32(vaddq_f32(_tmp00, _tmp024a), vmlaq_n_f32(_tmp024b, _tmp024c, 
32.f))); float32x4_t _out02 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f)); float32x4_t _out04 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f)); vst1_u16(output0, vcvt_bf16_f32(_out00)); vst1_u16(output0 + 8, vcvt_bf16_f32(_out02)); vst1_u16(output0 + 16, vcvt_bf16_f32(_out04)); float32x4_t _out01 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f)); float32x4_t _out03 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f)); float32x4_t _out05 = vaddq_f32(_bias0, vaddq_f32(vaddq_f32(_tmp07, _tmp135a), vmlaq_n_f32(_tmp135c, _tmp135b, 32.f))); vst1_u16(output0 + 4, vcvt_bf16_f32(_out01)); vst1_u16(output0 + 12, vcvt_bf16_f32(_out03)); vst1_u16(output0 + 20, vcvt_bf16_f32(_out05)); output0 += outw * 4; } } } } } static void conv3x3s1_winograd42_transform_input_pack4_bf16s_neon(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt) { const int w = bottom_blob.w; const int h = bottom_blob.h; const int inch = bottom_blob.c; const int w_tiles = (w - 2) / 4; const int h_tiles = (h - 2) / 4; const int tiles = w_tiles * h_tiles; // const float itm[6][6] = { // {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f}, // {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f}, // {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f}, // {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f} // }; // 0 = 4 * r00 - 5 * r02 + r04 // 1 = -4 * (r01 + r02) + r04 + r03 // 2 = 4 * (r01 - r02) + r04 - r03 // 3 = -2 * (r01 - r03) + r04 - r02 // 4 = 2 * (r01 - r03) + r04 - r02 // 5 = 4 * r01 - 5 * r03 + r05 #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < inch; q++) { const Mat img0 = bottom_blob.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); float tmp[6][6][4]; // tile for (int i = 0; i < h_tiles; i++) { for (int j = 0; j < w_tiles; j++) { const unsigned short* r0 = img0.row<const unsigned short>(i * 4) + (j * 4) * 4; for (int m = 0; m < 6; m++) { float32x4_t _r00 = vcvt_f32_bf16(vld1_u16(r0)); float32x4_t _r01 = vcvt_f32_bf16(vld1_u16(r0 + 4)); float32x4_t _r02 = vcvt_f32_bf16(vld1_u16(r0 + 8)); float32x4_t _r03 = vcvt_f32_bf16(vld1_u16(r0 + 12)); float32x4_t _r04 = vcvt_f32_bf16(vld1_u16(r0 + 16)); float32x4_t _r05 = vcvt_f32_bf16(vld1_u16(r0 + 20)); float32x4_t _tmp0m = vmlsq_n_f32(vmlaq_n_f32(_r04, _r00, 4.f), _r02, 5.f); float32x4_t _tmp1m = vmlsq_n_f32(vaddq_f32(_r04, _r03), vaddq_f32(_r01, _r02), 4.f); float32x4_t _tmp2m = vmlaq_n_f32(vsubq_f32(_r04, _r03), vsubq_f32(_r01, _r02), 4.f); float32x4_t _tmp3m = vmlsq_n_f32(vsubq_f32(_r04, _r02), vsubq_f32(_r01, _r03), 2.f); float32x4_t _tmp4m = vmlaq_n_f32(vsubq_f32(_r04, _r02), vsubq_f32(_r01, _r03), 2.f); float32x4_t _tmp5m = vmlsq_n_f32(vmlaq_n_f32(_r05, _r01, 4.f), _r03, 5.f); vst1q_f32(tmp[0][m], _tmp0m); vst1q_f32(tmp[1][m], _tmp1m); vst1q_f32(tmp[2][m], _tmp2m); vst1q_f32(tmp[3][m], _tmp3m); vst1q_f32(tmp[4][m], _tmp4m); vst1q_f32(tmp[5][m], _tmp5m); r0 += w * 4; } float* r0_tm_0 = (float*)img0_tm + (i * w_tiles + j) * 4; float* r0_tm_1 = r0_tm_0 + tiles * 4; float* r0_tm_2 = r0_tm_0 + tiles * 8; float* r0_tm_3 = r0_tm_0 + tiles * 12; float* r0_tm_4 = r0_tm_0 + tiles * 16; float* r0_tm_5 = r0_tm_0 + tiles * 20; for (int m = 0; m < 6; m++) { float32x4_t _tmp00 = vld1q_f32(tmp[m][0]); float32x4_t _tmp01 = vld1q_f32(tmp[m][1]); float32x4_t _tmp02 = vld1q_f32(tmp[m][2]); float32x4_t _tmp03 = vld1q_f32(tmp[m][3]); float32x4_t _tmp04 = vld1q_f32(tmp[m][4]); 
float32x4_t _tmp05 = vld1q_f32(tmp[m][5]); float32x4_t _r0tm0 = vmlsq_n_f32(vmlaq_n_f32(_tmp04, _tmp00, 4.f), _tmp02, 5.f); float32x4_t _r0tm1 = vmlsq_n_f32(vaddq_f32(_tmp04, _tmp03), vaddq_f32(_tmp01, _tmp02), 4.f); float32x4_t _r0tm2 = vmlaq_n_f32(vsubq_f32(_tmp04, _tmp03), vsubq_f32(_tmp01, _tmp02), 4.f); float32x4_t _r0tm3 = vmlsq_n_f32(vsubq_f32(_tmp04, _tmp02), vsubq_f32(_tmp01, _tmp03), 2.f); float32x4_t _r0tm4 = vmlaq_n_f32(vsubq_f32(_tmp04, _tmp02), vsubq_f32(_tmp01, _tmp03), 2.f); float32x4_t _r0tm5 = vmlsq_n_f32(vmlaq_n_f32(_tmp05, _tmp01, 4.f), _tmp03, 5.f); vst1q_f32(r0_tm_0, _r0tm0); vst1q_f32(r0_tm_1, _r0tm1); vst1q_f32(r0_tm_2, _r0tm2); vst1q_f32(r0_tm_3, _r0tm3); vst1q_f32(r0_tm_4, _r0tm4); vst1q_f32(r0_tm_5, _r0tm5); r0_tm_0 += tiles * 24; r0_tm_1 += tiles * 24; r0_tm_2 += tiles * 24; r0_tm_3 += tiles * 24; r0_tm_4 += tiles * 24; r0_tm_5 += tiles * 24; } } } } } static void conv3x3s1_winograd42_transform_output_pack4_bf16s_neon(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt) { const int outw = top_blob.w; const int outh = top_blob.h; const int outch = top_blob.c; const int w_tiles = outw / 4; const int h_tiles = outh / 4; const int tiles = w_tiles * h_tiles; const float* biasptr = bias; // const float otm[4][6] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f} // }; // 0 = r00 + (r01 + r02) + (r03 + r04) // 1 = (r01 - r02) + (r03 - r04) * 2 // 2 = (r01 + r02) + (r03 + r04) * 4 // 3 = r05 + (r01 - r02) + (r03 - r04) * 8 #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob.channel(p); float32x4_t _bias0 = biasptr ? 
vld1q_f32(biasptr + p * 4) : vdupq_n_f32(0.f); float tmp[4][6][4]; // tile for (int i = 0; i < h_tiles; i++) { for (int j = 0; j < w_tiles; j++) { const float* output0_tm_0 = (const float*)out0_tm + (i * w_tiles + j) * 4; const float* output0_tm_1 = output0_tm_0 + tiles * 4; const float* output0_tm_2 = output0_tm_0 + tiles * 8; const float* output0_tm_3 = output0_tm_0 + tiles * 12; const float* output0_tm_4 = output0_tm_0 + tiles * 16; const float* output0_tm_5 = output0_tm_0 + tiles * 20; unsigned short* output0 = out0.row<unsigned short>(i * 4) + (j * 4) * 4; for (int m = 0; m < 6; m++) { float32x4_t _out0tm0 = vld1q_f32(output0_tm_0); float32x4_t _out0tm1 = vld1q_f32(output0_tm_1); float32x4_t _out0tm2 = vld1q_f32(output0_tm_2); float32x4_t _out0tm3 = vld1q_f32(output0_tm_3); float32x4_t _out0tm4 = vld1q_f32(output0_tm_4); float32x4_t _out0tm5 = vld1q_f32(output0_tm_5); float32x4_t _tmp02a = vaddq_f32(_out0tm1, _out0tm2); float32x4_t _tmp13a = vsubq_f32(_out0tm1, _out0tm2); float32x4_t _tmp02b = vaddq_f32(_out0tm3, _out0tm4); float32x4_t _tmp13b = vsubq_f32(_out0tm3, _out0tm4); float32x4_t _tmp0m = vaddq_f32(vaddq_f32(_out0tm0, _tmp02a), _tmp02b); float32x4_t _tmp1m = vmlaq_n_f32(_tmp13a, _tmp13b, 2.f); float32x4_t _tmp2m = vmlaq_n_f32(_tmp02a, _tmp02b, 4.f); float32x4_t _tmp3m = vmlaq_n_f32(vaddq_f32(_out0tm5, _tmp13a), _tmp13b, 8.f); vst1q_f32(tmp[0][m], _tmp0m); vst1q_f32(tmp[1][m], _tmp1m); vst1q_f32(tmp[2][m], _tmp2m); vst1q_f32(tmp[3][m], _tmp3m); output0_tm_0 += tiles * 24; output0_tm_1 += tiles * 24; output0_tm_2 += tiles * 24; output0_tm_3 += tiles * 24; output0_tm_4 += tiles * 24; output0_tm_5 += tiles * 24; } for (int m = 0; m < 4; m++) { float32x4_t _tmp00 = vld1q_f32(tmp[m][0]); float32x4_t _tmp01 = vld1q_f32(tmp[m][1]); float32x4_t _tmp02 = vld1q_f32(tmp[m][2]); float32x4_t _tmp03 = vld1q_f32(tmp[m][3]); float32x4_t _tmp04 = vld1q_f32(tmp[m][4]); float32x4_t _tmp05 = vld1q_f32(tmp[m][5]); float32x4_t _tmp02a = vaddq_f32(_tmp01, _tmp02); float32x4_t _tmp13a = vsubq_f32(_tmp01, _tmp02); float32x4_t _tmp02b = vaddq_f32(_tmp03, _tmp04); float32x4_t _tmp13b = vsubq_f32(_tmp03, _tmp04); float32x4_t _out00 = vaddq_f32(_bias0, vaddq_f32(vaddq_f32(_tmp00, _tmp02a), _tmp02b)); float32x4_t _out01 = vaddq_f32(_bias0, vmlaq_n_f32(_tmp13a, _tmp13b, 2.f)); float32x4_t _out02 = vaddq_f32(_bias0, vmlaq_n_f32(_tmp02a, _tmp02b, 4.f)); float32x4_t _out03 = vaddq_f32(_bias0, vmlaq_n_f32(vaddq_f32(_tmp05, _tmp13a), _tmp13b, 8.f)); vst1_u16(output0, vcvt_bf16_f32(_out00)); vst1_u16(output0 + 4, vcvt_bf16_f32(_out01)); vst1_u16(output0 + 8, vcvt_bf16_f32(_out02)); vst1_u16(output0 + 12, vcvt_bf16_f32(_out03)); output0 += outw * 4; } } } } }
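// Scalar reference for the one-dimensional 6-point input transform used by the
// winograd42 kernels above (the commented itm[6][6] matrix): applying it first
// to the rows and then to the columns of a 6x6 tile yields the same values the
// NEON code computes four channels at a time.  Purely illustrative.
static void winograd42_input_transform_1d(const float r[6], float d[6])
{
    d[0] = 4.f * r[0] - 5.f * r[2] + r[4];
    d[1] = -4.f * (r[1] + r[2]) + r[3] + r[4];
    d[2] = 4.f * (r[1] - r[2]) - r[3] + r[4];
    d[3] = -2.f * (r[1] - r[3]) - r[2] + r[4];
    d[4] = 2.f * (r[1] - r[3]) - r[2] + r[4];
    d[5] = 4.f * r[1] - 5.f * r[3] + r[5];
}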
GB_unop__sin_fp64_fp64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__sin_fp64_fp64 // op(A') function: GB_unop_tran__sin_fp64_fp64 // C type: double // A type: double // cast: double cij = aij // unaryop: cij = sin (aij) #define GB_ATYPE \ double #define GB_CTYPE \ double // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = sin (x) ; // casting #define GB_CAST(z, aij) \ double z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ double aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ double z = aij ; \ Cx [pC] = sin (z) ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_SIN || GxB_NO_FP64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__sin_fp64_fp64 ( double *Cx, // Cx and Ax may be aliased const double *Ax, const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (double), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { double aij = Ax [p] ; double z = aij ; Cx [p] = sin (z) ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; double aij = Ax [p] ; double z = aij ; Cx [p] = sin (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__sin_fp64_fp64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
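// Hedged usage sketch: this generated kernel is what SuiteSparse:GraphBLAS runs
// underneath a matrix apply with the double-precision sine operator.  The
// operator name GxB_SIN_FP64 is inferred from the GxB_NO_SIN / GxB_NO_FP64
// guards above; treat both it and this call pattern as assumptions rather than
// something verified against a specific library version.
#include "GraphBLAS.h"

GrB_Info apply_sin_fp64 (GrB_Matrix C, GrB_Matrix A)
{
    // C = sin (A) : no mask, no accumulator, default descriptor
    return (GrB_Matrix_apply (C, NULL, NULL, GxB_SIN_FP64, A, NULL)) ;
}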
DRACC_OMP_035_SAXPY_without_Task_Barrier_yes.c
/* SAXPY without a barrier at the end of execution to wait for the tasks to finish. */

#include <stdio.h>
#include <stdbool.h>
#include <stdlib.h>

#define C 4096

float a;
float x[C];
float y[C];

int init(){
  for(int i=0; i<C; i++){
    a=5;
    x[i]=0;
    y[i]=3;
  }
  return 0;
}

int saxpy(){
  #pragma omp target map(to:y[0:C],a) map(tofrom:x[0:C]) nowait device(0)
  {
    for(int i=0; i<C; i++){
      #pragma omp task depend(inout:x[i])
      {
        x[i] = a * x[i];
      }
      #pragma omp task depend(inout:x[i])
      {
        x[i] = x[i] + y[i];
      }
    }
  }
  return 0;
}

int check(){
  bool test = false;
  for(int i=0; i<C; i++){
    if(x[i]!=3){
      test = true;
    }
  }
  printf("Memory Access Issue visible: %s\n",test ? "true" : "false");
  return 0;
}

int main(){
  init();
  saxpy();
  check();
  #pragma omp taskwait
  return 0;
}
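/* Hedged note: this is a defect benchmark -- the target region above is nowait
 * and the taskwait in main() comes after check(), so the host may read x[]
 * before the device tasks have finished.  One race-free variant (an
 * illustration, not the benchmark's reference fix) keeps the tasks but waits
 * for them inside the target region and drops the nowait: */
int saxpy_synchronized(){
  #pragma omp target map(to:y[0:C],a) map(tofrom:x[0:C]) device(0)
  {
    for(int i=0; i<C; i++){
      #pragma omp task depend(inout:x[i])
      {
        x[i] = a * x[i];
      }
      #pragma omp task depend(inout:x[i])
      {
        x[i] = x[i] + y[i];
      }
    }
    #pragma omp taskwait /* all device tasks complete before x maps back to the host */
  }
  return 0;
}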
GB_binop__lt_int32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__lt_int32) // A.*B function (eWiseMult): GB (_AemultB_01__lt_int32) // A.*B function (eWiseMult): GB (_AemultB_02__lt_int32) // A.*B function (eWiseMult): GB (_AemultB_03__lt_int32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__lt_int32) // A*D function (colscale): GB (_AxD__lt_int32) // D*A function (rowscale): GB (_DxB__lt_int32) // C+=B function (dense accum): GB (_Cdense_accumB__lt_int32) // C+=b function (dense accum): GB (_Cdense_accumb__lt_int32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lt_int32) // C=scalar+B GB (_bind1st__lt_int32) // C=scalar+B' GB (_bind1st_tran__lt_int32) // C=A+scalar GB (_bind2nd__lt_int32) // C=A'+scalar GB (_bind2nd_tran__lt_int32) // C type: bool // A type: int32_t // B,b type: int32_t // BinaryOp: cij = (aij < bij) #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int32_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int32_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x < y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LT || GxB_NO_INT32 || GxB_NO_LT_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__lt_int32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__lt_int32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__lt_int32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type int32_t int32_t bwork = (*((int32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__lt_int32) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__lt_int32) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__lt_int32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) 
; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__lt_int32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__lt_int32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__lt_int32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__lt_int32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__lt_int32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return 
(GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; int32_t x = (*((int32_t *) x_input)) ; int32_t *Bx = (int32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int32_t bij = GBX (Bx, p, false) ; Cx [p] = (x < bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__lt_int32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; int32_t *Ax = (int32_t *) Ax_input ; int32_t y = (*((int32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int32_t aij = GBX (Ax, p, false) ; Cx [p] = (aij < y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x < aij) ; \ } GrB_Info GB (_bind1st_tran__lt_int32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t x = (*((const int32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij < y) ; \ } GrB_Info GB (_bind2nd_tran__lt_int32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t y = (*((const int32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
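/* Editorial note: the generated _bind1st__lt_int32 and _bind2nd__lt_int32 kernels
   above apply the boolean LT comparator with one operand bound to a scalar, skipping
   any entry whose bitmap flag is clear (the GBB/GBX macros hide the bitmap/full-matrix
   cases). The following is a minimal, self-contained sketch of that pattern only; the
   names demo_bind1st_lt_int32 and bitmap are illustrative and are not part of the
   GraphBLAS sources. bind2nd is the mirror image: Cx [p] = (Ax [p] < y). */

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Cx [p] = (x < Bx [p]) for every entry whose bitmap flag is set.
   A NULL bitmap stands in for a full matrix where every entry is present. */
static void demo_bind1st_lt_int32
(
    bool *Cx,                   /* output values                    */
    int32_t x,                  /* scalar bound to the 1st argument */
    const int32_t *Bx,          /* input values                     */
    const int8_t *bitmap,       /* 1 = entry present, 0 = absent    */
    int64_t nz                  /* number of value slots            */
)
{
    for (int64_t p = 0 ; p < nz ; p++)
    {
        if (bitmap != NULL && !bitmap [p]) continue ;
        Cx [p] = (x < Bx [p]) ;
    }
}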
_sphere.c
/* Generated by Cython 0.20.1 on Fri Jun 6 16:03:35 2014 */ #define PY_SSIZE_T_CLEAN #ifndef CYTHON_USE_PYLONG_INTERNALS #ifdef PYLONG_BITS_IN_DIGIT #define CYTHON_USE_PYLONG_INTERNALS 0 #else #include "pyconfig.h" #ifdef PYLONG_BITS_IN_DIGIT #define CYTHON_USE_PYLONG_INTERNALS 1 #else #define CYTHON_USE_PYLONG_INTERNALS 0 #endif #endif #endif #include "Python.h" #ifndef Py_PYTHON_H #error Python headers needed to compile C extensions, please install development version of Python. #elif PY_VERSION_HEX < 0x02040000 #error Cython requires Python 2.4+. #else #define CYTHON_ABI "0_20_1" #include <stddef.h> /* For offsetof */ #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #ifndef Py_HUGE_VAL #define Py_HUGE_VAL HUGE_VAL #endif #ifdef PYPY_VERSION #define CYTHON_COMPILING_IN_PYPY 1 #define CYTHON_COMPILING_IN_CPYTHON 0 #else #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_CPYTHON 1 #endif #if CYTHON_COMPILING_IN_PYPY #define Py_OptimizeFlag 0 #endif #if PY_VERSION_HEX < 0x02050000 typedef int Py_ssize_t; #define PY_SSIZE_T_MAX INT_MAX #define PY_SSIZE_T_MIN INT_MIN #define PY_FORMAT_SIZE_T "" #define CYTHON_FORMAT_SSIZE_T "" #define PyInt_FromSsize_t(z) PyInt_FromLong(z) #define PyInt_AsSsize_t(o) __Pyx_PyInt_As_int(o) #define PyNumber_Index(o) ((PyNumber_Check(o) && !PyFloat_Check(o)) ? PyNumber_Int(o) : \ (PyErr_Format(PyExc_TypeError, \ "expected index value, got %.200s", Py_TYPE(o)->tp_name), \ (PyObject*)0)) #define __Pyx_PyIndex_Check(o) (PyNumber_Check(o) && !PyFloat_Check(o) && \ !PyComplex_Check(o)) #define PyIndex_Check __Pyx_PyIndex_Check #define PyErr_WarnEx(category, message, stacklevel) PyErr_Warn(category, message) #define __PYX_BUILD_PY_SSIZE_T "i" #else #define __PYX_BUILD_PY_SSIZE_T "n" #define CYTHON_FORMAT_SSIZE_T "z" #define __Pyx_PyIndex_Check PyIndex_Check #endif #if PY_VERSION_HEX < 0x02060000 #define Py_REFCNT(ob) (((PyObject*)(ob))->ob_refcnt) #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type) #define Py_SIZE(ob) (((PyVarObject*)(ob))->ob_size) #define PyVarObject_HEAD_INIT(type, size) \ PyObject_HEAD_INIT(type) size, #define PyType_Modified(t) typedef struct { void *buf; PyObject *obj; Py_ssize_t len; Py_ssize_t itemsize; int readonly; int ndim; char *format; Py_ssize_t *shape; Py_ssize_t *strides; Py_ssize_t *suboffsets; void *internal; } Py_buffer; #define PyBUF_SIMPLE 0 #define PyBUF_WRITABLE 0x0001 #define PyBUF_FORMAT 0x0004 #define PyBUF_ND 0x0008 #define PyBUF_STRIDES (0x0010 | PyBUF_ND) #define PyBUF_C_CONTIGUOUS (0x0020 | PyBUF_STRIDES) #define PyBUF_F_CONTIGUOUS (0x0040 | PyBUF_STRIDES) #define PyBUF_ANY_CONTIGUOUS (0x0080 | PyBUF_STRIDES) #define PyBUF_INDIRECT (0x0100 | PyBUF_STRIDES) #define PyBUF_RECORDS (PyBUF_STRIDES | PyBUF_FORMAT | PyBUF_WRITABLE) #define PyBUF_FULL (PyBUF_INDIRECT | PyBUF_FORMAT | PyBUF_WRITABLE) typedef int (*getbufferproc)(PyObject *, Py_buffer *, int); typedef void (*releasebufferproc)(PyObject *, Py_buffer *); #endif #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \ PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, 
lnos) #define __Pyx_DefaultClassType PyClass_Type #else #define __Pyx_BUILTIN_MODULE_NAME "builtins" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyType_Type #endif #if PY_VERSION_HEX < 0x02060000 #define PyUnicode_FromString(s) PyUnicode_Decode(s, strlen(s), "UTF-8", "strict") #endif #if PY_MAJOR_VERSION >= 3 #define Py_TPFLAGS_CHECKTYPES 0 #define Py_TPFLAGS_HAVE_INDEX 0 #endif #if (PY_VERSION_HEX < 0x02060000) || (PY_MAJOR_VERSION >= 3) #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #if PY_VERSION_HEX < 0x02060000 #define Py_TPFLAGS_HAVE_VERSION_TAG 0 #endif #if PY_VERSION_HEX < 0x02060000 && !defined(Py_TPFLAGS_IS_ABSTRACT) #define Py_TPFLAGS_IS_ABSTRACT 0 #endif #if PY_VERSION_HEX < 0x030400a1 && !defined(Py_TPFLAGS_HAVE_FINALIZE) #define Py_TPFLAGS_HAVE_FINALIZE 0 #endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ? \ 0 : _PyUnicode_Ready((PyObject *)(op))) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) #else #define CYTHON_PEP393_ENABLED 0 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #endif #if CYTHON_COMPILING_IN_PYPY #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) #else #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ? \ PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) #endif #define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) #define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? 
PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) #else #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #endif #if PY_VERSION_HEX < 0x02060000 #define PyBytesObject PyStringObject #define PyBytes_Type PyString_Type #define PyBytes_Check PyString_Check #define PyBytes_CheckExact PyString_CheckExact #define PyBytes_FromString PyString_FromString #define PyBytes_FromStringAndSize PyString_FromStringAndSize #define PyBytes_FromFormat PyString_FromFormat #define PyBytes_DecodeEscape PyString_DecodeEscape #define PyBytes_AsString PyString_AsString #define PyBytes_AsStringAndSize PyString_AsStringAndSize #define PyBytes_Size PyString_Size #define PyBytes_AS_STRING PyString_AS_STRING #define PyBytes_GET_SIZE PyString_GET_SIZE #define PyBytes_Repr PyString_Repr #define PyBytes_Concat PyString_Concat #define PyBytes_ConcatAndDel PyString_ConcatAndDel #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) #else #define __Pyx_PyBaseString_Check(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj) || \ PyString_Check(obj) || PyUnicode_Check(obj)) #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) #endif #if PY_VERSION_HEX < 0x02060000 #define PySet_Check(obj) PyObject_TypeCheck(obj, &PySet_Type) #define PyFrozenSet_Check(obj) PyObject_TypeCheck(obj, &PyFrozenSet_Type) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #define PyNumber_Int PyNumber_Long #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_VERSION_HEX < 0x030200A4 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define __Pyx_PyInt_AsHash_t PyInt_AsLong #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if (PY_MAJOR_VERSION < 3) || (PY_VERSION_HEX >= 0x03010300) #define __Pyx_PySequence_GetSlice(obj, a, b) PySequence_GetSlice(obj, a, b) #define __Pyx_PySequence_SetSlice(obj, a, b, value) PySequence_SetSlice(obj, a, b, value) #define __Pyx_PySequence_DelSlice(obj, a, b) PySequence_DelSlice(obj, a, b) #else #define __Pyx_PySequence_GetSlice(obj, a, b) (unlikely(!(obj)) ? \ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), (PyObject*)0) : \ (likely((obj)->ob_type->tp_as_mapping) ? 
(PySequence_GetSlice(obj, a, b)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object is unsliceable", (obj)->ob_type->tp_name), (PyObject*)0))) #define __Pyx_PySequence_SetSlice(obj, a, b, value) (unlikely(!(obj)) ? \ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_SetSlice(obj, a, b, value)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice assignment", (obj)->ob_type->tp_name), -1))) #define __Pyx_PySequence_DelSlice(obj, a, b) (unlikely(!(obj)) ? \ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_DelSlice(obj, a, b)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice deletion", (obj)->ob_type->tp_name), -1))) #endif #if PY_MAJOR_VERSION >= 3 #define PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func)) #endif #if PY_VERSION_HEX < 0x02050000 #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),((char *)(n))) #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),((char *)(n)),(a)) #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),((char *)(n))) #else #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),(n)) #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),(n),(a)) #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),(n)) #endif #if PY_VERSION_HEX < 0x02050000 #define __Pyx_NAMESTR(n) ((char *)(n)) #define __Pyx_DOCSTR(n) ((char *)(n)) #else #define __Pyx_NAMESTR(n) (n) #define __Pyx_DOCSTR(n) (n) #endif #ifndef CYTHON_INLINE #if defined(__GNUC__) #define CYTHON_INLINE __inline__ #elif defined(_MSC_VER) #define CYTHON_INLINE __inline #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_INLINE inline #else #define CYTHON_INLINE #endif #endif #ifndef CYTHON_RESTRICT #if defined(__GNUC__) #define CYTHON_RESTRICT __restrict__ #elif defined(_MSC_VER) && _MSC_VER >= 1400 #define CYTHON_RESTRICT __restrict #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_RESTRICT restrict #else #define CYTHON_RESTRICT #endif #endif #ifdef NAN #define __PYX_NAN() ((float) NAN) #else static CYTHON_INLINE float __PYX_NAN() { /* Initialize NaN. The sign is irrelevant, an exponent with all bits 1 and a nonzero mantissa means NaN. If the first bit in the mantissa is 1, it is a quiet NaN. 
*/ float value; memset(&value, 0xFF, sizeof(value)); return value; } #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #endif #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include <math.h> #define __PYX_HAVE__fatiando__gravmag___sphere #define __PYX_HAVE_API__fatiando__gravmag___sphere #include "string.h" #include "stdio.h" #include "stdlib.h" #include "numpy/arrayobject.h" #include "numpy/ufuncobject.h" #include "omp.h" #ifdef _OPENMP #include <omp.h> #endif /* _OPENMP */ #ifdef PYREX_WITHOUT_ASSERTIONS #define CYTHON_WITHOUT_ASSERTIONS #endif #ifndef CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif # elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif typedef struct {PyObject **p; char *s; const Py_ssize_t n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; /*proto*/ #define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 #define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT 0 #define __PYX_DEFAULT_STRING_ENCODING "" #define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString #define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #define __Pyx_fits_Py_ssize_t(v, type, is_signed) ( \ (sizeof(type) < sizeof(Py_ssize_t)) || \ (sizeof(type) > sizeof(Py_ssize_t) && \ likely(v < (type)PY_SSIZE_T_MAX || \ v == (type)PY_SSIZE_T_MAX) && \ (!is_signed || likely(v > (type)PY_SSIZE_T_MIN || \ v == (type)PY_SSIZE_T_MIN))) || \ (sizeof(type) == sizeof(Py_ssize_t) && \ (is_signed || likely(v < (type)PY_SSIZE_T_MAX || \ v == (type)PY_SSIZE_T_MAX))) ) static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject*); static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); #define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) #define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) #define __Pyx_PyBytes_FromString PyBytes_FromString #define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(char*); #if PY_MAJOR_VERSION < 3 #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #else #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize #endif #define __Pyx_PyObject_AsSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_FromUString(s) __Pyx_PyObject_FromString((char*)s) #define __Pyx_PyBytes_FromUString(s) __Pyx_PyBytes_FromString((char*)s) #define __Pyx_PyByteArray_FromUString(s) __Pyx_PyByteArray_FromString((char*)s) #define __Pyx_PyStr_FromUString(s) __Pyx_PyStr_FromString((char*)s) #define 
__Pyx_PyUnicode_FromUString(s) __Pyx_PyUnicode_FromString((char*)s) #if PY_MAJOR_VERSION < 3 static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { const Py_UNICODE *u_end = u; while (*u_end++) ; return u_end - u - 1; } #else #define __Pyx_Py_UNICODE_strlen Py_UNICODE_strlen #endif #define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) #define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode #define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode #define __Pyx_Owned_Py_None(b) (Py_INCREF(Py_None), Py_None) #define __Pyx_PyBool_FromLong(b) ((b) ? (Py_INCREF(Py_True), Py_True) : (Py_INCREF(Py_False), Py_False)) static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x); static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); #if CYTHON_COMPILING_IN_CPYTHON #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) #else #define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) #endif #define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII static int __Pyx_sys_getdefaultencoding_not_ascii; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys = NULL; PyObject* default_encoding = NULL; PyObject* ascii_chars_u = NULL; PyObject* ascii_chars_b = NULL; sys = PyImport_ImportModule("sys"); if (sys == NULL) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); if (default_encoding == NULL) goto bad; if (strcmp(PyBytes_AsString(default_encoding), "ascii") == 0) { __Pyx_sys_getdefaultencoding_not_ascii = 0; } else { const char* default_encoding_c = PyBytes_AS_STRING(default_encoding); char ascii_chars[128]; int c; for (c = 0; c < 128; c++) { ascii_chars[c] = c; } __Pyx_sys_getdefaultencoding_not_ascii = 1; ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); if (ascii_chars_u == NULL) goto bad; ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); if (ascii_chars_b == NULL || strncmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { PyErr_Format( PyExc_ValueError, "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", default_encoding_c); goto bad; } } Py_XDECREF(sys); Py_XDECREF(default_encoding); Py_XDECREF(ascii_chars_u); Py_XDECREF(ascii_chars_b); return 0; bad: Py_XDECREF(sys); Py_XDECREF(default_encoding); Py_XDECREF(ascii_chars_u); Py_XDECREF(ascii_chars_b); return -1; } #endif #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) #else #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT static char* __PYX_DEFAULT_STRING_ENCODING; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys = NULL; PyObject* default_encoding = NULL; char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (sys == NULL) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); if (default_encoding == NULL) goto bad; default_encoding_c = PyBytes_AS_STRING(default_encoding); __PYX_DEFAULT_STRING_ENCODING = (char*) 
malloc(strlen(default_encoding_c)); strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); Py_DECREF(sys); Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(sys); Py_XDECREF(default_encoding); return -1; } #endif #endif #ifdef __GNUC__ /* Test for GCC > 2.95 */ #if __GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else /* __GNUC__ > 2 ... */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ > 2 ... */ #else /* __GNUC__ */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ */ static PyObject *__pyx_m; static PyObject *__pyx_d; static PyObject *__pyx_b; static PyObject *__pyx_empty_tuple; static PyObject *__pyx_empty_bytes; static int __pyx_lineno; static int __pyx_clineno = 0; static const char * __pyx_cfilenm= __FILE__; static const char *__pyx_filename; #if !defined(CYTHON_CCOMPLEX) #if defined(__cplusplus) #define CYTHON_CCOMPLEX 1 #elif defined(_Complex_I) #define CYTHON_CCOMPLEX 1 #else #define CYTHON_CCOMPLEX 0 #endif #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus #include <complex> #else #include <complex.h> #endif #endif #if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__) #undef _Complex_I #define _Complex_I 1.0fj #endif static const char *__pyx_f[] = { "_sphere.pyx", "__init__.pxd", "type.pxd", }; #define IS_UNSIGNED(type) (((type) -1) > 0) struct __Pyx_StructField_; #define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0) typedef struct { const char* name; /* for error messages only */ struct __Pyx_StructField_* fields; size_t size; /* sizeof(type) */ size_t arraysize[8]; /* length of array in each dimension */ int ndim; char typegroup; /* _R_eal, _C_omplex, Signed _I_nt, _U_nsigned int, _S_truct, _P_ointer, _O_bject, c_H_ar */ char is_unsigned; int flags; } __Pyx_TypeInfo; typedef struct __Pyx_StructField_ { __Pyx_TypeInfo* type; const char* name; size_t offset; } __Pyx_StructField; typedef struct { __Pyx_StructField* field; size_t parent_offset; } __Pyx_BufFmt_StackElem; typedef struct { __Pyx_StructField root; __Pyx_BufFmt_StackElem* head; size_t fmt_offset; size_t new_count, enc_count; size_t struct_alignment; int is_complex; char enc_type; char new_packmode; char enc_packmode; char is_valid_array; } __Pyx_BufFmt_Context; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":723 * # in Cython to enable them only on the right systems. 
* * ctypedef npy_int8 int8_t # <<<<<<<<<<<<<< * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t */ typedef npy_int8 __pyx_t_5numpy_int8_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":724 * * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t # <<<<<<<<<<<<<< * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t */ typedef npy_int16 __pyx_t_5numpy_int16_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":725 * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t # <<<<<<<<<<<<<< * ctypedef npy_int64 int64_t * #ctypedef npy_int96 int96_t */ typedef npy_int32 __pyx_t_5numpy_int32_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":726 * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t # <<<<<<<<<<<<<< * #ctypedef npy_int96 int96_t * #ctypedef npy_int128 int128_t */ typedef npy_int64 __pyx_t_5numpy_int64_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":730 * #ctypedef npy_int128 int128_t * * ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<< * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t */ typedef npy_uint8 __pyx_t_5numpy_uint8_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":731 * * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<< * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t */ typedef npy_uint16 __pyx_t_5numpy_uint16_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":732 * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<< * ctypedef npy_uint64 uint64_t * #ctypedef npy_uint96 uint96_t */ typedef npy_uint32 __pyx_t_5numpy_uint32_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":733 * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<< * #ctypedef npy_uint96 uint96_t * #ctypedef npy_uint128 uint128_t */ typedef npy_uint64 __pyx_t_5numpy_uint64_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":737 * #ctypedef npy_uint128 uint128_t * * ctypedef npy_float32 float32_t # <<<<<<<<<<<<<< * ctypedef npy_float64 float64_t * #ctypedef npy_float80 float80_t */ typedef npy_float32 __pyx_t_5numpy_float32_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":738 * * ctypedef npy_float32 float32_t * ctypedef npy_float64 float64_t # <<<<<<<<<<<<<< * #ctypedef npy_float80 float80_t * #ctypedef npy_float128 float128_t */ typedef npy_float64 __pyx_t_5numpy_float64_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":747 * # The int types are mapped a bit surprising -- * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t # <<<<<<<<<<<<<< * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t */ typedef npy_long __pyx_t_5numpy_int_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":748 * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t * ctypedef npy_longlong long_t # <<<<<<<<<<<<<< * ctypedef npy_longlong longlong_t * */ typedef npy_longlong __pyx_t_5numpy_long_t; /* 
"/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":749 * ctypedef npy_long int_t * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<< * * ctypedef npy_ulong uint_t */ typedef npy_longlong __pyx_t_5numpy_longlong_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":751 * ctypedef npy_longlong longlong_t * * ctypedef npy_ulong uint_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t */ typedef npy_ulong __pyx_t_5numpy_uint_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":752 * * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulonglong_t * */ typedef npy_ulonglong __pyx_t_5numpy_ulong_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":753 * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<< * * ctypedef npy_intp intp_t */ typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":755 * ctypedef npy_ulonglong ulonglong_t * * ctypedef npy_intp intp_t # <<<<<<<<<<<<<< * ctypedef npy_uintp uintp_t * */ typedef npy_intp __pyx_t_5numpy_intp_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":756 * * ctypedef npy_intp intp_t * ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<< * * ctypedef npy_double float_t */ typedef npy_uintp __pyx_t_5numpy_uintp_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":758 * ctypedef npy_uintp uintp_t * * ctypedef npy_double float_t # <<<<<<<<<<<<<< * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t */ typedef npy_double __pyx_t_5numpy_float_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":759 * * ctypedef npy_double float_t * ctypedef npy_double double_t # <<<<<<<<<<<<<< * ctypedef npy_longdouble longdouble_t * */ typedef npy_double __pyx_t_5numpy_double_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":760 * ctypedef npy_double float_t * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cfloat cfloat_t */ typedef npy_longdouble __pyx_t_5numpy_longdouble_t; /* "fatiando/gravmag/_sphere.pyx":15 * * DTYPE = numpy.float * ctypedef numpy.float_t DTYPE_T # <<<<<<<<<<<<<< * * cdef inline double kernelz(double x, double y, double z, double r_cb) nogil: */ typedef __pyx_t_5numpy_float_t __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T; #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< float > __pyx_t_float_complex; #else typedef float _Complex __pyx_t_float_complex; #endif #else typedef struct { float real, imag; } __pyx_t_float_complex; #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< double > __pyx_t_double_complex; #else typedef double _Complex __pyx_t_double_complex; #endif #else typedef struct { double real, imag; } __pyx_t_double_complex; #endif /*--- Type declarations ---*/ /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":762 * ctypedef npy_longdouble longdouble_t * * ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<< * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t */ typedef npy_cfloat __pyx_t_5numpy_cfloat_t; /* 
"/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":763 * * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<< * ctypedef npy_clongdouble clongdouble_t * */ typedef npy_cdouble __pyx_t_5numpy_cdouble_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":764 * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cdouble complex_t */ typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":766 * ctypedef npy_clongdouble clongdouble_t * * ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew1(a): */ typedef npy_cdouble __pyx_t_5numpy_complex_t; #ifndef CYTHON_REFNANNY #define CYTHON_REFNANNY 0 #endif #if CYTHON_REFNANNY typedef struct { void (*INCREF)(void*, PyObject*, int); void (*DECREF)(void*, PyObject*, int); void (*GOTREF)(void*, PyObject*, int); void (*GIVEREF)(void*, PyObject*, int); void* (*SetupContext)(const char*, int, const char*); void (*FinishContext)(void**); } __Pyx_RefNannyAPIStruct; static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); /*proto*/ #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; #ifdef WITH_THREAD #define __Pyx_RefNannySetupContext(name, acquire_gil) \ if (acquire_gil) { \ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \ PyGILState_Release(__pyx_gilstate_save); \ } else { \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \ } #else #define __Pyx_RefNannySetupContext(name, acquire_gil) \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) #endif #define __Pyx_RefNannyFinishContext() \ __Pyx_RefNanny->FinishContext(&__pyx_refnanny) #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define __Pyx_RefNannySetupContext(name, acquire_gil) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define __Pyx_GOTREF(r) #define __Pyx_GIVEREF(r) #define __Pyx_XINCREF(r) Py_XINCREF(r) #define __Pyx_XDECREF(r) Py_XDECREF(r) #define __Pyx_XGOTREF(r) #define __Pyx_XGIVEREF(r) #endif /* CYTHON_REFNANNY */ #define __Pyx_XDECREF_SET(r, v) do { \ PyObject *tmp = (PyObject *) r; \ r = v; __Pyx_XDECREF(tmp); \ } while (0) #define __Pyx_DECREF_SET(r, v) do { \ PyObject *tmp = (PyObject *) r; \ r = v; __Pyx_DECREF(tmp); \ } while (0) #define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) #define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} 
while(0) #ifndef __PYX_FORCE_INIT_THREADS #define __PYX_FORCE_INIT_THREADS 0 #endif static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb); /*proto*/ static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb); /*proto*/ static void __Pyx_WriteUnraisable(const char *name, int clineno, int lineno, const char *filename, int full_traceback); /*proto*/ static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /*proto*/ static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); /*proto*/ static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[], \ PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, \ const char* function_name); /*proto*/ static CYTHON_INLINE int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, const char *name, int exact); /*proto*/ static CYTHON_INLINE int __Pyx_GetBufferAndValidate(Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack); static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info); #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_getattro)) return tp->tp_getattro(obj, attr_name); #if PY_MAJOR_VERSION < 3 if (likely(tp->tp_getattr)) return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); #endif return PyObject_GetAttr(obj, attr_name); } #else #define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) #endif static PyObject *__Pyx_GetBuiltinName(PyObject *name); /*proto*/ static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name); /*proto*/ #define __Pyx_BufPtrStrided1d(type, buf, i0, s0) (type)((char*)buf + i0 * s0) #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); /*proto*/ #else #define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) #endif static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /*proto*/ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /*proto*/ typedef struct { Py_ssize_t shape, strides, suboffsets; } __Pyx_Buf_DimInfo; typedef struct { size_t refcount; Py_buffer pybuffer; } __Pyx_Buffer; typedef struct { __Pyx_Buffer *rcbuffer; char *data; __Pyx_Buf_DimInfo diminfo[8]; } __Pyx_LocalBuf_ND; #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); static void __Pyx_ReleaseBuffer(Py_buffer *view); #else #define __Pyx_GetBuffer PyObject_GetBuffer #define __Pyx_ReleaseBuffer PyBuffer_Release #endif static Py_ssize_t __Pyx_zeros[] = {0, 0, 0, 0, 0, 0, 0, 0}; static Py_ssize_t __Pyx_minusones[] = {-1, -1, -1, -1, -1, -1, -1, -1}; static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); /*proto*/ #if CYTHON_CCOMPLEX #ifdef __cplusplus #define __Pyx_CREAL(z) ((z).real()) #define __Pyx_CIMAG(z) ((z).imag()) #else #define __Pyx_CREAL(z) (__real__(z)) #define __Pyx_CIMAG(z) (__imag__(z)) #endif #else #define __Pyx_CREAL(z) ((z).real) #define __Pyx_CIMAG(z) ((z).imag) #endif #if (defined(_WIN32) || 
defined(__clang__)) && defined(__cplusplus) && CYTHON_CCOMPLEX #define __Pyx_SET_CREAL(z,x) ((z).real(x)) #define __Pyx_SET_CIMAG(z,y) ((z).imag(y)) #else #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x) #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y) #endif static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float); #if CYTHON_CCOMPLEX #define __Pyx_c_eqf(a, b) ((a)==(b)) #define __Pyx_c_sumf(a, b) ((a)+(b)) #define __Pyx_c_difff(a, b) ((a)-(b)) #define __Pyx_c_prodf(a, b) ((a)*(b)) #define __Pyx_c_quotf(a, b) ((a)/(b)) #define __Pyx_c_negf(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zerof(z) ((z)==(float)0) #define __Pyx_c_conjf(z) (::std::conj(z)) #if 1 #define __Pyx_c_absf(z) (::std::abs(z)) #define __Pyx_c_powf(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zerof(z) ((z)==0) #define __Pyx_c_conjf(z) (conjf(z)) #if 1 #define __Pyx_c_absf(z) (cabsf(z)) #define __Pyx_c_powf(a, b) (cpowf(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex); static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex); #if 1 static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex, __pyx_t_float_complex); #endif #endif static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double); #if CYTHON_CCOMPLEX #define __Pyx_c_eq(a, b) ((a)==(b)) #define __Pyx_c_sum(a, b) ((a)+(b)) #define __Pyx_c_diff(a, b) ((a)-(b)) #define __Pyx_c_prod(a, b) ((a)*(b)) #define __Pyx_c_quot(a, b) ((a)/(b)) #define __Pyx_c_neg(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zero(z) ((z)==(double)0) #define __Pyx_c_conj(z) (::std::conj(z)) #if 1 #define __Pyx_c_abs(z) (::std::abs(z)) #define __Pyx_c_pow(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zero(z) ((z)==0) #define __Pyx_c_conj(z) (conj(z)) #if 1 #define __Pyx_c_abs(z) (cabs(z)) #define __Pyx_c_pow(a, b) (cpow(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex); static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex); #if 1 static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex, __pyx_t_double_complex); #endif #endif static CYTHON_INLINE PyObject* 
__Pyx_PyInt_From_int(int value); static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); static int __Pyx_check_binary_version(void); #if !defined(__Pyx_PyIdentifier_FromString) #if PY_MAJOR_VERSION < 3 #define __Pyx_PyIdentifier_FromString(s) PyString_FromString(s) #else #define __Pyx_PyIdentifier_FromString(s) PyUnicode_FromString(s) #endif #endif static PyObject *__Pyx_ImportModule(const char *name); /*proto*/ static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict); /*proto*/ typedef struct { int code_line; PyCodeObject* code_object; } __Pyx_CodeObjectCacheEntry; struct __Pyx_CodeObjectCache { int count; int max_count; __Pyx_CodeObjectCacheEntry* entries; }; static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); static PyCodeObject *__pyx_find_code_object(int code_line); static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename); /*proto*/ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /*proto*/ /* Module declarations from 'cpython.buffer' */ /* Module declarations from 'cpython.ref' */ /* Module declarations from 'libc.string' */ /* Module declarations from 'libc.stdio' */ /* Module declarations from 'cpython.object' */ /* Module declarations from '__builtin__' */ /* Module declarations from 'cpython.type' */ static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0; /* Module declarations from 'libc.stdlib' */ /* Module declarations from 'numpy' */ /* Module declarations from 'numpy' */ static PyTypeObject *__pyx_ptype_5numpy_dtype = 0; static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0; static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0; static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0; static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0; static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/ /* Module declarations from 'cython' */ /* Module declarations from 'openmp' */ /* Module declarations from 'fatiando.gravmag._sphere' */ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_7_sphere_kernelz(double, double, double, double); /*proto*/ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_7_sphere_kernelxx(double, double, double, double, double); /*proto*/ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_7_sphere_kernelxy(double, double, double, double, double); /*proto*/ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_7_sphere_kernelxz(double, double, double, double, double); /*proto*/ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_7_sphere_kernelyy(double, double, double, double, double); /*proto*/ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_7_sphere_kernelyz(double, double, double, double, double); /*proto*/ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_7_sphere_kernelzz(double, double, double, double, double); /*proto*/ static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T = { "DTYPE_T", NULL, sizeof(__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T), { 0 }, 0, 'R', 0, 0 }; #define __Pyx_MODULE_NAME "fatiando.gravmag._sphere" int __pyx_module_is_main_fatiando__gravmag___sphere = 0; /* Implementation of 'fatiando.gravmag._sphere' */ 
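/* Editorial note: the inline kernels declared above (kernelz, kernelxx, ..., kernelzz)
   are defined further down with the full Cython zero-division and GIL boilerplate, but
   each one reduces to a single expression taken from _sphere.pyx. As the variable names
   suggest, r_sqr, r_cb and r_5 are r^2, r^3 and r^5 of the observation-point offset
   (x, y, z); up to sign these are the first and second derivatives of 1/r. The demo_*
   functions below are an editorial restatement for reference only and do not appear in
   the generated module. */
static CYTHON_INLINE double demo_kernelz  (double z, double r_cb)              { return z / r_cb ; }
static CYTHON_INLINE double demo_kernelxx (double x, double r_sqr, double r_5) { return ((3.0 * x * x) - r_sqr) / r_5 ; }
static CYTHON_INLINE double demo_kernelxy (double x, double y, double r_5)     { return (3.0 * x * y) / r_5 ; }
static CYTHON_INLINE double demo_kernelzz (double z, double r_sqr, double r_5) { return ((3.0 * z * z) - r_sqr) / r_5 ; }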
static PyObject *__pyx_builtin_ValueError; static PyObject *__pyx_builtin_range; static PyObject *__pyx_builtin_RuntimeError; static PyObject *__pyx_pf_8fatiando_7gravmag_7_sphere_tf(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, double __pyx_v_xc, double __pyx_v_yc, double __pyx_v_zc, double __pyx_v_radius, double __pyx_v_mx, double __pyx_v_my, double __pyx_v_mz, double __pyx_v_fx, double __pyx_v_fy, double __pyx_v_fz, PyArrayObject *__pyx_v_res); /* proto */ static PyObject *__pyx_pf_8fatiando_7gravmag_7_sphere_2bx(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, double __pyx_v_xc, double __pyx_v_yc, double __pyx_v_zc, double __pyx_v_radius, double __pyx_v_mx, double __pyx_v_my, double __pyx_v_mz, PyArrayObject *__pyx_v_res); /* proto */ static PyObject *__pyx_pf_8fatiando_7gravmag_7_sphere_4by(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, double __pyx_v_xc, double __pyx_v_yc, double __pyx_v_zc, double __pyx_v_radius, double __pyx_v_mx, double __pyx_v_my, double __pyx_v_mz, PyArrayObject *__pyx_v_res); /* proto */ static PyObject *__pyx_pf_8fatiando_7gravmag_7_sphere_6bz(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, double __pyx_v_xc, double __pyx_v_yc, double __pyx_v_zc, double __pyx_v_radius, double __pyx_v_mx, double __pyx_v_my, double __pyx_v_mz, PyArrayObject *__pyx_v_res); /* proto */ static PyObject *__pyx_pf_8fatiando_7gravmag_7_sphere_8gz(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, double __pyx_v_xc, double __pyx_v_yc, double __pyx_v_zc, double __pyx_v_radius, double __pyx_v_density, PyArrayObject *__pyx_v_res); /* proto */ static PyObject *__pyx_pf_8fatiando_7gravmag_7_sphere_10gxx(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, double __pyx_v_xc, double __pyx_v_yc, double __pyx_v_zc, double __pyx_v_radius, double __pyx_v_density, PyArrayObject *__pyx_v_res); /* proto */ static PyObject *__pyx_pf_8fatiando_7gravmag_7_sphere_12gxy(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, double __pyx_v_xc, double __pyx_v_yc, double __pyx_v_zc, double __pyx_v_radius, double __pyx_v_density, PyArrayObject *__pyx_v_res); /* proto */ static PyObject *__pyx_pf_8fatiando_7gravmag_7_sphere_14gxz(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, double __pyx_v_xc, double __pyx_v_yc, double __pyx_v_zc, double __pyx_v_radius, double __pyx_v_density, PyArrayObject *__pyx_v_res); /* proto */ static PyObject *__pyx_pf_8fatiando_7gravmag_7_sphere_16gyy(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, double __pyx_v_xc, double __pyx_v_yc, double __pyx_v_zc, double __pyx_v_radius, double __pyx_v_density, PyArrayObject *__pyx_v_res); /* proto */ static PyObject *__pyx_pf_8fatiando_7gravmag_7_sphere_18gyz(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, double __pyx_v_xc, double __pyx_v_yc, double __pyx_v_zc, double __pyx_v_radius, double __pyx_v_density, PyArrayObject *__pyx_v_res); /* proto */ static PyObject 
*__pyx_pf_8fatiando_7gravmag_7_sphere_20gzz(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, double __pyx_v_xc, double __pyx_v_yc, double __pyx_v_zc, double __pyx_v_radius, double __pyx_v_density, PyArrayObject *__pyx_v_res); /* proto */ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info); /* proto */ static char __pyx_k_B[] = "B"; static char __pyx_k_H[] = "H"; static char __pyx_k_I[] = "I"; static char __pyx_k_L[] = "L"; static char __pyx_k_O[] = "O"; static char __pyx_k_Q[] = "Q"; static char __pyx_k_b[] = "b"; static char __pyx_k_d[] = "d"; static char __pyx_k_f[] = "f"; static char __pyx_k_g[] = "g"; static char __pyx_k_h[] = "h"; static char __pyx_k_i[] = "i"; static char __pyx_k_l[] = "l"; static char __pyx_k_q[] = "q"; static char __pyx_k_x[] = "x"; static char __pyx_k_y[] = "y"; static char __pyx_k_z[] = "z"; static char __pyx_k_Zd[] = "Zd"; static char __pyx_k_Zf[] = "Zf"; static char __pyx_k_Zg[] = "Zg"; static char __pyx_k_bx[] = "bx"; static char __pyx_k_by[] = "by"; static char __pyx_k_bz[] = "bz"; static char __pyx_k_fx[] = "fx"; static char __pyx_k_fy[] = "fy"; static char __pyx_k_fz[] = "fz"; static char __pyx_k_gz[] = "gz"; static char __pyx_k_mx[] = "mx"; static char __pyx_k_my[] = "my"; static char __pyx_k_mz[] = "mz"; static char __pyx_k_pi[] = "pi"; static char __pyx_k_tf[] = "tf"; static char __pyx_k_v1[] = "v1"; static char __pyx_k_v2[] = "v2"; static char __pyx_k_v3[] = "v3"; static char __pyx_k_v4[] = "v4"; static char __pyx_k_v5[] = "v5"; static char __pyx_k_v6[] = "v6"; static char __pyx_k_xc[] = "xc"; static char __pyx_k_xp[] = "xp"; static char __pyx_k_yc[] = "yc"; static char __pyx_k_yp[] = "yp"; static char __pyx_k_zc[] = "zc"; static char __pyx_k_zp[] = "zp"; static char __pyx_k_gxx[] = "gxx"; static char __pyx_k_gxy[] = "gxy"; static char __pyx_k_gxz[] = "gxz"; static char __pyx_k_gyy[] = "gyy"; static char __pyx_k_gyz[] = "gyz"; static char __pyx_k_gzz[] = "gzz"; static char __pyx_k_r_5[] = "r_5"; static char __pyx_k_res[] = "res"; static char __pyx_k_main[] = "__main__"; static char __pyx_k_mass[] = "mass"; static char __pyx_k_r_cb[] = "r_cb"; static char __pyx_k_size[] = "size"; static char __pyx_k_test[] = "__test__"; static char __pyx_k_DTYPE[] = "DTYPE"; static char __pyx_k_float[] = "float"; static char __pyx_k_numpy[] = "numpy"; static char __pyx_k_r_sqr[] = "r_sqr"; static char __pyx_k_range[] = "range"; static char __pyx_k_import[] = "__import__"; static char __pyx_k_radius[] = "radius"; static char __pyx_k_volume[] = "volume"; static char __pyx_k_density[] = "density"; static char __pyx_k_ValueError[] = "ValueError"; static char __pyx_k_RuntimeError[] = "RuntimeError"; static char __pyx_k_pyx_getbuffer[] = "__pyx_getbuffer"; static char __pyx_k_pyx_releasebuffer[] = "__pyx_releasebuffer"; static char __pyx_k_fatiando_gravmag__sphere[] = "fatiando.gravmag._sphere"; static char __pyx_k_ndarray_is_not_C_contiguous[] = "ndarray is not C contiguous"; static char __pyx_k_home_leo_src_fatiando_fatiando[] = "/home/leo/src/fatiando/fatiando/gravmag/_sphere.pyx"; static char __pyx_k_Cython_implementation_of_the_gr[] = "\nCython implementation of the gravity and magnetic fields of spheres.\n"; static char __pyx_k_unknown_dtype_code_in_numpy_pxd[] = "unknown dtype code in numpy.pxd (%d)"; static char 
__pyx_k_Format_string_allocated_too_shor[] = "Format string allocated too short, see comment in numpy.pxd"; static char __pyx_k_Non_native_byte_order_not_suppor[] = "Non-native byte order not supported"; static char __pyx_k_ndarray_is_not_Fortran_contiguou[] = "ndarray is not Fortran contiguous"; static char __pyx_k_Format_string_allocated_too_shor_2[] = "Format string allocated too short."; static PyObject *__pyx_n_s_DTYPE; static PyObject *__pyx_kp_u_Format_string_allocated_too_shor; static PyObject *__pyx_kp_u_Format_string_allocated_too_shor_2; static PyObject *__pyx_kp_u_Non_native_byte_order_not_suppor; static PyObject *__pyx_n_s_RuntimeError; static PyObject *__pyx_n_s_ValueError; static PyObject *__pyx_n_s_bx; static PyObject *__pyx_n_s_by; static PyObject *__pyx_n_s_bz; static PyObject *__pyx_n_s_density; static PyObject *__pyx_n_s_fatiando_gravmag__sphere; static PyObject *__pyx_n_s_float; static PyObject *__pyx_n_s_fx; static PyObject *__pyx_n_s_fy; static PyObject *__pyx_n_s_fz; static PyObject *__pyx_n_s_gxx; static PyObject *__pyx_n_s_gxy; static PyObject *__pyx_n_s_gxz; static PyObject *__pyx_n_s_gyy; static PyObject *__pyx_n_s_gyz; static PyObject *__pyx_n_s_gz; static PyObject *__pyx_n_s_gzz; static PyObject *__pyx_kp_s_home_leo_src_fatiando_fatiando; static PyObject *__pyx_n_s_import; static PyObject *__pyx_n_s_l; static PyObject *__pyx_n_s_main; static PyObject *__pyx_n_s_mass; static PyObject *__pyx_n_s_mx; static PyObject *__pyx_n_s_my; static PyObject *__pyx_n_s_mz; static PyObject *__pyx_kp_u_ndarray_is_not_C_contiguous; static PyObject *__pyx_kp_u_ndarray_is_not_Fortran_contiguou; static PyObject *__pyx_n_s_numpy; static PyObject *__pyx_n_s_pi; static PyObject *__pyx_n_s_pyx_getbuffer; static PyObject *__pyx_n_s_pyx_releasebuffer; static PyObject *__pyx_n_s_r_5; static PyObject *__pyx_n_s_r_cb; static PyObject *__pyx_n_s_r_sqr; static PyObject *__pyx_n_s_radius; static PyObject *__pyx_n_s_range; static PyObject *__pyx_n_s_res; static PyObject *__pyx_n_s_size; static PyObject *__pyx_n_s_test; static PyObject *__pyx_n_s_tf; static PyObject *__pyx_kp_u_unknown_dtype_code_in_numpy_pxd; static PyObject *__pyx_n_s_v1; static PyObject *__pyx_n_s_v2; static PyObject *__pyx_n_s_v3; static PyObject *__pyx_n_s_v4; static PyObject *__pyx_n_s_v5; static PyObject *__pyx_n_s_v6; static PyObject *__pyx_n_s_volume; static PyObject *__pyx_n_s_x; static PyObject *__pyx_n_s_xc; static PyObject *__pyx_n_s_xp; static PyObject *__pyx_n_s_y; static PyObject *__pyx_n_s_yc; static PyObject *__pyx_n_s_yp; static PyObject *__pyx_n_s_z; static PyObject *__pyx_n_s_zc; static PyObject *__pyx_n_s_zp; static PyObject *__pyx_float_3_; static PyObject *__pyx_float_4_; static PyObject *__pyx_tuple_; static PyObject *__pyx_tuple__2; static PyObject *__pyx_tuple__3; static PyObject *__pyx_tuple__4; static PyObject *__pyx_tuple__5; static PyObject *__pyx_tuple__6; static PyObject *__pyx_tuple__7; static PyObject *__pyx_tuple__9; static PyObject *__pyx_tuple__11; static PyObject *__pyx_tuple__13; static PyObject *__pyx_tuple__15; static PyObject *__pyx_tuple__17; static PyObject *__pyx_tuple__19; static PyObject *__pyx_tuple__21; static PyObject *__pyx_tuple__23; static PyObject *__pyx_tuple__25; static PyObject *__pyx_tuple__27; static PyObject *__pyx_codeobj__8; static PyObject *__pyx_codeobj__10; static PyObject *__pyx_codeobj__12; static PyObject *__pyx_codeobj__14; static PyObject *__pyx_codeobj__16; static PyObject *__pyx_codeobj__18; static PyObject *__pyx_codeobj__20; static PyObject 
*__pyx_codeobj__22; static PyObject *__pyx_codeobj__24; static PyObject *__pyx_codeobj__26; static PyObject *__pyx_codeobj__28; /* "fatiando/gravmag/_sphere.pyx":17 * ctypedef numpy.float_t DTYPE_T * * cdef inline double kernelz(double x, double y, double z, double r_cb) nogil: # <<<<<<<<<<<<<< * return z/r_cb * cdef inline double kernelxx(double x, double y, double z, double r_sqr, */ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_7_sphere_kernelz(CYTHON_UNUSED double __pyx_v_x, CYTHON_UNUSED double __pyx_v_y, double __pyx_v_z, double __pyx_v_r_cb) { double __pyx_r; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "fatiando/gravmag/_sphere.pyx":18 * * cdef inline double kernelz(double x, double y, double z, double r_cb) nogil: * return z/r_cb # <<<<<<<<<<<<<< * cdef inline double kernelxx(double x, double y, double z, double r_sqr, * double r_5) nogil: */ if (unlikely(__pyx_v_r_cb == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 18; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_r = (__pyx_v_z / __pyx_v_r_cb); goto __pyx_L0; /* "fatiando/gravmag/_sphere.pyx":17 * ctypedef numpy.float_t DTYPE_T * * cdef inline double kernelz(double x, double y, double z, double r_cb) nogil: # <<<<<<<<<<<<<< * return z/r_cb * cdef inline double kernelxx(double x, double y, double z, double r_sqr, */ /* function exit code */ __pyx_L1_error:; __Pyx_WriteUnraisable("fatiando.gravmag._sphere.kernelz", __pyx_clineno, __pyx_lineno, __pyx_filename, 0); __pyx_r = 0; __pyx_L0:; return __pyx_r; } /* "fatiando/gravmag/_sphere.pyx":19 * cdef inline double kernelz(double x, double y, double z, double r_cb) nogil: * return z/r_cb * cdef inline double kernelxx(double x, double y, double z, double r_sqr, # <<<<<<<<<<<<<< * double r_5) nogil: * return ((3*x**2) - r_sqr)/r_5 */ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_7_sphere_kernelxx(double __pyx_v_x, CYTHON_UNUSED double __pyx_v_y, CYTHON_UNUSED double __pyx_v_z, double __pyx_v_r_sqr, double __pyx_v_r_5) { double __pyx_r; double __pyx_t_1; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "fatiando/gravmag/_sphere.pyx":21 * cdef inline double kernelxx(double x, double y, double z, double r_sqr, * double r_5) nogil: * return ((3*x**2) - r_sqr)/r_5 # <<<<<<<<<<<<<< * cdef inline double kernelxy(double x, double y, double z, double r_sqr, * double r_5) nogil: */ __pyx_t_1 = ((3.0 * pow(__pyx_v_x, 2.0)) - __pyx_v_r_sqr); if (unlikely(__pyx_v_r_5 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_r = (__pyx_t_1 / __pyx_v_r_5); goto __pyx_L0; /* "fatiando/gravmag/_sphere.pyx":19 * cdef inline double kernelz(double x, double y, double z, double r_cb) nogil: * return z/r_cb * cdef inline double kernelxx(double x, double y, double z, double r_sqr, # <<<<<<<<<<<<<< * double r_5) nogil: * return ((3*x**2) - r_sqr)/r_5 */ /* function exit code */ __pyx_L1_error:; __Pyx_WriteUnraisable("fatiando.gravmag._sphere.kernelxx", __pyx_clineno, __pyx_lineno, __pyx_filename, 0); __pyx_r = 0; __pyx_L0:; return 
__pyx_r; } /* "fatiando/gravmag/_sphere.pyx":22 * double r_5) nogil: * return ((3*x**2) - r_sqr)/r_5 * cdef inline double kernelxy(double x, double y, double z, double r_sqr, # <<<<<<<<<<<<<< * double r_5) nogil: * return (3*x*y)/r_5 */ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_7_sphere_kernelxy(double __pyx_v_x, double __pyx_v_y, CYTHON_UNUSED double __pyx_v_z, CYTHON_UNUSED double __pyx_v_r_sqr, double __pyx_v_r_5) { double __pyx_r; double __pyx_t_1; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "fatiando/gravmag/_sphere.pyx":24 * cdef inline double kernelxy(double x, double y, double z, double r_sqr, * double r_5) nogil: * return (3*x*y)/r_5 # <<<<<<<<<<<<<< * cdef inline double kernelxz(double x, double y, double z, double r_sqr, * double r_5) nogil: */ __pyx_t_1 = ((3.0 * __pyx_v_x) * __pyx_v_y); if (unlikely(__pyx_v_r_5 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 24; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_r = (__pyx_t_1 / __pyx_v_r_5); goto __pyx_L0; /* "fatiando/gravmag/_sphere.pyx":22 * double r_5) nogil: * return ((3*x**2) - r_sqr)/r_5 * cdef inline double kernelxy(double x, double y, double z, double r_sqr, # <<<<<<<<<<<<<< * double r_5) nogil: * return (3*x*y)/r_5 */ /* function exit code */ __pyx_L1_error:; __Pyx_WriteUnraisable("fatiando.gravmag._sphere.kernelxy", __pyx_clineno, __pyx_lineno, __pyx_filename, 0); __pyx_r = 0; __pyx_L0:; return __pyx_r; } /* "fatiando/gravmag/_sphere.pyx":25 * double r_5) nogil: * return (3*x*y)/r_5 * cdef inline double kernelxz(double x, double y, double z, double r_sqr, # <<<<<<<<<<<<<< * double r_5) nogil: * return (3*x*z)/r_5 */ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_7_sphere_kernelxz(double __pyx_v_x, CYTHON_UNUSED double __pyx_v_y, double __pyx_v_z, CYTHON_UNUSED double __pyx_v_r_sqr, double __pyx_v_r_5) { double __pyx_r; double __pyx_t_1; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "fatiando/gravmag/_sphere.pyx":27 * cdef inline double kernelxz(double x, double y, double z, double r_sqr, * double r_5) nogil: * return (3*x*z)/r_5 # <<<<<<<<<<<<<< * cdef inline double kernelyy(double x, double y, double z, double r_sqr, * double r_5) nogil: */ __pyx_t_1 = ((3.0 * __pyx_v_x) * __pyx_v_z); if (unlikely(__pyx_v_r_5 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 27; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_r = (__pyx_t_1 / __pyx_v_r_5); goto __pyx_L0; /* "fatiando/gravmag/_sphere.pyx":25 * double r_5) nogil: * return (3*x*y)/r_5 * cdef inline double kernelxz(double x, double y, double z, double r_sqr, # <<<<<<<<<<<<<< * double r_5) nogil: * return (3*x*z)/r_5 */ /* function exit code */ __pyx_L1_error:; __Pyx_WriteUnraisable("fatiando.gravmag._sphere.kernelxz", __pyx_clineno, __pyx_lineno, __pyx_filename, 0); __pyx_r = 0; __pyx_L0:; return __pyx_r; } /* "fatiando/gravmag/_sphere.pyx":28 * double r_5) nogil: * return (3*x*z)/r_5 * cdef inline double kernelyy(double x, double y, double z, double r_sqr, # <<<<<<<<<<<<<< * double r_5) nogil: * return ((3*y**2) - r_sqr)/r_5 */ 
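/* The inline kernels above and below are generated from _sphere.pyx lines
 * 17-36. With r_sqr = r**2, r_cb = r**3 and r_5 = r**5 supplied by the
 * caller they implement:
 *   kernelz  = z / r_cb
 *   kernelxx = (3*x*x - r_sqr) / r_5     kernelxy = 3*x*y / r_5
 *   kernelxz = 3*x*z / r_5               kernelyy = (3*y*y - r_sqr) / r_5
 *   kernelyz = 3*y*z / r_5               kernelzz = (3*z*z - r_sqr) / r_5
 * kernelz is z/r**3; the six others are the components of the symmetric
 * tensor of second derivatives of 1/r. Although the .pyx declares them
 * nogil, the generated guards briefly re-acquire the GIL to raise
 * ZeroDivisionError when the denominator is zero and then report it through
 * __Pyx_WriteUnraisable. */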
static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_7_sphere_kernelyy(CYTHON_UNUSED double __pyx_v_x, double __pyx_v_y, CYTHON_UNUSED double __pyx_v_z, double __pyx_v_r_sqr, double __pyx_v_r_5) { double __pyx_r; double __pyx_t_1; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "fatiando/gravmag/_sphere.pyx":30 * cdef inline double kernelyy(double x, double y, double z, double r_sqr, * double r_5) nogil: * return ((3*y**2) - r_sqr)/r_5 # <<<<<<<<<<<<<< * cdef inline double kernelyz(double x, double y, double z, double r_sqr, * double r_5) nogil: */ __pyx_t_1 = ((3.0 * pow(__pyx_v_y, 2.0)) - __pyx_v_r_sqr); if (unlikely(__pyx_v_r_5 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_r = (__pyx_t_1 / __pyx_v_r_5); goto __pyx_L0; /* "fatiando/gravmag/_sphere.pyx":28 * double r_5) nogil: * return (3*x*z)/r_5 * cdef inline double kernelyy(double x, double y, double z, double r_sqr, # <<<<<<<<<<<<<< * double r_5) nogil: * return ((3*y**2) - r_sqr)/r_5 */ /* function exit code */ __pyx_L1_error:; __Pyx_WriteUnraisable("fatiando.gravmag._sphere.kernelyy", __pyx_clineno, __pyx_lineno, __pyx_filename, 0); __pyx_r = 0; __pyx_L0:; return __pyx_r; } /* "fatiando/gravmag/_sphere.pyx":31 * double r_5) nogil: * return ((3*y**2) - r_sqr)/r_5 * cdef inline double kernelyz(double x, double y, double z, double r_sqr, # <<<<<<<<<<<<<< * double r_5) nogil: * return (3*y*z)/r_5 */ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_7_sphere_kernelyz(CYTHON_UNUSED double __pyx_v_x, double __pyx_v_y, double __pyx_v_z, CYTHON_UNUSED double __pyx_v_r_sqr, double __pyx_v_r_5) { double __pyx_r; double __pyx_t_1; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "fatiando/gravmag/_sphere.pyx":33 * cdef inline double kernelyz(double x, double y, double z, double r_sqr, * double r_5) nogil: * return (3*y*z)/r_5 # <<<<<<<<<<<<<< * cdef inline double kernelzz(double x, double y, double z, double r_sqr, * double r_5) nogil: */ __pyx_t_1 = ((3.0 * __pyx_v_y) * __pyx_v_z); if (unlikely(__pyx_v_r_5 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 33; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_r = (__pyx_t_1 / __pyx_v_r_5); goto __pyx_L0; /* "fatiando/gravmag/_sphere.pyx":31 * double r_5) nogil: * return ((3*y**2) - r_sqr)/r_5 * cdef inline double kernelyz(double x, double y, double z, double r_sqr, # <<<<<<<<<<<<<< * double r_5) nogil: * return (3*y*z)/r_5 */ /* function exit code */ __pyx_L1_error:; __Pyx_WriteUnraisable("fatiando.gravmag._sphere.kernelyz", __pyx_clineno, __pyx_lineno, __pyx_filename, 0); __pyx_r = 0; __pyx_L0:; return __pyx_r; } /* "fatiando/gravmag/_sphere.pyx":34 * double r_5) nogil: * return (3*y*z)/r_5 * cdef inline double kernelzz(double x, double y, double z, double r_sqr, # <<<<<<<<<<<<<< * double r_5) nogil: * return ((3*z**2) - r_sqr)/r_5 */ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_7_sphere_kernelzz(CYTHON_UNUSED double __pyx_v_x, CYTHON_UNUSED double __pyx_v_y, double __pyx_v_z, double __pyx_v_r_sqr, double 
__pyx_v_r_5) { double __pyx_r; double __pyx_t_1; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "fatiando/gravmag/_sphere.pyx":36 * cdef inline double kernelzz(double x, double y, double z, double r_sqr, * double r_5) nogil: * return ((3*z**2) - r_sqr)/r_5 # <<<<<<<<<<<<<< * * @cython.wraparound(False) */ __pyx_t_1 = ((3.0 * pow(__pyx_v_z, 2.0)) - __pyx_v_r_sqr); if (unlikely(__pyx_v_r_5 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 36; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_r = (__pyx_t_1 / __pyx_v_r_5); goto __pyx_L0; /* "fatiando/gravmag/_sphere.pyx":34 * double r_5) nogil: * return (3*y*z)/r_5 * cdef inline double kernelzz(double x, double y, double z, double r_sqr, # <<<<<<<<<<<<<< * double r_5) nogil: * return ((3*z**2) - r_sqr)/r_5 */ /* function exit code */ __pyx_L1_error:; __Pyx_WriteUnraisable("fatiando.gravmag._sphere.kernelzz", __pyx_clineno, __pyx_lineno, __pyx_filename, 0); __pyx_r = 0; __pyx_L0:; return __pyx_r; } /* "fatiando/gravmag/_sphere.pyx":40 * @cython.wraparound(False) * @cython.boundscheck(False) * def tf(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* Python wrapper */ static PyObject *__pyx_pw_8fatiando_7gravmag_7_sphere_1tf(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_8fatiando_7gravmag_7_sphere_tf[] = "tf(ndarray xp, ndarray yp, ndarray zp, double xc, double yc, double zc, double radius, double mx, double my, double mz, double fx, double fy, double fz, ndarray res)"; static PyMethodDef __pyx_mdef_8fatiando_7gravmag_7_sphere_1tf = {__Pyx_NAMESTR("tf"), (PyCFunction)__pyx_pw_8fatiando_7gravmag_7_sphere_1tf, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_7_sphere_tf)}; static PyObject *__pyx_pw_8fatiando_7gravmag_7_sphere_1tf(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_xp = 0; PyArrayObject *__pyx_v_yp = 0; PyArrayObject *__pyx_v_zp = 0; double __pyx_v_xc; double __pyx_v_yc; double __pyx_v_zc; double __pyx_v_radius; double __pyx_v_mx; double __pyx_v_my; double __pyx_v_mz; double __pyx_v_fx; double __pyx_v_fy; double __pyx_v_fz; PyArrayObject *__pyx_v_res = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("tf (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp,&__pyx_n_s_yp,&__pyx_n_s_zp,&__pyx_n_s_xc,&__pyx_n_s_yc,&__pyx_n_s_zc,&__pyx_n_s_radius,&__pyx_n_s_mx,&__pyx_n_s_my,&__pyx_n_s_mz,&__pyx_n_s_fx,&__pyx_n_s_fy,&__pyx_n_s_fz,&__pyx_n_s_res,0}; PyObject* values[14] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 14: values[13] = PyTuple_GET_ITEM(__pyx_args, 13); case 13: values[12] = PyTuple_GET_ITEM(__pyx_args, 12); case 12: values[11] = PyTuple_GET_ITEM(__pyx_args, 11); case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10); case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9); case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = 
PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 40; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 40; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xc)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 40; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yc)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 40; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zc)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 40; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_radius)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 40; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mx)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 40; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_my)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 40; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 9: if (likely((values[9] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mz)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, 9); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 40; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 10: if (likely((values[10] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_fx)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, 10); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 40; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 11: if (likely((values[11] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_fy)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, 11); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 40; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 12: if (likely((values[12] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_fz)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, 12); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 40; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } 
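/* Cython's generated argument unpacking for tf(): positional arguments are
 * copied into values[] by falling through the first switch, then this second
 * switch fills any remaining slots from keywords. All 14 parameters are
 * required, so a missing one ends up in __Pyx_RaiseArgtupleInvalid, which
 * raises TypeError with the expected and received argument counts. */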
case 13: if (likely((values[13] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, 13); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 40; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "tf") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 40; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 14) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); values[9] = PyTuple_GET_ITEM(__pyx_args, 9); values[10] = PyTuple_GET_ITEM(__pyx_args, 10); values[11] = PyTuple_GET_ITEM(__pyx_args, 11); values[12] = PyTuple_GET_ITEM(__pyx_args, 12); values[13] = PyTuple_GET_ITEM(__pyx_args, 13); } __pyx_v_xp = ((PyArrayObject *)values[0]); __pyx_v_yp = ((PyArrayObject *)values[1]); __pyx_v_zp = ((PyArrayObject *)values[2]); __pyx_v_xc = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_xc == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 43; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_yc = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_yc == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 43; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_zc = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_zc == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 43; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_radius = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_radius == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 43; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_mx = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_mx == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 44; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_my = __pyx_PyFloat_AsDouble(values[8]); if (unlikely((__pyx_v_my == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 44; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_mz = __pyx_PyFloat_AsDouble(values[9]); if (unlikely((__pyx_v_mz == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 44; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_fx = __pyx_PyFloat_AsDouble(values[10]); if (unlikely((__pyx_v_fx == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 44; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_fy = __pyx_PyFloat_AsDouble(values[11]); if (unlikely((__pyx_v_fy == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 44; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_fz = __pyx_PyFloat_AsDouble(values[12]); if (unlikely((__pyx_v_fz == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 44; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_res = ((PyArrayObject *)values[13]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; 
__Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 40; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("fatiando.gravmag._sphere.tf", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_xp), __pyx_ptype_5numpy_ndarray, 0, "xp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 40; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 41; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 42; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_res), __pyx_ptype_5numpy_ndarray, 0, "res", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 45; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_8fatiando_7gravmag_7_sphere_tf(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_xc, __pyx_v_yc, __pyx_v_zc, __pyx_v_radius, __pyx_v_mx, __pyx_v_my, __pyx_v_mz, __pyx_v_fx, __pyx_v_fy, __pyx_v_fz, __pyx_v_res); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_8fatiando_7gravmag_7_sphere_tf(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, double __pyx_v_xc, double __pyx_v_yc, double __pyx_v_zc, double __pyx_v_radius, double __pyx_v_mx, double __pyx_v_my, double __pyx_v_mz, double __pyx_v_fx, double __pyx_v_fy, double __pyx_v_fz, PyArrayObject *__pyx_v_res) { unsigned int __pyx_v_l; CYTHON_UNUSED unsigned int __pyx_v_size; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_x; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_y; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_z; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_volume; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_r_sqr; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_r_5; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_v1; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_v2; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_v3; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_v4; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_v5; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_v6; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_bx; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_by; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_bz; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; __Pyx_LocalBuf_ND __pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_t_5; unsigned int __pyx_t_6; unsigned int __pyx_t_7; unsigned int __pyx_t_8; unsigned int __pyx_t_9; unsigned int __pyx_t_10; unsigned int __pyx_t_11; unsigned int __pyx_t_12; int __pyx_lineno = 0; const char *__pyx_filename = 
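/* Implementation of tf() from _sphere.pyx line 40 onward. For each
 * observation point l it accumulates into res[l] the total-field anomaly of
 * a uniformly magnetized sphere centered on (xc, yc, zc):
 *   res[l] += volume * (fx*bx + fy*by + fz*bz),  volume = 4*pi*radius**3/3,
 * where (bx, by, bz) is the kernel tensor applied to the magnetization
 * (mx, my, mz) and (fx, fy, fz) is the direction onto which the field is
 * projected (presumably the inducing-field unit vector). The block below
 * acquires PEP 3118 buffers for the 1-D xp/yp/zp arrays (read-only) and for
 * res (writable) before entering the parallel loop. */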
NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("tf", 0); __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = &__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = &__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject*)__pyx_v_xp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 40; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = __pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject*)__pyx_v_yp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 40; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject*)__pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 40; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject*)__pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 40; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* "fatiando/gravmag/_sphere.pyx":49 * cdef DTYPE_T x, y, z * cdef DTYPE_T volume, r_sqr, r_5, v1, v2, v3, v4, v5, v6, bx, by, bz * size = len(xp) # <<<<<<<<<<<<<< * volume = 4.*numpy.pi*(radius**3)/3. * with nogil: */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_xp)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_size = __pyx_t_1; /* "fatiando/gravmag/_sphere.pyx":50 * cdef DTYPE_T volume, r_sqr, r_5, v1, v2, v3, v4, v5, v6, bx, by, bz * size = len(xp) * volume = 4.*numpy.pi*(radius**3)/3. 
# <<<<<<<<<<<<<< * with nogil: * for l in prange(size): */ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_pi); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyNumber_Multiply(__pyx_float_4_, __pyx_t_3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyFloat_FromDouble(pow(__pyx_v_radius, 3.0)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyNumber_Multiply(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyNumber_Divide(__pyx_t_4, __pyx_float_3_); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_5 = __pyx_PyFloat_AsDouble(__pyx_t_3); if (unlikely((__pyx_t_5 == (npy_double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_volume = __pyx_t_5; /* "fatiando/gravmag/_sphere.pyx":51 * size = len(xp) * volume = 4.*numpy.pi*(radius**3)/3. * with nogil: # <<<<<<<<<<<<<< * for l in prange(size): * # First thing to do is make the computation point P the origin of */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { /* "fatiando/gravmag/_sphere.pyx":52 * volume = 4.*numpy.pi*(radius**3)/3. 
* with nogil: * for l in prange(size): # <<<<<<<<<<<<<< * # First thing to do is make the computation point P the origin of * # the coordinate system */ __pyx_t_6 = __pyx_v_size; if (1 == 0) abort(); { #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_8 = (__pyx_t_6 - 0) / 1; if (__pyx_t_8 > 0) { #ifdef _OPENMP #pragma omp parallel private(__pyx_t_12, __pyx_t_11, __pyx_t_9, __pyx_t_10) #endif /* _OPENMP */ { #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_r_sqr) lastprivate(__pyx_v_v6) lastprivate(__pyx_v_y) lastprivate(__pyx_v_z) lastprivate(__pyx_v_v2) lastprivate(__pyx_v_v1) lastprivate(__pyx_v_v4) lastprivate(__pyx_v_x) lastprivate(__pyx_v_bx) lastprivate(__pyx_v_v3) lastprivate(__pyx_v_bz) lastprivate(__pyx_v_r_5) firstprivate(__pyx_v_l) lastprivate(__pyx_v_l) lastprivate(__pyx_v_by) lastprivate(__pyx_v_v5) #endif /* _OPENMP */ for (__pyx_t_7 = 0; __pyx_t_7 < __pyx_t_8; __pyx_t_7++){ { __pyx_v_l = 0 + 1 * __pyx_t_7; /* Initialize private variables to invalid values */ __pyx_v_r_sqr = ((__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T)__PYX_NAN()); __pyx_v_v6 = ((__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T)__PYX_NAN()); __pyx_v_y = ((__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T)__PYX_NAN()); __pyx_v_z = ((__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T)__PYX_NAN()); __pyx_v_v2 = ((__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T)__PYX_NAN()); __pyx_v_v1 = ((__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T)__PYX_NAN()); __pyx_v_v4 = ((__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T)__PYX_NAN()); __pyx_v_x = ((__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T)__PYX_NAN()); __pyx_v_bx = ((__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T)__PYX_NAN()); __pyx_v_v3 = ((__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T)__PYX_NAN()); __pyx_v_bz = ((__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T)__PYX_NAN()); __pyx_v_r_5 = ((__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T)__PYX_NAN()); __pyx_v_by = ((__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T)__PYX_NAN()); __pyx_v_v5 = ((__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T)__PYX_NAN()); /* "fatiando/gravmag/_sphere.pyx":55 * # First thing to do is make the computation point P the origin of * # the coordinate system * x = xc - xp[l] # <<<<<<<<<<<<<< * y = yc - yp[l] * z = zc - zp[l] */ __pyx_t_9 = __pyx_v_l; __pyx_v_x = (__pyx_v_xc - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_sphere.pyx":56 * # the coordinate system * x = xc - xp[l] * y = yc - yp[l] # <<<<<<<<<<<<<< * z = zc - zp[l] * r_sqr = x**2 + y**2 + z**2 */ __pyx_t_10 = __pyx_v_l; __pyx_v_y = (__pyx_v_yc - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_sphere.pyx":57 * x = xc - xp[l] * y = yc - yp[l] * z = zc - zp[l] # <<<<<<<<<<<<<< * r_sqr = x**2 + y**2 + z**2 * r_5 = r_sqr**(2.5) */ __pyx_t_11 = __pyx_v_l; __pyx_v_z = (__pyx_v_zc - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_11, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_sphere.pyx":58 * y = yc - yp[l] * z = zc - zp[l] * r_sqr = x**2 + y**2 + z**2 # <<<<<<<<<<<<<< * r_5 = r_sqr**(2.5) * v1 = kernelxx(x, y, z, r_sqr, r_5) */ 
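/* The prange loop body (_sphere.pyx lines 55-69): per observation point the
 * generated code is equivalent to this plain-C sketch (illustration only,
 * names as in the .pyx):
 *   x = xc - xp[l];  y = yc - yp[l];  z = zc - zp[l];
 *   r_sqr = x*x + y*y + z*z;
 *   r_5 = pow(r_sqr, 2.5);
 *   v1 = kernelxx(x,y,z,r_sqr,r_5); ... v6 = kernelzz(x,y,z,r_sqr,r_5);
 *   bx = v1*mx + v2*my + v3*mz;
 *   by = v2*mx + v4*my + v5*mz;
 *   bz = v3*mx + v5*my + v6*mz;
 *   res[l] += volume * (fx*bx + fy*by + fz*bz);
 * Under OpenMP each lastprivate temporary is reset to NaN at the top of
 * every iteration, as seen above, before the real values are computed. */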
__pyx_v_r_sqr = ((pow(__pyx_v_x, 2.0) + pow(__pyx_v_y, 2.0)) + pow(__pyx_v_z, 2.0)); /* "fatiando/gravmag/_sphere.pyx":59 * z = zc - zp[l] * r_sqr = x**2 + y**2 + z**2 * r_5 = r_sqr**(2.5) # <<<<<<<<<<<<<< * v1 = kernelxx(x, y, z, r_sqr, r_5) * v2 = kernelxy(x, y, z, r_sqr, r_5) */ __pyx_v_r_5 = pow(((double)__pyx_v_r_sqr), 2.5); /* "fatiando/gravmag/_sphere.pyx":60 * r_sqr = x**2 + y**2 + z**2 * r_5 = r_sqr**(2.5) * v1 = kernelxx(x, y, z, r_sqr, r_5) # <<<<<<<<<<<<<< * v2 = kernelxy(x, y, z, r_sqr, r_5) * v3 = kernelxz(x, y, z, r_sqr, r_5) */ __pyx_v_v1 = __pyx_f_8fatiando_7gravmag_7_sphere_kernelxx(__pyx_v_x, __pyx_v_y, __pyx_v_z, __pyx_v_r_sqr, __pyx_v_r_5); /* "fatiando/gravmag/_sphere.pyx":61 * r_5 = r_sqr**(2.5) * v1 = kernelxx(x, y, z, r_sqr, r_5) * v2 = kernelxy(x, y, z, r_sqr, r_5) # <<<<<<<<<<<<<< * v3 = kernelxz(x, y, z, r_sqr, r_5) * v4 = kernelyy(x, y, z, r_sqr, r_5) */ __pyx_v_v2 = __pyx_f_8fatiando_7gravmag_7_sphere_kernelxy(__pyx_v_x, __pyx_v_y, __pyx_v_z, __pyx_v_r_sqr, __pyx_v_r_5); /* "fatiando/gravmag/_sphere.pyx":62 * v1 = kernelxx(x, y, z, r_sqr, r_5) * v2 = kernelxy(x, y, z, r_sqr, r_5) * v3 = kernelxz(x, y, z, r_sqr, r_5) # <<<<<<<<<<<<<< * v4 = kernelyy(x, y, z, r_sqr, r_5) * v5 = kernelyz(x, y, z, r_sqr, r_5) */ __pyx_v_v3 = __pyx_f_8fatiando_7gravmag_7_sphere_kernelxz(__pyx_v_x, __pyx_v_y, __pyx_v_z, __pyx_v_r_sqr, __pyx_v_r_5); /* "fatiando/gravmag/_sphere.pyx":63 * v2 = kernelxy(x, y, z, r_sqr, r_5) * v3 = kernelxz(x, y, z, r_sqr, r_5) * v4 = kernelyy(x, y, z, r_sqr, r_5) # <<<<<<<<<<<<<< * v5 = kernelyz(x, y, z, r_sqr, r_5) * v6 = kernelzz(x, y, z, r_sqr, r_5) */ __pyx_v_v4 = __pyx_f_8fatiando_7gravmag_7_sphere_kernelyy(__pyx_v_x, __pyx_v_y, __pyx_v_z, __pyx_v_r_sqr, __pyx_v_r_5); /* "fatiando/gravmag/_sphere.pyx":64 * v3 = kernelxz(x, y, z, r_sqr, r_5) * v4 = kernelyy(x, y, z, r_sqr, r_5) * v5 = kernelyz(x, y, z, r_sqr, r_5) # <<<<<<<<<<<<<< * v6 = kernelzz(x, y, z, r_sqr, r_5) * bx = (v1*mx + v2*my + v3*mz) */ __pyx_v_v5 = __pyx_f_8fatiando_7gravmag_7_sphere_kernelyz(__pyx_v_x, __pyx_v_y, __pyx_v_z, __pyx_v_r_sqr, __pyx_v_r_5); /* "fatiando/gravmag/_sphere.pyx":65 * v4 = kernelyy(x, y, z, r_sqr, r_5) * v5 = kernelyz(x, y, z, r_sqr, r_5) * v6 = kernelzz(x, y, z, r_sqr, r_5) # <<<<<<<<<<<<<< * bx = (v1*mx + v2*my + v3*mz) * by = (v2*mx + v4*my + v5*mz) */ __pyx_v_v6 = __pyx_f_8fatiando_7gravmag_7_sphere_kernelzz(__pyx_v_x, __pyx_v_y, __pyx_v_z, __pyx_v_r_sqr, __pyx_v_r_5); /* "fatiando/gravmag/_sphere.pyx":66 * v5 = kernelyz(x, y, z, r_sqr, r_5) * v6 = kernelzz(x, y, z, r_sqr, r_5) * bx = (v1*mx + v2*my + v3*mz) # <<<<<<<<<<<<<< * by = (v2*mx + v4*my + v5*mz) * bz = (v3*mx + v5*my + v6*mz) */ __pyx_v_bx = (((__pyx_v_v1 * __pyx_v_mx) + (__pyx_v_v2 * __pyx_v_my)) + (__pyx_v_v3 * __pyx_v_mz)); /* "fatiando/gravmag/_sphere.pyx":67 * v6 = kernelzz(x, y, z, r_sqr, r_5) * bx = (v1*mx + v2*my + v3*mz) * by = (v2*mx + v4*my + v5*mz) # <<<<<<<<<<<<<< * bz = (v3*mx + v5*my + v6*mz) * res[l] += volume*(fx*bx + fy*by + fz*bz) */ __pyx_v_by = (((__pyx_v_v2 * __pyx_v_mx) + (__pyx_v_v4 * __pyx_v_my)) + (__pyx_v_v5 * __pyx_v_mz)); /* "fatiando/gravmag/_sphere.pyx":68 * bx = (v1*mx + v2*my + v3*mz) * by = (v2*mx + v4*my + v5*mz) * bz = (v3*mx + v5*my + v6*mz) # <<<<<<<<<<<<<< * res[l] += volume*(fx*bx + fy*by + fz*bz) * */ __pyx_v_bz = (((__pyx_v_v3 * __pyx_v_mx) + (__pyx_v_v5 * __pyx_v_my)) + (__pyx_v_v6 * __pyx_v_mz)); /* "fatiando/gravmag/_sphere.pyx":69 * by = (v2*mx + v4*my + v5*mz) * bz = (v3*mx + v5*my + v6*mz) * res[l] += volume*(fx*bx + fy*by + fz*bz) # <<<<<<<<<<<<<< 
* * @cython.wraparound(False) */ __pyx_t_12 = __pyx_v_l; *__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_res.diminfo[0].strides) += (__pyx_v_volume * (((__pyx_v_fx * __pyx_v_bx) + (__pyx_v_fy * __pyx_v_by)) + (__pyx_v_fz * __pyx_v_bz))); } } } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "fatiando/gravmag/_sphere.pyx":51 * size = len(xp) * volume = 4.*numpy.pi*(radius**3)/3. * with nogil: # <<<<<<<<<<<<<< * for l in prange(size): * # First thing to do is make the computation point P the origin of */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L5:; } } /* "fatiando/gravmag/_sphere.pyx":40 * @cython.wraparound(False) * @cython.boundscheck(False) * def tf(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("fatiando.gravmag._sphere.tf", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "fatiando/gravmag/_sphere.pyx":73 * @cython.wraparound(False) * @cython.boundscheck(False) * def bx(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* Python wrapper */ static PyObject *__pyx_pw_8fatiando_7gravmag_7_sphere_3bx(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_8fatiando_7gravmag_7_sphere_2bx[] = "bx(ndarray xp, ndarray yp, ndarray zp, double xc, double yc, double zc, double radius, double mx, double my, double mz, ndarray res)"; static PyMethodDef __pyx_mdef_8fatiando_7gravmag_7_sphere_3bx = {__Pyx_NAMESTR("bx"), (PyCFunction)__pyx_pw_8fatiando_7gravmag_7_sphere_3bx, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_7_sphere_2bx)}; static PyObject *__pyx_pw_8fatiando_7gravmag_7_sphere_3bx(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_xp = 0; PyArrayObject *__pyx_v_yp = 0; PyArrayObject *__pyx_v_zp = 0; double __pyx_v_xc; double __pyx_v_yc; double __pyx_v_zc; double __pyx_v_radius; double __pyx_v_mx; double __pyx_v_my; double __pyx_v_mz; PyArrayObject *__pyx_v_res = 0; int __pyx_lineno = 0; const char *__pyx_filename = 
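/* Python wrapper for bx() from _sphere.pyx line 73. It unpacks 11 required
 * arguments (xp, yp, zp, xc, yc, zc, radius, mx, my, mz, res) the same way
 * as tf() above; the implementation that follows accumulates the x component
 * of the dipole field,
 *   res[l] += volume * (v1*mx + v2*my + v3*mz),
 * using only the kernelxx, kernelxy and kernelxz row of the tensor. */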
NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("bx (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp,&__pyx_n_s_yp,&__pyx_n_s_zp,&__pyx_n_s_xc,&__pyx_n_s_yc,&__pyx_n_s_zc,&__pyx_n_s_radius,&__pyx_n_s_mx,&__pyx_n_s_my,&__pyx_n_s_mz,&__pyx_n_s_res,0}; PyObject* values[11] = {0,0,0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10); case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9); case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bx", 1, 11, 11, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bx", 1, 11, 11, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xc)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bx", 1, 11, 11, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yc)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bx", 1, 11, 11, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zc)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bx", 1, 11, 11, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_radius)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bx", 1, 11, 11, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mx)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bx", 1, 11, 11, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_my)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bx", 1, 11, 11, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 9: if (likely((values[9] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mz)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bx", 1, 11, 11, 9); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 
10: if (likely((values[10] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bx", 1, 11, 11, 10); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "bx") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 11) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); values[9] = PyTuple_GET_ITEM(__pyx_args, 9); values[10] = PyTuple_GET_ITEM(__pyx_args, 10); } __pyx_v_xp = ((PyArrayObject *)values[0]); __pyx_v_yp = ((PyArrayObject *)values[1]); __pyx_v_zp = ((PyArrayObject *)values[2]); __pyx_v_xc = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_xc == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 76; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_yc = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_yc == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 76; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_zc = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_zc == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 76; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_radius = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_radius == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 76; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_mx = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_mx == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 77; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_my = __pyx_PyFloat_AsDouble(values[8]); if (unlikely((__pyx_v_my == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 77; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_mz = __pyx_PyFloat_AsDouble(values[9]); if (unlikely((__pyx_v_mz == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 77; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_res = ((PyArrayObject *)values[10]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("bx", 1, 11, 11, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("fatiando.gravmag._sphere.bx", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_xp), __pyx_ptype_5numpy_ndarray, 0, "xp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if 
(unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 75; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_res), __pyx_ptype_5numpy_ndarray, 0, "res", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_8fatiando_7gravmag_7_sphere_2bx(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_xc, __pyx_v_yc, __pyx_v_zc, __pyx_v_radius, __pyx_v_mx, __pyx_v_my, __pyx_v_mz, __pyx_v_res); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_8fatiando_7gravmag_7_sphere_2bx(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, double __pyx_v_xc, double __pyx_v_yc, double __pyx_v_zc, double __pyx_v_radius, double __pyx_v_mx, double __pyx_v_my, double __pyx_v_mz, PyArrayObject *__pyx_v_res) { unsigned int __pyx_v_l; CYTHON_UNUSED unsigned int __pyx_v_size; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_x; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_y; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_z; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_volume; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_r_sqr; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_r_5; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_v1; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_v2; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_v3; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; __Pyx_LocalBuf_ND __pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_t_5; unsigned int __pyx_t_6; unsigned int __pyx_t_7; unsigned int __pyx_t_8; unsigned int __pyx_t_9; unsigned int __pyx_t_10; unsigned int __pyx_t_11; unsigned int __pyx_t_12; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("bx", 0); __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = &__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = &__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject*)__pyx_v_xp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = 
__pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject*)__pyx_v_yp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject*)__pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject*)__pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* "fatiando/gravmag/_sphere.pyx":82 * cdef DTYPE_T x, y, z * cdef DTYPE_T volume, r_sqr, r_5, v1, v2, v3 * size = len(xp) # <<<<<<<<<<<<<< * volume = 4.*numpy.pi*(radius**3)/3. * with nogil: */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_xp)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 82; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_size = __pyx_t_1; /* "fatiando/gravmag/_sphere.pyx":83 * cdef DTYPE_T volume, r_sqr, r_5, v1, v2, v3 * size = len(xp) * volume = 4.*numpy.pi*(radius**3)/3. 
# <<<<<<<<<<<<<< * with nogil: * for l in prange(size): */ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 83; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_pi); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 83; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyNumber_Multiply(__pyx_float_4_, __pyx_t_3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 83; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyFloat_FromDouble(pow(__pyx_v_radius, 3.0)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 83; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyNumber_Multiply(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 83; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyNumber_Divide(__pyx_t_4, __pyx_float_3_); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 83; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_5 = __pyx_PyFloat_AsDouble(__pyx_t_3); if (unlikely((__pyx_t_5 == (npy_double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 83; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_volume = __pyx_t_5; /* "fatiando/gravmag/_sphere.pyx":84 * size = len(xp) * volume = 4.*numpy.pi*(radius**3)/3. * with nogil: # <<<<<<<<<<<<<< * for l in prange(size): * # First thing to do is make the computation point P the origin of */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { /* "fatiando/gravmag/_sphere.pyx":85 * volume = 4.*numpy.pi*(radius**3)/3. 
* with nogil: * for l in prange(size): # <<<<<<<<<<<<<< * # First thing to do is make the computation point P the origin of * # the coordinate system */ __pyx_t_6 = __pyx_v_size; if (1 == 0) abort(); { #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_8 = (__pyx_t_6 - 0) / 1; if (__pyx_t_8 > 0) { #ifdef _OPENMP #pragma omp parallel private(__pyx_t_12, __pyx_t_11, __pyx_t_9, __pyx_t_10) #endif /* _OPENMP */ { #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_v1) lastprivate(__pyx_v_r_5) lastprivate(__pyx_v_x) lastprivate(__pyx_v_v3) lastprivate(__pyx_v_r_sqr) lastprivate(__pyx_v_v2) firstprivate(__pyx_v_l) lastprivate(__pyx_v_l) lastprivate(__pyx_v_z) lastprivate(__pyx_v_y) #endif /* _OPENMP */ for (__pyx_t_7 = 0; __pyx_t_7 < __pyx_t_8; __pyx_t_7++){ { __pyx_v_l = 0 + 1 * __pyx_t_7; /* Initialize private variables to invalid values */ __pyx_v_v1 = ((__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T)__PYX_NAN()); __pyx_v_r_5 = ((__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T)__PYX_NAN()); __pyx_v_x = ((__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T)__PYX_NAN()); __pyx_v_v3 = ((__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T)__PYX_NAN()); __pyx_v_r_sqr = ((__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T)__PYX_NAN()); __pyx_v_v2 = ((__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T)__PYX_NAN()); __pyx_v_z = ((__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T)__PYX_NAN()); __pyx_v_y = ((__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T)__PYX_NAN()); /* "fatiando/gravmag/_sphere.pyx":88 * # First thing to do is make the computation point P the origin of * # the coordinate system * x = xc - xp[l] # <<<<<<<<<<<<<< * y = yc - yp[l] * z = zc - zp[l] */ __pyx_t_9 = __pyx_v_l; __pyx_v_x = (__pyx_v_xc - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_sphere.pyx":89 * # the coordinate system * x = xc - xp[l] * y = yc - yp[l] # <<<<<<<<<<<<<< * z = zc - zp[l] * r_sqr = x**2 + y**2 + z**2 */ __pyx_t_10 = __pyx_v_l; __pyx_v_y = (__pyx_v_yc - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_sphere.pyx":90 * x = xc - xp[l] * y = yc - yp[l] * z = zc - zp[l] # <<<<<<<<<<<<<< * r_sqr = x**2 + y**2 + z**2 * r_5 = r_sqr**(2.5) */ __pyx_t_11 = __pyx_v_l; __pyx_v_z = (__pyx_v_zc - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_11, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_sphere.pyx":91 * y = yc - yp[l] * z = zc - zp[l] * r_sqr = x**2 + y**2 + z**2 # <<<<<<<<<<<<<< * r_5 = r_sqr**(2.5) * v1 = kernelxx(x, y, z, r_sqr, r_5) */ __pyx_v_r_sqr = ((pow(__pyx_v_x, 2.0) + pow(__pyx_v_y, 2.0)) + pow(__pyx_v_z, 2.0)); /* "fatiando/gravmag/_sphere.pyx":92 * z = zc - zp[l] * r_sqr = x**2 + y**2 + z**2 * r_5 = r_sqr**(2.5) # <<<<<<<<<<<<<< * v1 = kernelxx(x, y, z, r_sqr, r_5) * v2 = kernelxy(x, y, z, r_sqr, r_5) */ __pyx_v_r_5 = pow(((double)__pyx_v_r_sqr), 2.5); /* "fatiando/gravmag/_sphere.pyx":93 * r_sqr = x**2 + y**2 + z**2 * r_5 = r_sqr**(2.5) * v1 = kernelxx(x, y, z, r_sqr, r_5) # <<<<<<<<<<<<<< * v2 = kernelxy(x, y, z, r_sqr, r_5) * v3 = kernelxz(x, y, z, r_sqr, r_5) */ __pyx_v_v1 = 
__pyx_f_8fatiando_7gravmag_7_sphere_kernelxx(__pyx_v_x, __pyx_v_y, __pyx_v_z, __pyx_v_r_sqr, __pyx_v_r_5); /* "fatiando/gravmag/_sphere.pyx":94 * r_5 = r_sqr**(2.5) * v1 = kernelxx(x, y, z, r_sqr, r_5) * v2 = kernelxy(x, y, z, r_sqr, r_5) # <<<<<<<<<<<<<< * v3 = kernelxz(x, y, z, r_sqr, r_5) * res[l] += volume*(v1*mx + v2*my + v3*mz) */ __pyx_v_v2 = __pyx_f_8fatiando_7gravmag_7_sphere_kernelxy(__pyx_v_x, __pyx_v_y, __pyx_v_z, __pyx_v_r_sqr, __pyx_v_r_5); /* "fatiando/gravmag/_sphere.pyx":95 * v1 = kernelxx(x, y, z, r_sqr, r_5) * v2 = kernelxy(x, y, z, r_sqr, r_5) * v3 = kernelxz(x, y, z, r_sqr, r_5) # <<<<<<<<<<<<<< * res[l] += volume*(v1*mx + v2*my + v3*mz) * */ __pyx_v_v3 = __pyx_f_8fatiando_7gravmag_7_sphere_kernelxz(__pyx_v_x, __pyx_v_y, __pyx_v_z, __pyx_v_r_sqr, __pyx_v_r_5); /* "fatiando/gravmag/_sphere.pyx":96 * v2 = kernelxy(x, y, z, r_sqr, r_5) * v3 = kernelxz(x, y, z, r_sqr, r_5) * res[l] += volume*(v1*mx + v2*my + v3*mz) # <<<<<<<<<<<<<< * * @cython.wraparound(False) */ __pyx_t_12 = __pyx_v_l; *__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_res.diminfo[0].strides) += (__pyx_v_volume * (((__pyx_v_v1 * __pyx_v_mx) + (__pyx_v_v2 * __pyx_v_my)) + (__pyx_v_v3 * __pyx_v_mz))); } } } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "fatiando/gravmag/_sphere.pyx":84 * size = len(xp) * volume = 4.*numpy.pi*(radius**3)/3. * with nogil: # <<<<<<<<<<<<<< * for l in prange(size): * # First thing to do is make the computation point P the origin of */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L5:; } } /* "fatiando/gravmag/_sphere.pyx":73 * @cython.wraparound(False) * @cython.boundscheck(False) * def bx(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("fatiando.gravmag._sphere.bx", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "fatiando/gravmag/_sphere.pyx":100 * @cython.wraparound(False) * @cython.boundscheck(False) * def by(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* Python wrapper */ static PyObject 
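/* Python wrapper for by() from _sphere.pyx line 100. Its signature matches
 * bx() (the same 11 arguments, see the docstring below); its body presumably
 * mirrors bx() with the kernelxy/kernelyy/kernelyz row of the tensor applied
 * to (mx, my, mz). */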
*__pyx_pw_8fatiando_7gravmag_7_sphere_5by(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_8fatiando_7gravmag_7_sphere_4by[] = "by(ndarray xp, ndarray yp, ndarray zp, double xc, double yc, double zc, double radius, double mx, double my, double mz, ndarray res)"; static PyMethodDef __pyx_mdef_8fatiando_7gravmag_7_sphere_5by = {__Pyx_NAMESTR("by"), (PyCFunction)__pyx_pw_8fatiando_7gravmag_7_sphere_5by, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_7_sphere_4by)}; static PyObject *__pyx_pw_8fatiando_7gravmag_7_sphere_5by(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_xp = 0; PyArrayObject *__pyx_v_yp = 0; PyArrayObject *__pyx_v_zp = 0; double __pyx_v_xc; double __pyx_v_yc; double __pyx_v_zc; double __pyx_v_radius; double __pyx_v_mx; double __pyx_v_my; double __pyx_v_mz; PyArrayObject *__pyx_v_res = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("by (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp,&__pyx_n_s_yp,&__pyx_n_s_zp,&__pyx_n_s_xc,&__pyx_n_s_yc,&__pyx_n_s_zc,&__pyx_n_s_radius,&__pyx_n_s_mx,&__pyx_n_s_my,&__pyx_n_s_mz,&__pyx_n_s_res,0}; PyObject* values[11] = {0,0,0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10); case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9); case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("by", 1, 11, 11, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 100; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("by", 1, 11, 11, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 100; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xc)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("by", 1, 11, 11, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 100; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yc)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("by", 1, 11, 11, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 100; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zc)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("by", 1, 11, 11, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 100; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if 
(likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_radius)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("by", 1, 11, 11, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 100; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mx)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("by", 1, 11, 11, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 100; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_my)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("by", 1, 11, 11, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 100; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 9: if (likely((values[9] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mz)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("by", 1, 11, 11, 9); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 100; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 10: if (likely((values[10] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("by", 1, 11, 11, 10); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 100; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "by") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 100; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 11) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); values[9] = PyTuple_GET_ITEM(__pyx_args, 9); values[10] = PyTuple_GET_ITEM(__pyx_args, 10); } __pyx_v_xp = ((PyArrayObject *)values[0]); __pyx_v_yp = ((PyArrayObject *)values[1]); __pyx_v_zp = ((PyArrayObject *)values[2]); __pyx_v_xc = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_xc == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 103; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_yc = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_yc == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 103; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_zc = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_zc == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 103; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_radius = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_radius == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 103; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_mx = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_mx == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 104; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_my = __pyx_PyFloat_AsDouble(values[8]); if (unlikely((__pyx_v_my == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 104; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_mz = __pyx_PyFloat_AsDouble(values[9]); if (unlikely((__pyx_v_mz == (double)-1) && PyErr_Occurred())) 
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 104; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_res = ((PyArrayObject *)values[10]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("by", 1, 11, 11, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 100; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("fatiando.gravmag._sphere.by", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_xp), __pyx_ptype_5numpy_ndarray, 0, "xp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 100; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 102; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_res), __pyx_ptype_5numpy_ndarray, 0, "res", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 105; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_8fatiando_7gravmag_7_sphere_4by(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_xc, __pyx_v_yc, __pyx_v_zc, __pyx_v_radius, __pyx_v_mx, __pyx_v_my, __pyx_v_mz, __pyx_v_res); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_8fatiando_7gravmag_7_sphere_4by(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, double __pyx_v_xc, double __pyx_v_yc, double __pyx_v_zc, double __pyx_v_radius, double __pyx_v_mx, double __pyx_v_my, double __pyx_v_mz, PyArrayObject *__pyx_v_res) { unsigned int __pyx_v_l; CYTHON_UNUSED unsigned int __pyx_v_size; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_x; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_y; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_z; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_volume; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_r_sqr; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_r_5; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_v2; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_v4; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_v5; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; __Pyx_LocalBuf_ND __pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_t_5; unsigned int __pyx_t_6; unsigned int __pyx_t_7; unsigned int __pyx_t_8; unsigned int __pyx_t_9; unsigned int __pyx_t_10; unsigned int __pyx_t_11; unsigned int __pyx_t_12; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("by", 0); __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = 
&__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = &__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject*)__pyx_v_xp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 100; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = __pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject*)__pyx_v_yp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 100; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject*)__pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 100; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject*)__pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 100; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* "fatiando/gravmag/_sphere.pyx":109 * cdef DTYPE_T x, y, z * cdef DTYPE_T volume, r_sqr, r_5, v2, v4, v5 * size = len(xp) # <<<<<<<<<<<<<< * volume = 4.*numpy.pi*(radius**3)/3. * with nogil: */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_xp)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_size = __pyx_t_1; /* "fatiando/gravmag/_sphere.pyx":110 * cdef DTYPE_T volume, r_sqr, r_5, v2, v4, v5 * size = len(xp) * volume = 4.*numpy.pi*(radius**3)/3. 
# <<<<<<<<<<<<<< * with nogil: * for l in prange(size): */ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 110; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_pi); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 110; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyNumber_Multiply(__pyx_float_4_, __pyx_t_3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 110; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyFloat_FromDouble(pow(__pyx_v_radius, 3.0)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 110; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyNumber_Multiply(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 110; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyNumber_Divide(__pyx_t_4, __pyx_float_3_); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 110; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_5 = __pyx_PyFloat_AsDouble(__pyx_t_3); if (unlikely((__pyx_t_5 == (npy_double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 110; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_volume = __pyx_t_5; /* "fatiando/gravmag/_sphere.pyx":111 * size = len(xp) * volume = 4.*numpy.pi*(radius**3)/3. * with nogil: # <<<<<<<<<<<<<< * for l in prange(size): * # First thing to do is make the computation point P the origin of */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { /* "fatiando/gravmag/_sphere.pyx":112 * volume = 4.*numpy.pi*(radius**3)/3. 
* with nogil: * for l in prange(size): # <<<<<<<<<<<<<< * # First thing to do is make the computation point P the origin of * # the coordinate system */ __pyx_t_6 = __pyx_v_size; if (1 == 0) abort(); { #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_8 = (__pyx_t_6 - 0) / 1; if (__pyx_t_8 > 0) { #ifdef _OPENMP #pragma omp parallel private(__pyx_t_12, __pyx_t_11, __pyx_t_9, __pyx_t_10) #endif /* _OPENMP */ { #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_v5) lastprivate(__pyx_v_y) lastprivate(__pyx_v_r_sqr) lastprivate(__pyx_v_v2) lastprivate(__pyx_v_x) firstprivate(__pyx_v_l) lastprivate(__pyx_v_l) lastprivate(__pyx_v_z) lastprivate(__pyx_v_r_5) lastprivate(__pyx_v_v4) #endif /* _OPENMP */ for (__pyx_t_7 = 0; __pyx_t_7 < __pyx_t_8; __pyx_t_7++){ { __pyx_v_l = 0 + 1 * __pyx_t_7; /* Initialize private variables to invalid values */ __pyx_v_v5 = ((__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T)__PYX_NAN()); __pyx_v_y = ((__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T)__PYX_NAN()); __pyx_v_r_sqr = ((__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T)__PYX_NAN()); __pyx_v_v2 = ((__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T)__PYX_NAN()); __pyx_v_x = ((__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T)__PYX_NAN()); __pyx_v_z = ((__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T)__PYX_NAN()); __pyx_v_r_5 = ((__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T)__PYX_NAN()); __pyx_v_v4 = ((__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T)__PYX_NAN()); /* "fatiando/gravmag/_sphere.pyx":115 * # First thing to do is make the computation point P the origin of * # the coordinate system * x = xc - xp[l] # <<<<<<<<<<<<<< * y = yc - yp[l] * z = zc - zp[l] */ __pyx_t_9 = __pyx_v_l; __pyx_v_x = (__pyx_v_xc - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_sphere.pyx":116 * # the coordinate system * x = xc - xp[l] * y = yc - yp[l] # <<<<<<<<<<<<<< * z = zc - zp[l] * r_sqr = x**2 + y**2 + z**2 */ __pyx_t_10 = __pyx_v_l; __pyx_v_y = (__pyx_v_yc - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_sphere.pyx":117 * x = xc - xp[l] * y = yc - yp[l] * z = zc - zp[l] # <<<<<<<<<<<<<< * r_sqr = x**2 + y**2 + z**2 * r_5 = r_sqr**(2.5) */ __pyx_t_11 = __pyx_v_l; __pyx_v_z = (__pyx_v_zc - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_11, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_sphere.pyx":118 * y = yc - yp[l] * z = zc - zp[l] * r_sqr = x**2 + y**2 + z**2 # <<<<<<<<<<<<<< * r_5 = r_sqr**(2.5) * v2 = kernelxy(x, y, z, r_sqr, r_5) */ __pyx_v_r_sqr = ((pow(__pyx_v_x, 2.0) + pow(__pyx_v_y, 2.0)) + pow(__pyx_v_z, 2.0)); /* "fatiando/gravmag/_sphere.pyx":119 * z = zc - zp[l] * r_sqr = x**2 + y**2 + z**2 * r_5 = r_sqr**(2.5) # <<<<<<<<<<<<<< * v2 = kernelxy(x, y, z, r_sqr, r_5) * v4 = kernelyy(x, y, z, r_sqr, r_5) */ __pyx_v_r_5 = pow(((double)__pyx_v_r_sqr), 2.5); /* "fatiando/gravmag/_sphere.pyx":120 * r_sqr = x**2 + y**2 + z**2 * r_5 = r_sqr**(2.5) * v2 = kernelxy(x, y, z, r_sqr, r_5) # <<<<<<<<<<<<<< * v4 = kernelyy(x, y, z, r_sqr, r_5) * v5 = kernelyz(x, y, z, r_sqr, r_5) */ __pyx_v_v2 = 
__pyx_f_8fatiando_7gravmag_7_sphere_kernelxy(__pyx_v_x, __pyx_v_y, __pyx_v_z, __pyx_v_r_sqr, __pyx_v_r_5); /* "fatiando/gravmag/_sphere.pyx":121 * r_5 = r_sqr**(2.5) * v2 = kernelxy(x, y, z, r_sqr, r_5) * v4 = kernelyy(x, y, z, r_sqr, r_5) # <<<<<<<<<<<<<< * v5 = kernelyz(x, y, z, r_sqr, r_5) * res[l] += volume*(v2*mx + v4*my + v5*mz) */ __pyx_v_v4 = __pyx_f_8fatiando_7gravmag_7_sphere_kernelyy(__pyx_v_x, __pyx_v_y, __pyx_v_z, __pyx_v_r_sqr, __pyx_v_r_5); /* "fatiando/gravmag/_sphere.pyx":122 * v2 = kernelxy(x, y, z, r_sqr, r_5) * v4 = kernelyy(x, y, z, r_sqr, r_5) * v5 = kernelyz(x, y, z, r_sqr, r_5) # <<<<<<<<<<<<<< * res[l] += volume*(v2*mx + v4*my + v5*mz) * */ __pyx_v_v5 = __pyx_f_8fatiando_7gravmag_7_sphere_kernelyz(__pyx_v_x, __pyx_v_y, __pyx_v_z, __pyx_v_r_sqr, __pyx_v_r_5); /* "fatiando/gravmag/_sphere.pyx":123 * v4 = kernelyy(x, y, z, r_sqr, r_5) * v5 = kernelyz(x, y, z, r_sqr, r_5) * res[l] += volume*(v2*mx + v4*my + v5*mz) # <<<<<<<<<<<<<< * * @cython.wraparound(False) */ __pyx_t_12 = __pyx_v_l; *__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_res.diminfo[0].strides) += (__pyx_v_volume * (((__pyx_v_v2 * __pyx_v_mx) + (__pyx_v_v4 * __pyx_v_my)) + (__pyx_v_v5 * __pyx_v_mz))); } } } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "fatiando/gravmag/_sphere.pyx":111 * size = len(xp) * volume = 4.*numpy.pi*(radius**3)/3. * with nogil: # <<<<<<<<<<<<<< * for l in prange(size): * # First thing to do is make the computation point P the origin of */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L5:; } } /* "fatiando/gravmag/_sphere.pyx":100 * @cython.wraparound(False) * @cython.boundscheck(False) * def by(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("fatiando.gravmag._sphere.by", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "fatiando/gravmag/_sphere.pyx":127 * @cython.wraparound(False) * @cython.boundscheck(False) * def bz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* Python wrapper */ static PyObject 
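/* Readability note: a sketch of the by() loop implemented by the generated code
 * above, reconstructed from the embedded .pyx excerpts; kernelxy, kernelyy and
 * kernelyz are assumed to be the cdef helpers defined elsewhere in _sphere.pyx:
 *
 *     size = len(xp)
 *     volume = 4.*numpy.pi*(radius**3)/3.
 *     with nogil:
 *         for l in prange(size):
 *             x = xc - xp[l]
 *             y = yc - yp[l]
 *             z = zc - zp[l]
 *             r_sqr = x**2 + y**2 + z**2
 *             r_5 = r_sqr**(2.5)
 *             v2 = kernelxy(x, y, z, r_sqr, r_5)
 *             v4 = kernelyy(x, y, z, r_sqr, r_5)
 *             v5 = kernelyz(x, y, z, r_sqr, r_5)
 *             res[l] += volume*(v2*mx + v4*my + v5*mz)
 */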
*__pyx_pw_8fatiando_7gravmag_7_sphere_7bz(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_8fatiando_7gravmag_7_sphere_6bz[] = "bz(ndarray xp, ndarray yp, ndarray zp, double xc, double yc, double zc, double radius, double mx, double my, double mz, ndarray res)"; static PyMethodDef __pyx_mdef_8fatiando_7gravmag_7_sphere_7bz = {__Pyx_NAMESTR("bz"), (PyCFunction)__pyx_pw_8fatiando_7gravmag_7_sphere_7bz, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_7_sphere_6bz)}; static PyObject *__pyx_pw_8fatiando_7gravmag_7_sphere_7bz(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_xp = 0; PyArrayObject *__pyx_v_yp = 0; PyArrayObject *__pyx_v_zp = 0; double __pyx_v_xc; double __pyx_v_yc; double __pyx_v_zc; double __pyx_v_radius; double __pyx_v_mx; double __pyx_v_my; double __pyx_v_mz; PyArrayObject *__pyx_v_res = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("bz (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp,&__pyx_n_s_yp,&__pyx_n_s_zp,&__pyx_n_s_xc,&__pyx_n_s_yc,&__pyx_n_s_zc,&__pyx_n_s_radius,&__pyx_n_s_mx,&__pyx_n_s_my,&__pyx_n_s_mz,&__pyx_n_s_res,0}; PyObject* values[11] = {0,0,0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10); case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9); case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bz", 1, 11, 11, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 127; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bz", 1, 11, 11, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 127; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xc)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bz", 1, 11, 11, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 127; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yc)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bz", 1, 11, 11, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 127; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zc)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bz", 1, 11, 11, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 127; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if 
(likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_radius)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bz", 1, 11, 11, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 127; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mx)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bz", 1, 11, 11, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 127; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_my)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bz", 1, 11, 11, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 127; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 9: if (likely((values[9] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mz)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bz", 1, 11, 11, 9); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 127; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 10: if (likely((values[10] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bz", 1, 11, 11, 10); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 127; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "bz") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 127; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 11) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); values[9] = PyTuple_GET_ITEM(__pyx_args, 9); values[10] = PyTuple_GET_ITEM(__pyx_args, 10); } __pyx_v_xp = ((PyArrayObject *)values[0]); __pyx_v_yp = ((PyArrayObject *)values[1]); __pyx_v_zp = ((PyArrayObject *)values[2]); __pyx_v_xc = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_xc == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 130; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_yc = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_yc == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 130; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_zc = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_zc == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 130; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_radius = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_radius == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 130; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_mx = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_mx == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 131; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_my = __pyx_PyFloat_AsDouble(values[8]); if (unlikely((__pyx_v_my == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 131; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_mz = __pyx_PyFloat_AsDouble(values[9]); if (unlikely((__pyx_v_mz == (double)-1) && PyErr_Occurred())) 
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 131; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_res = ((PyArrayObject *)values[10]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("bz", 1, 11, 11, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 127; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("fatiando.gravmag._sphere.bz", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_xp), __pyx_ptype_5numpy_ndarray, 0, "xp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 127; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 128; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 129; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_res), __pyx_ptype_5numpy_ndarray, 0, "res", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 132; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_8fatiando_7gravmag_7_sphere_6bz(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_xc, __pyx_v_yc, __pyx_v_zc, __pyx_v_radius, __pyx_v_mx, __pyx_v_my, __pyx_v_mz, __pyx_v_res); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_8fatiando_7gravmag_7_sphere_6bz(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, double __pyx_v_xc, double __pyx_v_yc, double __pyx_v_zc, double __pyx_v_radius, double __pyx_v_mx, double __pyx_v_my, double __pyx_v_mz, PyArrayObject *__pyx_v_res) { unsigned int __pyx_v_l; CYTHON_UNUSED unsigned int __pyx_v_size; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_x; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_y; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_z; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_volume; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_r_sqr; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_r_5; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_v3; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_v5; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_v6; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; __Pyx_LocalBuf_ND __pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_t_5; unsigned int __pyx_t_6; unsigned int __pyx_t_7; unsigned int __pyx_t_8; unsigned int __pyx_t_9; unsigned int __pyx_t_10; unsigned int __pyx_t_11; unsigned int __pyx_t_12; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("bz", 0); __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = 
&__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = &__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject*)__pyx_v_xp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 127; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = __pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject*)__pyx_v_yp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 127; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject*)__pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 127; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject*)__pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 127; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* "fatiando/gravmag/_sphere.pyx":136 * cdef DTYPE_T x, y, z * cdef DTYPE_T volume, r_sqr, r_5, v3, v5, v6 * size = len(xp) # <<<<<<<<<<<<<< * volume = 4.*numpy.pi*(radius**3)/3. * with nogil: */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_xp)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 136; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_size = __pyx_t_1; /* "fatiando/gravmag/_sphere.pyx":137 * cdef DTYPE_T volume, r_sqr, r_5, v3, v5, v6 * size = len(xp) * volume = 4.*numpy.pi*(radius**3)/3. 
# <<<<<<<<<<<<<< * with nogil: * for l in prange(size): */ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 137; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_pi); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 137; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyNumber_Multiply(__pyx_float_4_, __pyx_t_3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 137; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyFloat_FromDouble(pow(__pyx_v_radius, 3.0)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 137; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyNumber_Multiply(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 137; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyNumber_Divide(__pyx_t_4, __pyx_float_3_); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 137; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_5 = __pyx_PyFloat_AsDouble(__pyx_t_3); if (unlikely((__pyx_t_5 == (npy_double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 137; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_volume = __pyx_t_5; /* "fatiando/gravmag/_sphere.pyx":138 * size = len(xp) * volume = 4.*numpy.pi*(radius**3)/3. * with nogil: # <<<<<<<<<<<<<< * for l in prange(size): * # First thing to do is make the computation point P the origin of */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { /* "fatiando/gravmag/_sphere.pyx":139 * volume = 4.*numpy.pi*(radius**3)/3. 
* with nogil: * for l in prange(size): # <<<<<<<<<<<<<< * # First thing to do is make the computation point P the origin of * # the coordinate system */ __pyx_t_6 = __pyx_v_size; if (1 == 0) abort(); { #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_8 = (__pyx_t_6 - 0) / 1; if (__pyx_t_8 > 0) { #ifdef _OPENMP #pragma omp parallel private(__pyx_t_12, __pyx_t_11, __pyx_t_9, __pyx_t_10) #endif /* _OPENMP */ { #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_v5) lastprivate(__pyx_v_z) lastprivate(__pyx_v_y) lastprivate(__pyx_v_r_5) lastprivate(__pyx_v_r_sqr) firstprivate(__pyx_v_l) lastprivate(__pyx_v_l) lastprivate(__pyx_v_x) lastprivate(__pyx_v_v6) lastprivate(__pyx_v_v3) #endif /* _OPENMP */ for (__pyx_t_7 = 0; __pyx_t_7 < __pyx_t_8; __pyx_t_7++){ { __pyx_v_l = 0 + 1 * __pyx_t_7; /* Initialize private variables to invalid values */ __pyx_v_v5 = ((__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T)__PYX_NAN()); __pyx_v_z = ((__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T)__PYX_NAN()); __pyx_v_y = ((__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T)__PYX_NAN()); __pyx_v_r_5 = ((__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T)__PYX_NAN()); __pyx_v_r_sqr = ((__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T)__PYX_NAN()); __pyx_v_x = ((__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T)__PYX_NAN()); __pyx_v_v6 = ((__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T)__PYX_NAN()); __pyx_v_v3 = ((__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T)__PYX_NAN()); /* "fatiando/gravmag/_sphere.pyx":142 * # First thing to do is make the computation point P the origin of * # the coordinate system * x = xc - xp[l] # <<<<<<<<<<<<<< * y = yc - yp[l] * z = zc - zp[l] */ __pyx_t_9 = __pyx_v_l; __pyx_v_x = (__pyx_v_xc - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_sphere.pyx":143 * # the coordinate system * x = xc - xp[l] * y = yc - yp[l] # <<<<<<<<<<<<<< * z = zc - zp[l] * r_sqr = x**2 + y**2 + z**2 */ __pyx_t_10 = __pyx_v_l; __pyx_v_y = (__pyx_v_yc - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_sphere.pyx":144 * x = xc - xp[l] * y = yc - yp[l] * z = zc - zp[l] # <<<<<<<<<<<<<< * r_sqr = x**2 + y**2 + z**2 * r_5 = r_sqr**(2.5) */ __pyx_t_11 = __pyx_v_l; __pyx_v_z = (__pyx_v_zc - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_11, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_sphere.pyx":145 * y = yc - yp[l] * z = zc - zp[l] * r_sqr = x**2 + y**2 + z**2 # <<<<<<<<<<<<<< * r_5 = r_sqr**(2.5) * v3 = kernelxz(x, y, z, r_sqr, r_5) */ __pyx_v_r_sqr = ((pow(__pyx_v_x, 2.0) + pow(__pyx_v_y, 2.0)) + pow(__pyx_v_z, 2.0)); /* "fatiando/gravmag/_sphere.pyx":146 * z = zc - zp[l] * r_sqr = x**2 + y**2 + z**2 * r_5 = r_sqr**(2.5) # <<<<<<<<<<<<<< * v3 = kernelxz(x, y, z, r_sqr, r_5) * v5 = kernelyz(x, y, z, r_sqr, r_5) */ __pyx_v_r_5 = pow(((double)__pyx_v_r_sqr), 2.5); /* "fatiando/gravmag/_sphere.pyx":147 * r_sqr = x**2 + y**2 + z**2 * r_5 = r_sqr**(2.5) * v3 = kernelxz(x, y, z, r_sqr, r_5) # <<<<<<<<<<<<<< * v5 = kernelyz(x, y, z, r_sqr, r_5) * v6 = kernelzz(x, y, z, r_sqr, r_5) */ __pyx_v_v3 = 
__pyx_f_8fatiando_7gravmag_7_sphere_kernelxz(__pyx_v_x, __pyx_v_y, __pyx_v_z, __pyx_v_r_sqr, __pyx_v_r_5); /* "fatiando/gravmag/_sphere.pyx":148 * r_5 = r_sqr**(2.5) * v3 = kernelxz(x, y, z, r_sqr, r_5) * v5 = kernelyz(x, y, z, r_sqr, r_5) # <<<<<<<<<<<<<< * v6 = kernelzz(x, y, z, r_sqr, r_5) * res[l] += volume*(v3*mx + v5*my + v6*mz) */ __pyx_v_v5 = __pyx_f_8fatiando_7gravmag_7_sphere_kernelyz(__pyx_v_x, __pyx_v_y, __pyx_v_z, __pyx_v_r_sqr, __pyx_v_r_5); /* "fatiando/gravmag/_sphere.pyx":149 * v3 = kernelxz(x, y, z, r_sqr, r_5) * v5 = kernelyz(x, y, z, r_sqr, r_5) * v6 = kernelzz(x, y, z, r_sqr, r_5) # <<<<<<<<<<<<<< * res[l] += volume*(v3*mx + v5*my + v6*mz) * */ __pyx_v_v6 = __pyx_f_8fatiando_7gravmag_7_sphere_kernelzz(__pyx_v_x, __pyx_v_y, __pyx_v_z, __pyx_v_r_sqr, __pyx_v_r_5); /* "fatiando/gravmag/_sphere.pyx":150 * v5 = kernelyz(x, y, z, r_sqr, r_5) * v6 = kernelzz(x, y, z, r_sqr, r_5) * res[l] += volume*(v3*mx + v5*my + v6*mz) # <<<<<<<<<<<<<< * * @cython.wraparound(False) */ __pyx_t_12 = __pyx_v_l; *__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_res.diminfo[0].strides) += (__pyx_v_volume * (((__pyx_v_v3 * __pyx_v_mx) + (__pyx_v_v5 * __pyx_v_my)) + (__pyx_v_v6 * __pyx_v_mz))); } } } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "fatiando/gravmag/_sphere.pyx":138 * size = len(xp) * volume = 4.*numpy.pi*(radius**3)/3. * with nogil: # <<<<<<<<<<<<<< * for l in prange(size): * # First thing to do is make the computation point P the origin of */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L5:; } } /* "fatiando/gravmag/_sphere.pyx":127 * @cython.wraparound(False) * @cython.boundscheck(False) * def bz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("fatiando.gravmag._sphere.bz", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "fatiando/gravmag/_sphere.pyx":154 * @cython.wraparound(False) * @cython.boundscheck(False) * def gz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* Python wrapper */ static PyObject 
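/* Readability note: a sketch of the bz() loop implemented by the generated code
 * above, reconstructed from the embedded .pyx excerpts; kernelxz, kernelyz and
 * kernelzz are assumed to be the cdef helpers defined elsewhere in _sphere.pyx:
 *
 *     size = len(xp)
 *     volume = 4.*numpy.pi*(radius**3)/3.
 *     with nogil:
 *         for l in prange(size):
 *             x = xc - xp[l]
 *             y = yc - yp[l]
 *             z = zc - zp[l]
 *             r_sqr = x**2 + y**2 + z**2
 *             r_5 = r_sqr**(2.5)
 *             v3 = kernelxz(x, y, z, r_sqr, r_5)
 *             v5 = kernelyz(x, y, z, r_sqr, r_5)
 *             v6 = kernelzz(x, y, z, r_sqr, r_5)
 *             res[l] += volume*(v3*mx + v5*my + v6*mz)
 */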
*__pyx_pw_8fatiando_7gravmag_7_sphere_9gz(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_8fatiando_7gravmag_7_sphere_8gz[] = "gz(ndarray xp, ndarray yp, ndarray zp, double xc, double yc, double zc, double radius, double density, ndarray res)"; static PyMethodDef __pyx_mdef_8fatiando_7gravmag_7_sphere_9gz = {__Pyx_NAMESTR("gz"), (PyCFunction)__pyx_pw_8fatiando_7gravmag_7_sphere_9gz, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_7_sphere_8gz)}; static PyObject *__pyx_pw_8fatiando_7gravmag_7_sphere_9gz(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_xp = 0; PyArrayObject *__pyx_v_yp = 0; PyArrayObject *__pyx_v_zp = 0; double __pyx_v_xc; double __pyx_v_yc; double __pyx_v_zc; double __pyx_v_radius; double __pyx_v_density; PyArrayObject *__pyx_v_res = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("gz (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp,&__pyx_n_s_yp,&__pyx_n_s_zp,&__pyx_n_s_xc,&__pyx_n_s_yc,&__pyx_n_s_zc,&__pyx_n_s_radius,&__pyx_n_s_density,&__pyx_n_s_res,0}; PyObject* values[9] = {0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gz", 1, 9, 9, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 154; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gz", 1, 9, 9, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 154; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xc)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gz", 1, 9, 9, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 154; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yc)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gz", 1, 9, 9, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 154; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zc)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gz", 1, 9, 9, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 154; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_radius)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gz", 1, 9, 9, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 154; 
__pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_density)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gz", 1, 9, 9, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 154; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gz", 1, 9, 9, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 154; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gz") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 154; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 9) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); } __pyx_v_xp = ((PyArrayObject *)values[0]); __pyx_v_yp = ((PyArrayObject *)values[1]); __pyx_v_zp = ((PyArrayObject *)values[2]); __pyx_v_xc = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_xc == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 157; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_yc = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_yc == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 157; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_zc = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_zc == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 157; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_radius = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_radius == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 157; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_density = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_density == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 158; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_res = ((PyArrayObject *)values[8]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("gz", 1, 9, 9, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 154; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("fatiando.gravmag._sphere.gz", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_xp), __pyx_ptype_5numpy_ndarray, 0, "xp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 154; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 155; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 156; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if 
(unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_res), __pyx_ptype_5numpy_ndarray, 0, "res", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 159; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_8fatiando_7gravmag_7_sphere_8gz(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_xc, __pyx_v_yc, __pyx_v_zc, __pyx_v_radius, __pyx_v_density, __pyx_v_res); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_8fatiando_7gravmag_7_sphere_8gz(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, double __pyx_v_xc, double __pyx_v_yc, double __pyx_v_zc, double __pyx_v_radius, double __pyx_v_density, PyArrayObject *__pyx_v_res) { unsigned int __pyx_v_l; CYTHON_UNUSED unsigned int __pyx_v_size; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_mass; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_r_cb; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_x; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_y; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_z; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; __Pyx_LocalBuf_ND __pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_t_5; unsigned int __pyx_t_6; unsigned int __pyx_t_7; unsigned int __pyx_t_8; unsigned int __pyx_t_9; unsigned int __pyx_t_10; unsigned int __pyx_t_11; unsigned int __pyx_t_12; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("gz", 0); __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = &__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = &__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject*)__pyx_v_xp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 154; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = __pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject*)__pyx_v_yp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 154; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_yp.diminfo[0].strides = 
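/* Readability note: the gz() loop set up in the generated code that follows treats
 * the sphere as a point mass; this sketch is reconstructed from the .pyx excerpt
 * comments below, with kernelz assumed to be the cdef helper defined elsewhere in
 * _sphere.pyx:
 *
 *     size = len(xp)
 *     mass = density*4.*numpy.pi*(radius**3)/3.
 *     with nogil:
 *         for l in prange(size):
 *             x = xc - xp[l]
 *             y = yc - yp[l]
 *             z = zc - zp[l]
 *             r_cb = (x**2 + y**2 + z**2)**(1.5)
 *             res[l] += mass*kernelz(x, y, z, r_cb)
 */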
__pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject*)__pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 154; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject*)__pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 154; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* "fatiando/gravmag/_sphere.pyx":162 * cdef unsigned int l, size * cdef DTYPE_T mass, r_cb, x, y, z * size = len(xp) # <<<<<<<<<<<<<< * mass = density*4.*numpy.pi*(radius**3)/3. * with nogil: */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_xp)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 162; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_size = __pyx_t_1; /* "fatiando/gravmag/_sphere.pyx":163 * cdef DTYPE_T mass, r_cb, x, y, z * size = len(xp) * mass = density*4.*numpy.pi*(radius**3)/3. 
# <<<<<<<<<<<<<< * with nogil: * for l in prange(size): */ __pyx_t_2 = PyFloat_FromDouble((__pyx_v_density * 4.)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 163; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 163; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_pi); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 163; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyNumber_Multiply(__pyx_t_2, __pyx_t_4); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 163; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyFloat_FromDouble(pow(__pyx_v_radius, 3.0)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 163; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_2 = PyNumber_Multiply(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 163; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyNumber_Divide(__pyx_t_2, __pyx_float_3_); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 163; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_5 = __pyx_PyFloat_AsDouble(__pyx_t_4); if (unlikely((__pyx_t_5 == (npy_double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 163; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_v_mass = __pyx_t_5; /* "fatiando/gravmag/_sphere.pyx":164 * size = len(xp) * mass = density*4.*numpy.pi*(radius**3)/3. * with nogil: # <<<<<<<<<<<<<< * for l in prange(size): * # First thing to do is make the computation point P the origin of */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { /* "fatiando/gravmag/_sphere.pyx":165 * mass = density*4.*numpy.pi*(radius**3)/3. 
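 * (editorial note, not Cython output: per the echoed .pyx lines above, the sphere is
 * reduced to a point mass M = density*4*pi*radius**3/3, i.e. (4/3)*pi*rho*R**3, and the
 * prange loop below accumulates res[l] += M*kernelz(x, y, z, r_cb) for each observation
 * point, where r_cb = (x**2 + y**2 + z**2)**1.5 is the cubed source-to-observer distance;
 * the definition of kernelz itself lives elsewhere in this module and is not shown here.)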
* with nogil: * for l in prange(size): # <<<<<<<<<<<<<< * # First thing to do is make the computation point P the origin of * # the coordinate system */ __pyx_t_6 = __pyx_v_size; if (1 == 0) abort(); { #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_8 = (__pyx_t_6 - 0) / 1; if (__pyx_t_8 > 0) { #ifdef _OPENMP #pragma omp parallel private(__pyx_t_12, __pyx_t_11, __pyx_t_9, __pyx_t_10) #endif /* _OPENMP */ { #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_y) lastprivate(__pyx_v_z) firstprivate(__pyx_v_l) lastprivate(__pyx_v_l) lastprivate(__pyx_v_r_cb) lastprivate(__pyx_v_x) #endif /* _OPENMP */ for (__pyx_t_7 = 0; __pyx_t_7 < __pyx_t_8; __pyx_t_7++){ { __pyx_v_l = 0 + 1 * __pyx_t_7; /* Initialize private variables to invalid values */ __pyx_v_y = ((__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T)__PYX_NAN()); __pyx_v_z = ((__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T)__PYX_NAN()); __pyx_v_r_cb = ((__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T)__PYX_NAN()); __pyx_v_x = ((__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T)__PYX_NAN()); /* "fatiando/gravmag/_sphere.pyx":168 * # First thing to do is make the computation point P the origin of * # the coordinate system * x = xc - xp[l] # <<<<<<<<<<<<<< * y = yc - yp[l] * z = zc - zp[l] */ __pyx_t_9 = __pyx_v_l; __pyx_v_x = (__pyx_v_xc - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_sphere.pyx":169 * # the coordinate system * x = xc - xp[l] * y = yc - yp[l] # <<<<<<<<<<<<<< * z = zc - zp[l] * r_cb = (x**2 + y**2 + z**2)**(1.5) */ __pyx_t_10 = __pyx_v_l; __pyx_v_y = (__pyx_v_yc - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_sphere.pyx":170 * x = xc - xp[l] * y = yc - yp[l] * z = zc - zp[l] # <<<<<<<<<<<<<< * r_cb = (x**2 + y**2 + z**2)**(1.5) * res[l] += mass*kernelz(x, y, z, r_cb) */ __pyx_t_11 = __pyx_v_l; __pyx_v_z = (__pyx_v_zc - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_11, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_sphere.pyx":171 * y = yc - yp[l] * z = zc - zp[l] * r_cb = (x**2 + y**2 + z**2)**(1.5) # <<<<<<<<<<<<<< * res[l] += mass*kernelz(x, y, z, r_cb) * */ __pyx_v_r_cb = pow(((double)((pow(__pyx_v_x, 2.0) + pow(__pyx_v_y, 2.0)) + pow(__pyx_v_z, 2.0))), 1.5); /* "fatiando/gravmag/_sphere.pyx":172 * z = zc - zp[l] * r_cb = (x**2 + y**2 + z**2)**(1.5) * res[l] += mass*kernelz(x, y, z, r_cb) # <<<<<<<<<<<<<< * * @cython.wraparound(False) */ __pyx_t_12 = __pyx_v_l; *__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_res.diminfo[0].strides) += (__pyx_v_mass * __pyx_f_8fatiando_7gravmag_7_sphere_kernelz(__pyx_v_x, __pyx_v_y, __pyx_v_z, __pyx_v_r_cb)); } } } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "fatiando/gravmag/_sphere.pyx":164 * size = len(xp) * mass = 
density*4.*numpy.pi*(radius**3)/3. * with nogil: # <<<<<<<<<<<<<< * for l in prange(size): * # First thing to do is make the computation point P the origin of */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L5:; } } /* "fatiando/gravmag/_sphere.pyx":154 * @cython.wraparound(False) * @cython.boundscheck(False) * def gz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("fatiando.gravmag._sphere.gz", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "fatiando/gravmag/_sphere.pyx":176 * @cython.wraparound(False) * @cython.boundscheck(False) * def gxx(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* Python wrapper */ static PyObject *__pyx_pw_8fatiando_7gravmag_7_sphere_11gxx(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_8fatiando_7gravmag_7_sphere_10gxx[] = "gxx(ndarray xp, ndarray yp, ndarray zp, double xc, double yc, double zc, double radius, double density, ndarray res)"; static PyMethodDef __pyx_mdef_8fatiando_7gravmag_7_sphere_11gxx = {__Pyx_NAMESTR("gxx"), (PyCFunction)__pyx_pw_8fatiando_7gravmag_7_sphere_11gxx, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_7_sphere_10gxx)}; static PyObject *__pyx_pw_8fatiando_7gravmag_7_sphere_11gxx(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_xp = 0; PyArrayObject *__pyx_v_yp = 0; PyArrayObject *__pyx_v_zp = 0; double __pyx_v_xc; double __pyx_v_yc; double __pyx_v_zc; double __pyx_v_radius; double __pyx_v_density; PyArrayObject *__pyx_v_res = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("gxx (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp,&__pyx_n_s_yp,&__pyx_n_s_zp,&__pyx_n_s_xc,&__pyx_n_s_yc,&__pyx_n_s_zc,&__pyx_n_s_radius,&__pyx_n_s_density,&__pyx_n_s_res,0}; PyObject* values[9] = {0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 
5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxx", 1, 9, 9, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 176; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxx", 1, 9, 9, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 176; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xc)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxx", 1, 9, 9, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 176; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yc)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxx", 1, 9, 9, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 176; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zc)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxx", 1, 9, 9, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 176; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_radius)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxx", 1, 9, 9, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 176; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_density)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxx", 1, 9, 9, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 176; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxx", 1, 9, 9, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 176; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gxx") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 176; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 9) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); } __pyx_v_xp = ((PyArrayObject *)values[0]); __pyx_v_yp = ((PyArrayObject *)values[1]); __pyx_v_zp = ((PyArrayObject *)values[2]); __pyx_v_xc = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_xc == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 179; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_yc 
= __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_yc == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 179; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_zc = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_zc == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 179; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_radius = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_radius == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 179; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_density = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_density == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 180; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_res = ((PyArrayObject *)values[8]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("gxx", 1, 9, 9, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 176; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("fatiando.gravmag._sphere.gxx", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_xp), __pyx_ptype_5numpy_ndarray, 0, "xp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 176; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 177; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 178; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_res), __pyx_ptype_5numpy_ndarray, 0, "res", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 181; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_8fatiando_7gravmag_7_sphere_10gxx(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_xc, __pyx_v_yc, __pyx_v_zc, __pyx_v_radius, __pyx_v_density, __pyx_v_res); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_8fatiando_7gravmag_7_sphere_10gxx(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, double __pyx_v_xc, double __pyx_v_yc, double __pyx_v_zc, double __pyx_v_radius, double __pyx_v_density, PyArrayObject *__pyx_v_res) { unsigned int __pyx_v_l; CYTHON_UNUSED unsigned int __pyx_v_size; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_mass; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_r_sqr; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_x; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_y; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_z; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; __Pyx_LocalBuf_ND __pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; 
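/* Editorial sketch (not part of the generated module): the gxx body that follows, like
 * the gxy and gxz bodies further down, repeats the gz pattern with a different kernel.
 * Reconstructed from the echoed _sphere.pyx source, the per-point computation is roughly:
 *
 *     x, y, z = xc - xp[l], yc - yp[l], zc - zp[l]
 *     r_sqr = x**2 + y**2 + z**2
 *     res[l] += mass * kernelxx(x, y, z, r_sqr, r_sqr**2.5)   # r_sqr**2.5 == |r|**5
 *
 * with mass = (4/3)*pi*density*radius**3 computed once per call. The kernel functions
 * themselves are defined elsewhere in this module and are assumed, not shown, here.
 */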
__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_t_5; unsigned int __pyx_t_6; unsigned int __pyx_t_7; unsigned int __pyx_t_8; unsigned int __pyx_t_9; unsigned int __pyx_t_10; unsigned int __pyx_t_11; unsigned int __pyx_t_12; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("gxx", 0); __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = &__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = &__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject*)__pyx_v_xp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 176; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = __pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject*)__pyx_v_yp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 176; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject*)__pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 176; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject*)__pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 176; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* "fatiando/gravmag/_sphere.pyx":184 * cdef unsigned int l, size * cdef DTYPE_T mass, r_sqr, x, y, z * size = len(xp) # <<<<<<<<<<<<<< * mass = density*4.*numpy.pi*(radius**3)/3. 
* with nogil: */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_xp)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 184; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_size = __pyx_t_1; /* "fatiando/gravmag/_sphere.pyx":185 * cdef DTYPE_T mass, r_sqr, x, y, z * size = len(xp) * mass = density*4.*numpy.pi*(radius**3)/3. # <<<<<<<<<<<<<< * with nogil: * for l in prange(size): */ __pyx_t_2 = PyFloat_FromDouble((__pyx_v_density * 4.)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 185; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 185; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_pi); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 185; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyNumber_Multiply(__pyx_t_2, __pyx_t_4); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 185; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyFloat_FromDouble(pow(__pyx_v_radius, 3.0)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 185; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_2 = PyNumber_Multiply(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 185; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyNumber_Divide(__pyx_t_2, __pyx_float_3_); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 185; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_5 = __pyx_PyFloat_AsDouble(__pyx_t_4); if (unlikely((__pyx_t_5 == (npy_double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 185; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_v_mass = __pyx_t_5; /* "fatiando/gravmag/_sphere.pyx":186 * size = len(xp) * mass = density*4.*numpy.pi*(radius**3)/3. * with nogil: # <<<<<<<<<<<<<< * for l in prange(size): * # First thing to do is make the computation point P the origin of */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { /* "fatiando/gravmag/_sphere.pyx":187 * mass = density*4.*numpy.pi*(radius**3)/3. 
* with nogil: * for l in prange(size): # <<<<<<<<<<<<<< * # First thing to do is make the computation point P the origin of * # the coordinate system */ __pyx_t_6 = __pyx_v_size; if (1 == 0) abort(); { #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_8 = (__pyx_t_6 - 0) / 1; if (__pyx_t_8 > 0) { #ifdef _OPENMP #pragma omp parallel private(__pyx_t_12, __pyx_t_11, __pyx_t_9, __pyx_t_10) #endif /* _OPENMP */ { #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_x) firstprivate(__pyx_v_l) lastprivate(__pyx_v_l) lastprivate(__pyx_v_z) lastprivate(__pyx_v_r_sqr) lastprivate(__pyx_v_y) #endif /* _OPENMP */ for (__pyx_t_7 = 0; __pyx_t_7 < __pyx_t_8; __pyx_t_7++){ { __pyx_v_l = 0 + 1 * __pyx_t_7; /* Initialize private variables to invalid values */ __pyx_v_x = ((__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T)__PYX_NAN()); __pyx_v_z = ((__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T)__PYX_NAN()); __pyx_v_r_sqr = ((__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T)__PYX_NAN()); __pyx_v_y = ((__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T)__PYX_NAN()); /* "fatiando/gravmag/_sphere.pyx":190 * # First thing to do is make the computation point P the origin of * # the coordinate system * x = xc - xp[l] # <<<<<<<<<<<<<< * y = yc - yp[l] * z = zc - zp[l] */ __pyx_t_9 = __pyx_v_l; __pyx_v_x = (__pyx_v_xc - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_sphere.pyx":191 * # the coordinate system * x = xc - xp[l] * y = yc - yp[l] # <<<<<<<<<<<<<< * z = zc - zp[l] * r_sqr = x**2 + y**2 + z**2 */ __pyx_t_10 = __pyx_v_l; __pyx_v_y = (__pyx_v_yc - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_sphere.pyx":192 * x = xc - xp[l] * y = yc - yp[l] * z = zc - zp[l] # <<<<<<<<<<<<<< * r_sqr = x**2 + y**2 + z**2 * res[l] += mass*kernelxx(x, y, z, r_sqr, r_sqr**(2.5)) */ __pyx_t_11 = __pyx_v_l; __pyx_v_z = (__pyx_v_zc - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_11, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_sphere.pyx":193 * y = yc - yp[l] * z = zc - zp[l] * r_sqr = x**2 + y**2 + z**2 # <<<<<<<<<<<<<< * res[l] += mass*kernelxx(x, y, z, r_sqr, r_sqr**(2.5)) * */ __pyx_v_r_sqr = ((pow(__pyx_v_x, 2.0) + pow(__pyx_v_y, 2.0)) + pow(__pyx_v_z, 2.0)); /* "fatiando/gravmag/_sphere.pyx":194 * z = zc - zp[l] * r_sqr = x**2 + y**2 + z**2 * res[l] += mass*kernelxx(x, y, z, r_sqr, r_sqr**(2.5)) # <<<<<<<<<<<<<< * * @cython.wraparound(False) */ __pyx_t_12 = __pyx_v_l; *__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_res.diminfo[0].strides) += (__pyx_v_mass * __pyx_f_8fatiando_7gravmag_7_sphere_kernelxx(__pyx_v_x, __pyx_v_y, __pyx_v_z, __pyx_v_r_sqr, pow(((double)__pyx_v_r_sqr), 2.5))); } } } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "fatiando/gravmag/_sphere.pyx":186 * size = 
len(xp) * mass = density*4.*numpy.pi*(radius**3)/3. * with nogil: # <<<<<<<<<<<<<< * for l in prange(size): * # First thing to do is make the computation point P the origin of */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L5:; } } /* "fatiando/gravmag/_sphere.pyx":176 * @cython.wraparound(False) * @cython.boundscheck(False) * def gxx(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("fatiando.gravmag._sphere.gxx", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "fatiando/gravmag/_sphere.pyx":198 * @cython.wraparound(False) * @cython.boundscheck(False) * def gxy(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* Python wrapper */ static PyObject *__pyx_pw_8fatiando_7gravmag_7_sphere_13gxy(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_8fatiando_7gravmag_7_sphere_12gxy[] = "gxy(ndarray xp, ndarray yp, ndarray zp, double xc, double yc, double zc, double radius, double density, ndarray res)"; static PyMethodDef __pyx_mdef_8fatiando_7gravmag_7_sphere_13gxy = {__Pyx_NAMESTR("gxy"), (PyCFunction)__pyx_pw_8fatiando_7gravmag_7_sphere_13gxy, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_7_sphere_12gxy)}; static PyObject *__pyx_pw_8fatiando_7gravmag_7_sphere_13gxy(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_xp = 0; PyArrayObject *__pyx_v_yp = 0; PyArrayObject *__pyx_v_zp = 0; double __pyx_v_xc; double __pyx_v_yc; double __pyx_v_zc; double __pyx_v_radius; double __pyx_v_density; PyArrayObject *__pyx_v_res = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("gxy (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp,&__pyx_n_s_yp,&__pyx_n_s_zp,&__pyx_n_s_xc,&__pyx_n_s_yc,&__pyx_n_s_zc,&__pyx_n_s_radius,&__pyx_n_s_density,&__pyx_n_s_res,0}; PyObject* values[9] = {0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = 
PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxy", 1, 9, 9, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 198; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxy", 1, 9, 9, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 198; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xc)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxy", 1, 9, 9, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 198; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yc)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxy", 1, 9, 9, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 198; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zc)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxy", 1, 9, 9, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 198; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_radius)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxy", 1, 9, 9, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 198; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_density)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxy", 1, 9, 9, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 198; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxy", 1, 9, 9, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 198; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gxy") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 198; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 9) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); } __pyx_v_xp = ((PyArrayObject *)values[0]); __pyx_v_yp = ((PyArrayObject *)values[1]); __pyx_v_zp = ((PyArrayObject *)values[2]); __pyx_v_xc = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_xc == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 201; __pyx_clineno = 
__LINE__; goto __pyx_L3_error;} __pyx_v_yc = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_yc == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 201; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_zc = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_zc == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 201; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_radius = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_radius == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 201; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_density = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_density == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 202; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_res = ((PyArrayObject *)values[8]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("gxy", 1, 9, 9, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 198; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("fatiando.gravmag._sphere.gxy", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_xp), __pyx_ptype_5numpy_ndarray, 0, "xp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 198; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 199; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 200; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_res), __pyx_ptype_5numpy_ndarray, 0, "res", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 203; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_8fatiando_7gravmag_7_sphere_12gxy(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_xc, __pyx_v_yc, __pyx_v_zc, __pyx_v_radius, __pyx_v_density, __pyx_v_res); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_8fatiando_7gravmag_7_sphere_12gxy(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, double __pyx_v_xc, double __pyx_v_yc, double __pyx_v_zc, double __pyx_v_radius, double __pyx_v_density, PyArrayObject *__pyx_v_res) { unsigned int __pyx_v_l; CYTHON_UNUSED unsigned int __pyx_v_size; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_mass; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_r_sqr; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_x; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_y; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_z; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; __Pyx_LocalBuf_ND __pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject 
*__pyx_t_4 = NULL; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_t_5; unsigned int __pyx_t_6; unsigned int __pyx_t_7; unsigned int __pyx_t_8; unsigned int __pyx_t_9; unsigned int __pyx_t_10; unsigned int __pyx_t_11; unsigned int __pyx_t_12; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("gxy", 0); __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = &__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = &__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject*)__pyx_v_xp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 198; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = __pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject*)__pyx_v_yp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 198; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject*)__pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 198; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject*)__pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 198; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* "fatiando/gravmag/_sphere.pyx":206 * cdef unsigned int l, size * cdef DTYPE_T mass, r_sqr, x, y, z * size = len(xp) # <<<<<<<<<<<<<< * mass = density*4.*numpy.pi*(radius**3)/3. 
* with nogil: */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_xp)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 206; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_size = __pyx_t_1; /* "fatiando/gravmag/_sphere.pyx":207 * cdef DTYPE_T mass, r_sqr, x, y, z * size = len(xp) * mass = density*4.*numpy.pi*(radius**3)/3. # <<<<<<<<<<<<<< * with nogil: * for l in prange(size): */ __pyx_t_2 = PyFloat_FromDouble((__pyx_v_density * 4.)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 207; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 207; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_pi); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 207; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyNumber_Multiply(__pyx_t_2, __pyx_t_4); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 207; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyFloat_FromDouble(pow(__pyx_v_radius, 3.0)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 207; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_2 = PyNumber_Multiply(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 207; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyNumber_Divide(__pyx_t_2, __pyx_float_3_); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 207; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_5 = __pyx_PyFloat_AsDouble(__pyx_t_4); if (unlikely((__pyx_t_5 == (npy_double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 207; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_v_mass = __pyx_t_5; /* "fatiando/gravmag/_sphere.pyx":208 * size = len(xp) * mass = density*4.*numpy.pi*(radius**3)/3. * with nogil: # <<<<<<<<<<<<<< * for l in prange(size): * # First thing to do is make the computation point P the origin of */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { /* "fatiando/gravmag/_sphere.pyx":209 * mass = density*4.*numpy.pi*(radius**3)/3. 
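 * (editorial note: the gxy body is structurally identical to gxx above, with the same
 * x, y, z offsets and r_sqr; only the kernel call changes, to kernelxy(x, y, z, r_sqr,
 * r_sqr**2.5), as the echoed source below shows.)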
* with nogil: * for l in prange(size): # <<<<<<<<<<<<<< * # First thing to do is make the computation point P the origin of * # the coordinate system */ __pyx_t_6 = __pyx_v_size; if (1 == 0) abort(); { #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_8 = (__pyx_t_6 - 0) / 1; if (__pyx_t_8 > 0) { #ifdef _OPENMP #pragma omp parallel private(__pyx_t_12, __pyx_t_11, __pyx_t_9, __pyx_t_10) #endif /* _OPENMP */ { #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_y) lastprivate(__pyx_v_x) lastprivate(__pyx_v_z) firstprivate(__pyx_v_l) lastprivate(__pyx_v_l) lastprivate(__pyx_v_r_sqr) #endif /* _OPENMP */ for (__pyx_t_7 = 0; __pyx_t_7 < __pyx_t_8; __pyx_t_7++){ { __pyx_v_l = 0 + 1 * __pyx_t_7; /* Initialize private variables to invalid values */ __pyx_v_y = ((__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T)__PYX_NAN()); __pyx_v_x = ((__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T)__PYX_NAN()); __pyx_v_z = ((__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T)__PYX_NAN()); __pyx_v_r_sqr = ((__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T)__PYX_NAN()); /* "fatiando/gravmag/_sphere.pyx":212 * # First thing to do is make the computation point P the origin of * # the coordinate system * x = xc - xp[l] # <<<<<<<<<<<<<< * y = yc - yp[l] * z = zc - zp[l] */ __pyx_t_9 = __pyx_v_l; __pyx_v_x = (__pyx_v_xc - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_sphere.pyx":213 * # the coordinate system * x = xc - xp[l] * y = yc - yp[l] # <<<<<<<<<<<<<< * z = zc - zp[l] * r_sqr = x**2 + y**2 + z**2 */ __pyx_t_10 = __pyx_v_l; __pyx_v_y = (__pyx_v_yc - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_sphere.pyx":214 * x = xc - xp[l] * y = yc - yp[l] * z = zc - zp[l] # <<<<<<<<<<<<<< * r_sqr = x**2 + y**2 + z**2 * res[l] += mass*kernelxy(x, y, z, r_sqr, r_sqr**(2.5)) */ __pyx_t_11 = __pyx_v_l; __pyx_v_z = (__pyx_v_zc - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_11, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_sphere.pyx":215 * y = yc - yp[l] * z = zc - zp[l] * r_sqr = x**2 + y**2 + z**2 # <<<<<<<<<<<<<< * res[l] += mass*kernelxy(x, y, z, r_sqr, r_sqr**(2.5)) * */ __pyx_v_r_sqr = ((pow(__pyx_v_x, 2.0) + pow(__pyx_v_y, 2.0)) + pow(__pyx_v_z, 2.0)); /* "fatiando/gravmag/_sphere.pyx":216 * z = zc - zp[l] * r_sqr = x**2 + y**2 + z**2 * res[l] += mass*kernelxy(x, y, z, r_sqr, r_sqr**(2.5)) # <<<<<<<<<<<<<< * * @cython.wraparound(False) */ __pyx_t_12 = __pyx_v_l; *__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_res.diminfo[0].strides) += (__pyx_v_mass * __pyx_f_8fatiando_7gravmag_7_sphere_kernelxy(__pyx_v_x, __pyx_v_y, __pyx_v_z, __pyx_v_r_sqr, pow(((double)__pyx_v_r_sqr), 2.5))); } } } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "fatiando/gravmag/_sphere.pyx":208 * size = 
len(xp) * mass = density*4.*numpy.pi*(radius**3)/3. * with nogil: # <<<<<<<<<<<<<< * for l in prange(size): * # First thing to do is make the computation point P the origin of */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L5:; } } /* "fatiando/gravmag/_sphere.pyx":198 * @cython.wraparound(False) * @cython.boundscheck(False) * def gxy(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("fatiando.gravmag._sphere.gxy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "fatiando/gravmag/_sphere.pyx":220 * @cython.wraparound(False) * @cython.boundscheck(False) * def gxz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* Python wrapper */ static PyObject *__pyx_pw_8fatiando_7gravmag_7_sphere_15gxz(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_8fatiando_7gravmag_7_sphere_14gxz[] = "gxz(ndarray xp, ndarray yp, ndarray zp, double xc, double yc, double zc, double radius, double density, ndarray res)"; static PyMethodDef __pyx_mdef_8fatiando_7gravmag_7_sphere_15gxz = {__Pyx_NAMESTR("gxz"), (PyCFunction)__pyx_pw_8fatiando_7gravmag_7_sphere_15gxz, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_7_sphere_14gxz)}; static PyObject *__pyx_pw_8fatiando_7gravmag_7_sphere_15gxz(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_xp = 0; PyArrayObject *__pyx_v_yp = 0; PyArrayObject *__pyx_v_zp = 0; double __pyx_v_xc; double __pyx_v_yc; double __pyx_v_zc; double __pyx_v_radius; double __pyx_v_density; PyArrayObject *__pyx_v_res = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("gxz (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp,&__pyx_n_s_yp,&__pyx_n_s_zp,&__pyx_n_s_xc,&__pyx_n_s_yc,&__pyx_n_s_zc,&__pyx_n_s_radius,&__pyx_n_s_density,&__pyx_n_s_res,0}; PyObject* values[9] = {0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = 
PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxz", 1, 9, 9, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 220; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxz", 1, 9, 9, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 220; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xc)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxz", 1, 9, 9, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 220; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yc)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxz", 1, 9, 9, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 220; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zc)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxz", 1, 9, 9, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 220; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_radius)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxz", 1, 9, 9, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 220; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_density)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxz", 1, 9, 9, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 220; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxz", 1, 9, 9, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 220; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gxz") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 220; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 9) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); } __pyx_v_xp = ((PyArrayObject *)values[0]); __pyx_v_yp = ((PyArrayObject *)values[1]); __pyx_v_zp = ((PyArrayObject *)values[2]); __pyx_v_xc = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_xc == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 223; __pyx_clineno = 
__LINE__; goto __pyx_L3_error;} __pyx_v_yc = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_yc == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 223; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_zc = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_zc == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 223; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_radius = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_radius == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 223; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_density = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_density == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 224; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_res = ((PyArrayObject *)values[8]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("gxz", 1, 9, 9, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 220; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("fatiando.gravmag._sphere.gxz", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_xp), __pyx_ptype_5numpy_ndarray, 0, "xp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 220; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 221; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 222; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_res), __pyx_ptype_5numpy_ndarray, 0, "res", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 225; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_8fatiando_7gravmag_7_sphere_14gxz(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_xc, __pyx_v_yc, __pyx_v_zc, __pyx_v_radius, __pyx_v_density, __pyx_v_res); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_8fatiando_7gravmag_7_sphere_14gxz(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, double __pyx_v_xc, double __pyx_v_yc, double __pyx_v_zc, double __pyx_v_radius, double __pyx_v_density, PyArrayObject *__pyx_v_res) { unsigned int __pyx_v_l; CYTHON_UNUSED unsigned int __pyx_v_size; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_mass; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_r_sqr; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_x; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_y; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_z; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; __Pyx_LocalBuf_ND __pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject 
*__pyx_t_4 = NULL; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_t_5; unsigned int __pyx_t_6; unsigned int __pyx_t_7; unsigned int __pyx_t_8; unsigned int __pyx_t_9; unsigned int __pyx_t_10; unsigned int __pyx_t_11; unsigned int __pyx_t_12; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("gxz", 0); __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = &__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = &__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject*)__pyx_v_xp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 220; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = __pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject*)__pyx_v_yp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 220; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject*)__pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 220; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject*)__pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 220; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* "fatiando/gravmag/_sphere.pyx":228 * cdef unsigned int l, size * cdef DTYPE_T mass, r_sqr, x, y, z * size = len(xp) # <<<<<<<<<<<<<< * mass = density*4.*numpy.pi*(radius**3)/3. 
* with nogil: */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_xp)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 228; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_size = __pyx_t_1; /* "fatiando/gravmag/_sphere.pyx":229 * cdef DTYPE_T mass, r_sqr, x, y, z * size = len(xp) * mass = density*4.*numpy.pi*(radius**3)/3. # <<<<<<<<<<<<<< * with nogil: * for l in prange(size): */ __pyx_t_2 = PyFloat_FromDouble((__pyx_v_density * 4.)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 229; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 229; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_pi); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 229; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyNumber_Multiply(__pyx_t_2, __pyx_t_4); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 229; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyFloat_FromDouble(pow(__pyx_v_radius, 3.0)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 229; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_2 = PyNumber_Multiply(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 229; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyNumber_Divide(__pyx_t_2, __pyx_float_3_); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 229; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_5 = __pyx_PyFloat_AsDouble(__pyx_t_4); if (unlikely((__pyx_t_5 == (npy_double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 229; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_v_mass = __pyx_t_5; /* "fatiando/gravmag/_sphere.pyx":230 * size = len(xp) * mass = density*4.*numpy.pi*(radius**3)/3. * with nogil: # <<<<<<<<<<<<<< * for l in prange(size): * # First thing to do is make the computation point P the origin of */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { /* "fatiando/gravmag/_sphere.pyx":231 * mass = density*4.*numpy.pi*(radius**3)/3. 
* with nogil: * for l in prange(size): # <<<<<<<<<<<<<< * # First thing to do is make the computation point P the origin of * # the coordinate system */ __pyx_t_6 = __pyx_v_size; if (1 == 0) abort(); { #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_8 = (__pyx_t_6 - 0) / 1; if (__pyx_t_8 > 0) { #ifdef _OPENMP #pragma omp parallel private(__pyx_t_12, __pyx_t_11, __pyx_t_9, __pyx_t_10) #endif /* _OPENMP */ { #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_x) lastprivate(__pyx_v_z) lastprivate(__pyx_v_r_sqr) firstprivate(__pyx_v_l) lastprivate(__pyx_v_l) lastprivate(__pyx_v_y) #endif /* _OPENMP */ for (__pyx_t_7 = 0; __pyx_t_7 < __pyx_t_8; __pyx_t_7++){ { __pyx_v_l = 0 + 1 * __pyx_t_7; /* Initialize private variables to invalid values */ __pyx_v_x = ((__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T)__PYX_NAN()); __pyx_v_z = ((__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T)__PYX_NAN()); __pyx_v_r_sqr = ((__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T)__PYX_NAN()); __pyx_v_y = ((__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T)__PYX_NAN()); /* "fatiando/gravmag/_sphere.pyx":234 * # First thing to do is make the computation point P the origin of * # the coordinate system * x = xc - xp[l] # <<<<<<<<<<<<<< * y = yc - yp[l] * z = zc - zp[l] */ __pyx_t_9 = __pyx_v_l; __pyx_v_x = (__pyx_v_xc - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_sphere.pyx":235 * # the coordinate system * x = xc - xp[l] * y = yc - yp[l] # <<<<<<<<<<<<<< * z = zc - zp[l] * r_sqr = x**2 + y**2 + z**2 */ __pyx_t_10 = __pyx_v_l; __pyx_v_y = (__pyx_v_yc - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_sphere.pyx":236 * x = xc - xp[l] * y = yc - yp[l] * z = zc - zp[l] # <<<<<<<<<<<<<< * r_sqr = x**2 + y**2 + z**2 * res[l] += mass*kernelxz(x, y, z, r_sqr, r_sqr**(2.5)) */ __pyx_t_11 = __pyx_v_l; __pyx_v_z = (__pyx_v_zc - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_11, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_sphere.pyx":237 * y = yc - yp[l] * z = zc - zp[l] * r_sqr = x**2 + y**2 + z**2 # <<<<<<<<<<<<<< * res[l] += mass*kernelxz(x, y, z, r_sqr, r_sqr**(2.5)) * */ __pyx_v_r_sqr = ((pow(__pyx_v_x, 2.0) + pow(__pyx_v_y, 2.0)) + pow(__pyx_v_z, 2.0)); /* "fatiando/gravmag/_sphere.pyx":238 * z = zc - zp[l] * r_sqr = x**2 + y**2 + z**2 * res[l] += mass*kernelxz(x, y, z, r_sqr, r_sqr**(2.5)) # <<<<<<<<<<<<<< * * @cython.wraparound(False) */ __pyx_t_12 = __pyx_v_l; *__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_res.diminfo[0].strides) += (__pyx_v_mass * __pyx_f_8fatiando_7gravmag_7_sphere_kernelxz(__pyx_v_x, __pyx_v_y, __pyx_v_z, __pyx_v_r_sqr, pow(((double)__pyx_v_r_sqr), 2.5))); } } } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "fatiando/gravmag/_sphere.pyx":230 * size = 
len(xp) * mass = density*4.*numpy.pi*(radius**3)/3. * with nogil: # <<<<<<<<<<<<<< * for l in prange(size): * # First thing to do is make the computation point P the origin of */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L5:; } } /* "fatiando/gravmag/_sphere.pyx":220 * @cython.wraparound(False) * @cython.boundscheck(False) * def gxz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("fatiando.gravmag._sphere.gxz", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "fatiando/gravmag/_sphere.pyx":242 * @cython.wraparound(False) * @cython.boundscheck(False) * def gyy(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* Python wrapper */ static PyObject *__pyx_pw_8fatiando_7gravmag_7_sphere_17gyy(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_8fatiando_7gravmag_7_sphere_16gyy[] = "gyy(ndarray xp, ndarray yp, ndarray zp, double xc, double yc, double zc, double radius, double density, ndarray res)"; static PyMethodDef __pyx_mdef_8fatiando_7gravmag_7_sphere_17gyy = {__Pyx_NAMESTR("gyy"), (PyCFunction)__pyx_pw_8fatiando_7gravmag_7_sphere_17gyy, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_7_sphere_16gyy)}; static PyObject *__pyx_pw_8fatiando_7gravmag_7_sphere_17gyy(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_xp = 0; PyArrayObject *__pyx_v_yp = 0; PyArrayObject *__pyx_v_zp = 0; double __pyx_v_xc; double __pyx_v_yc; double __pyx_v_zc; double __pyx_v_radius; double __pyx_v_density; PyArrayObject *__pyx_v_res = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("gyy (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp,&__pyx_n_s_yp,&__pyx_n_s_zp,&__pyx_n_s_xc,&__pyx_n_s_yc,&__pyx_n_s_zc,&__pyx_n_s_radius,&__pyx_n_s_density,&__pyx_n_s_res,0}; PyObject* values[9] = {0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = 
PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyy", 1, 9, 9, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 242; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyy", 1, 9, 9, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 242; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xc)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyy", 1, 9, 9, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 242; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yc)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyy", 1, 9, 9, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 242; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zc)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyy", 1, 9, 9, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 242; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_radius)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyy", 1, 9, 9, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 242; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_density)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyy", 1, 9, 9, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 242; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyy", 1, 9, 9, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 242; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gyy") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 242; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 9) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); } __pyx_v_xp = ((PyArrayObject *)values[0]); __pyx_v_yp = ((PyArrayObject *)values[1]); __pyx_v_zp = ((PyArrayObject *)values[2]); __pyx_v_xc = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_xc == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 245; __pyx_clineno = 
__LINE__; goto __pyx_L3_error;} __pyx_v_yc = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_yc == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 245; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_zc = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_zc == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 245; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_radius = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_radius == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 245; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_density = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_density == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 246; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_res = ((PyArrayObject *)values[8]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("gyy", 1, 9, 9, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 242; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("fatiando.gravmag._sphere.gyy", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_xp), __pyx_ptype_5numpy_ndarray, 0, "xp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 242; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 243; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 244; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_res), __pyx_ptype_5numpy_ndarray, 0, "res", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 247; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_8fatiando_7gravmag_7_sphere_16gyy(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_xc, __pyx_v_yc, __pyx_v_zc, __pyx_v_radius, __pyx_v_density, __pyx_v_res); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_8fatiando_7gravmag_7_sphere_16gyy(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, double __pyx_v_xc, double __pyx_v_yc, double __pyx_v_zc, double __pyx_v_radius, double __pyx_v_density, PyArrayObject *__pyx_v_res) { unsigned int __pyx_v_l; CYTHON_UNUSED unsigned int __pyx_v_size; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_mass; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_r_sqr; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_x; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_y; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_z; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; __Pyx_LocalBuf_ND __pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject 
*__pyx_t_4 = NULL; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_t_5; unsigned int __pyx_t_6; unsigned int __pyx_t_7; unsigned int __pyx_t_8; unsigned int __pyx_t_9; unsigned int __pyx_t_10; unsigned int __pyx_t_11; unsigned int __pyx_t_12; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("gyy", 0); __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = &__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = &__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject*)__pyx_v_xp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 242; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = __pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject*)__pyx_v_yp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 242; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject*)__pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 242; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject*)__pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 242; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* "fatiando/gravmag/_sphere.pyx":250 * cdef unsigned int l, size * cdef DTYPE_T mass, r_sqr, x, y, z * size = len(xp) # <<<<<<<<<<<<<< * mass = density*4.*numpy.pi*(radius**3)/3. 
* with nogil: */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_xp)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 250; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_size = __pyx_t_1; /* "fatiando/gravmag/_sphere.pyx":251 * cdef DTYPE_T mass, r_sqr, x, y, z * size = len(xp) * mass = density*4.*numpy.pi*(radius**3)/3. # <<<<<<<<<<<<<< * with nogil: * for l in prange(size): */ __pyx_t_2 = PyFloat_FromDouble((__pyx_v_density * 4.)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 251; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 251; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_pi); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 251; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyNumber_Multiply(__pyx_t_2, __pyx_t_4); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 251; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyFloat_FromDouble(pow(__pyx_v_radius, 3.0)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 251; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_2 = PyNumber_Multiply(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 251; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyNumber_Divide(__pyx_t_2, __pyx_float_3_); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 251; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_5 = __pyx_PyFloat_AsDouble(__pyx_t_4); if (unlikely((__pyx_t_5 == (npy_double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 251; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_v_mass = __pyx_t_5; /* "fatiando/gravmag/_sphere.pyx":252 * size = len(xp) * mass = density*4.*numpy.pi*(radius**3)/3. * with nogil: # <<<<<<<<<<<<<< * for l in prange(size): * # First thing to do is make the computation point P the origin of */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { /* "fatiando/gravmag/_sphere.pyx":253 * mass = density*4.*numpy.pi*(radius**3)/3. 
* with nogil: * for l in prange(size): # <<<<<<<<<<<<<< * # First thing to do is make the computation point P the origin of * # the coordinate system */ __pyx_t_6 = __pyx_v_size; if (1 == 0) abort(); { #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_8 = (__pyx_t_6 - 0) / 1; if (__pyx_t_8 > 0) { #ifdef _OPENMP #pragma omp parallel private(__pyx_t_12, __pyx_t_11, __pyx_t_9, __pyx_t_10) #endif /* _OPENMP */ { #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_x) lastprivate(__pyx_v_z) lastprivate(__pyx_v_y) firstprivate(__pyx_v_l) lastprivate(__pyx_v_l) lastprivate(__pyx_v_r_sqr) #endif /* _OPENMP */ for (__pyx_t_7 = 0; __pyx_t_7 < __pyx_t_8; __pyx_t_7++){ { __pyx_v_l = 0 + 1 * __pyx_t_7; /* Initialize private variables to invalid values */ __pyx_v_x = ((__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T)__PYX_NAN()); __pyx_v_z = ((__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T)__PYX_NAN()); __pyx_v_y = ((__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T)__PYX_NAN()); __pyx_v_r_sqr = ((__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T)__PYX_NAN()); /* "fatiando/gravmag/_sphere.pyx":256 * # First thing to do is make the computation point P the origin of * # the coordinate system * x = xc - xp[l] # <<<<<<<<<<<<<< * y = yc - yp[l] * z = zc - zp[l] */ __pyx_t_9 = __pyx_v_l; __pyx_v_x = (__pyx_v_xc - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_sphere.pyx":257 * # the coordinate system * x = xc - xp[l] * y = yc - yp[l] # <<<<<<<<<<<<<< * z = zc - zp[l] * r_sqr = x**2 + y**2 + z**2 */ __pyx_t_10 = __pyx_v_l; __pyx_v_y = (__pyx_v_yc - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_sphere.pyx":258 * x = xc - xp[l] * y = yc - yp[l] * z = zc - zp[l] # <<<<<<<<<<<<<< * r_sqr = x**2 + y**2 + z**2 * res[l] += mass*kernelyy(x, y, z, r_sqr, r_sqr**(2.5)) */ __pyx_t_11 = __pyx_v_l; __pyx_v_z = (__pyx_v_zc - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_11, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_sphere.pyx":259 * y = yc - yp[l] * z = zc - zp[l] * r_sqr = x**2 + y**2 + z**2 # <<<<<<<<<<<<<< * res[l] += mass*kernelyy(x, y, z, r_sqr, r_sqr**(2.5)) * */ __pyx_v_r_sqr = ((pow(__pyx_v_x, 2.0) + pow(__pyx_v_y, 2.0)) + pow(__pyx_v_z, 2.0)); /* "fatiando/gravmag/_sphere.pyx":260 * z = zc - zp[l] * r_sqr = x**2 + y**2 + z**2 * res[l] += mass*kernelyy(x, y, z, r_sqr, r_sqr**(2.5)) # <<<<<<<<<<<<<< * * @cython.wraparound(False) */ __pyx_t_12 = __pyx_v_l; *__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_res.diminfo[0].strides) += (__pyx_v_mass * __pyx_f_8fatiando_7gravmag_7_sphere_kernelyy(__pyx_v_x, __pyx_v_y, __pyx_v_z, __pyx_v_r_sqr, pow(((double)__pyx_v_r_sqr), 2.5))); } } } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "fatiando/gravmag/_sphere.pyx":252 * size = 
len(xp) * mass = density*4.*numpy.pi*(radius**3)/3. * with nogil: # <<<<<<<<<<<<<< * for l in prange(size): * # First thing to do is make the computation point P the origin of */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L5:; } } /* "fatiando/gravmag/_sphere.pyx":242 * @cython.wraparound(False) * @cython.boundscheck(False) * def gyy(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("fatiando.gravmag._sphere.gyy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "fatiando/gravmag/_sphere.pyx":264 * @cython.wraparound(False) * @cython.boundscheck(False) * def gyz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* Python wrapper */ static PyObject *__pyx_pw_8fatiando_7gravmag_7_sphere_19gyz(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_8fatiando_7gravmag_7_sphere_18gyz[] = "gyz(ndarray xp, ndarray yp, ndarray zp, double xc, double yc, double zc, double radius, double density, ndarray res)"; static PyMethodDef __pyx_mdef_8fatiando_7gravmag_7_sphere_19gyz = {__Pyx_NAMESTR("gyz"), (PyCFunction)__pyx_pw_8fatiando_7gravmag_7_sphere_19gyz, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_7_sphere_18gyz)}; static PyObject *__pyx_pw_8fatiando_7gravmag_7_sphere_19gyz(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_xp = 0; PyArrayObject *__pyx_v_yp = 0; PyArrayObject *__pyx_v_zp = 0; double __pyx_v_xc; double __pyx_v_yc; double __pyx_v_zc; double __pyx_v_radius; double __pyx_v_density; PyArrayObject *__pyx_v_res = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("gyz (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp,&__pyx_n_s_yp,&__pyx_n_s_zp,&__pyx_n_s_xc,&__pyx_n_s_yc,&__pyx_n_s_zc,&__pyx_n_s_radius,&__pyx_n_s_density,&__pyx_n_s_res,0}; PyObject* values[9] = {0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = 
PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyz", 1, 9, 9, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 264; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyz", 1, 9, 9, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 264; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xc)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyz", 1, 9, 9, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 264; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yc)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyz", 1, 9, 9, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 264; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zc)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyz", 1, 9, 9, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 264; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_radius)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyz", 1, 9, 9, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 264; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_density)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyz", 1, 9, 9, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 264; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyz", 1, 9, 9, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 264; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gyz") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 264; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 9) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); } __pyx_v_xp = ((PyArrayObject *)values[0]); __pyx_v_yp = ((PyArrayObject *)values[1]); __pyx_v_zp = ((PyArrayObject *)values[2]); __pyx_v_xc = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_xc == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 267; __pyx_clineno = 
__LINE__; goto __pyx_L3_error;} __pyx_v_yc = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_yc == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 267; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_zc = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_zc == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 267; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_radius = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_radius == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 267; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_density = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_density == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 268; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_res = ((PyArrayObject *)values[8]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("gyz", 1, 9, 9, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 264; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("fatiando.gravmag._sphere.gyz", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_xp), __pyx_ptype_5numpy_ndarray, 0, "xp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 264; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 265; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 266; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_res), __pyx_ptype_5numpy_ndarray, 0, "res", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 269; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_8fatiando_7gravmag_7_sphere_18gyz(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_xc, __pyx_v_yc, __pyx_v_zc, __pyx_v_radius, __pyx_v_density, __pyx_v_res); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_8fatiando_7gravmag_7_sphere_18gyz(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, double __pyx_v_xc, double __pyx_v_yc, double __pyx_v_zc, double __pyx_v_radius, double __pyx_v_density, PyArrayObject *__pyx_v_res) { unsigned int __pyx_v_l; CYTHON_UNUSED unsigned int __pyx_v_size; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_mass; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_r_sqr; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_x; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_y; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_z; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; __Pyx_LocalBuf_ND __pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject 
*__pyx_t_4 = NULL; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_t_5; unsigned int __pyx_t_6; unsigned int __pyx_t_7; unsigned int __pyx_t_8; unsigned int __pyx_t_9; unsigned int __pyx_t_10; unsigned int __pyx_t_11; unsigned int __pyx_t_12; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("gyz", 0); __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = &__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = &__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject*)__pyx_v_xp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 264; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = __pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject*)__pyx_v_yp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 264; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject*)__pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 264; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject*)__pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 264; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* "fatiando/gravmag/_sphere.pyx":272 * cdef unsigned int l, size * cdef DTYPE_T mass, r_sqr, x, y, z * size = len(xp) # <<<<<<<<<<<<<< * mass = density*4.*numpy.pi*(radius**3)/3. 
* with nogil: */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_xp)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_size = __pyx_t_1; /* "fatiando/gravmag/_sphere.pyx":273 * cdef DTYPE_T mass, r_sqr, x, y, z * size = len(xp) * mass = density*4.*numpy.pi*(radius**3)/3. # <<<<<<<<<<<<<< * with nogil: * for l in prange(size): */ __pyx_t_2 = PyFloat_FromDouble((__pyx_v_density * 4.)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 273; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 273; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_pi); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 273; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyNumber_Multiply(__pyx_t_2, __pyx_t_4); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 273; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyFloat_FromDouble(pow(__pyx_v_radius, 3.0)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 273; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_2 = PyNumber_Multiply(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 273; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyNumber_Divide(__pyx_t_2, __pyx_float_3_); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 273; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_5 = __pyx_PyFloat_AsDouble(__pyx_t_4); if (unlikely((__pyx_t_5 == (npy_double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 273; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_v_mass = __pyx_t_5; /* "fatiando/gravmag/_sphere.pyx":274 * size = len(xp) * mass = density*4.*numpy.pi*(radius**3)/3. * with nogil: # <<<<<<<<<<<<<< * for l in prange(size): * # First thing to do is make the computation point P the origin of */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { /* "fatiando/gravmag/_sphere.pyx":275 * mass = density*4.*numpy.pi*(radius**3)/3. 
* with nogil: * for l in prange(size): # <<<<<<<<<<<<<< * # First thing to do is make the computation point P the origin of * # the coordinate system */ __pyx_t_6 = __pyx_v_size; if (1 == 0) abort(); { #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_8 = (__pyx_t_6 - 0) / 1; if (__pyx_t_8 > 0) { #ifdef _OPENMP #pragma omp parallel private(__pyx_t_12, __pyx_t_11, __pyx_t_9, __pyx_t_10) #endif /* _OPENMP */ { #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_x) lastprivate(__pyx_v_y) lastprivate(__pyx_v_z) lastprivate(__pyx_v_r_sqr) firstprivate(__pyx_v_l) lastprivate(__pyx_v_l) #endif /* _OPENMP */ for (__pyx_t_7 = 0; __pyx_t_7 < __pyx_t_8; __pyx_t_7++){ { __pyx_v_l = 0 + 1 * __pyx_t_7; /* Initialize private variables to invalid values */ __pyx_v_x = ((__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T)__PYX_NAN()); __pyx_v_y = ((__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T)__PYX_NAN()); __pyx_v_z = ((__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T)__PYX_NAN()); __pyx_v_r_sqr = ((__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T)__PYX_NAN()); /* "fatiando/gravmag/_sphere.pyx":278 * # First thing to do is make the computation point P the origin of * # the coordinate system * x = xc - xp[l] # <<<<<<<<<<<<<< * y = yc - yp[l] * z = zc - zp[l] */ __pyx_t_9 = __pyx_v_l; __pyx_v_x = (__pyx_v_xc - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_sphere.pyx":279 * # the coordinate system * x = xc - xp[l] * y = yc - yp[l] # <<<<<<<<<<<<<< * z = zc - zp[l] * r_sqr = x**2 + y**2 + z**2 */ __pyx_t_10 = __pyx_v_l; __pyx_v_y = (__pyx_v_yc - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_sphere.pyx":280 * x = xc - xp[l] * y = yc - yp[l] * z = zc - zp[l] # <<<<<<<<<<<<<< * r_sqr = x**2 + y**2 + z**2 * res[l] += mass*kernelyz(x, y, z, r_sqr, r_sqr**(2.5)) */ __pyx_t_11 = __pyx_v_l; __pyx_v_z = (__pyx_v_zc - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_11, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_sphere.pyx":281 * y = yc - yp[l] * z = zc - zp[l] * r_sqr = x**2 + y**2 + z**2 # <<<<<<<<<<<<<< * res[l] += mass*kernelyz(x, y, z, r_sqr, r_sqr**(2.5)) * */ __pyx_v_r_sqr = ((pow(__pyx_v_x, 2.0) + pow(__pyx_v_y, 2.0)) + pow(__pyx_v_z, 2.0)); /* "fatiando/gravmag/_sphere.pyx":282 * z = zc - zp[l] * r_sqr = x**2 + y**2 + z**2 * res[l] += mass*kernelyz(x, y, z, r_sqr, r_sqr**(2.5)) # <<<<<<<<<<<<<< * * @cython.wraparound(False) */ __pyx_t_12 = __pyx_v_l; *__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_res.diminfo[0].strides) += (__pyx_v_mass * __pyx_f_8fatiando_7gravmag_7_sphere_kernelyz(__pyx_v_x, __pyx_v_y, __pyx_v_z, __pyx_v_r_sqr, pow(((double)__pyx_v_r_sqr), 2.5))); } } } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "fatiando/gravmag/_sphere.pyx":274 * size = 
len(xp) * mass = density*4.*numpy.pi*(radius**3)/3. * with nogil: # <<<<<<<<<<<<<< * for l in prange(size): * # First thing to do is make the computation point P the origin of */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L5:; } } /* "fatiando/gravmag/_sphere.pyx":264 * @cython.wraparound(False) * @cython.boundscheck(False) * def gyz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("fatiando.gravmag._sphere.gyz", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "fatiando/gravmag/_sphere.pyx":286 * @cython.wraparound(False) * @cython.boundscheck(False) * def gzz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* Python wrapper */ static PyObject *__pyx_pw_8fatiando_7gravmag_7_sphere_21gzz(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_8fatiando_7gravmag_7_sphere_20gzz[] = "gzz(ndarray xp, ndarray yp, ndarray zp, double xc, double yc, double zc, double radius, double density, ndarray res)"; static PyMethodDef __pyx_mdef_8fatiando_7gravmag_7_sphere_21gzz = {__Pyx_NAMESTR("gzz"), (PyCFunction)__pyx_pw_8fatiando_7gravmag_7_sphere_21gzz, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_7_sphere_20gzz)}; static PyObject *__pyx_pw_8fatiando_7gravmag_7_sphere_21gzz(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_xp = 0; PyArrayObject *__pyx_v_yp = 0; PyArrayObject *__pyx_v_zp = 0; double __pyx_v_xc; double __pyx_v_yc; double __pyx_v_zc; double __pyx_v_radius; double __pyx_v_density; PyArrayObject *__pyx_v_res = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("gzz (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp,&__pyx_n_s_yp,&__pyx_n_s_zp,&__pyx_n_s_xc,&__pyx_n_s_yc,&__pyx_n_s_zc,&__pyx_n_s_radius,&__pyx_n_s_density,&__pyx_n_s_res,0}; PyObject* values[9] = {0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = 
PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gzz", 1, 9, 9, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 286; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gzz", 1, 9, 9, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 286; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xc)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gzz", 1, 9, 9, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 286; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yc)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gzz", 1, 9, 9, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 286; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zc)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gzz", 1, 9, 9, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 286; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_radius)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gzz", 1, 9, 9, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 286; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_density)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gzz", 1, 9, 9, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 286; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gzz", 1, 9, 9, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 286; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gzz") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 286; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 9) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); } __pyx_v_xp = ((PyArrayObject *)values[0]); __pyx_v_yp = ((PyArrayObject *)values[1]); __pyx_v_zp = ((PyArrayObject *)values[2]); __pyx_v_xc = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_xc == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 289; __pyx_clineno = 
__LINE__; goto __pyx_L3_error;} __pyx_v_yc = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_yc == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 289; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_zc = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_zc == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 289; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_radius = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_radius == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 289; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_density = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_density == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 290; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_res = ((PyArrayObject *)values[8]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("gzz", 1, 9, 9, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 286; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("fatiando.gravmag._sphere.gzz", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_xp), __pyx_ptype_5numpy_ndarray, 0, "xp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 286; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 287; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 288; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_res), __pyx_ptype_5numpy_ndarray, 0, "res", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 291; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_8fatiando_7gravmag_7_sphere_20gzz(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_xc, __pyx_v_yc, __pyx_v_zc, __pyx_v_radius, __pyx_v_density, __pyx_v_res); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_8fatiando_7gravmag_7_sphere_20gzz(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, double __pyx_v_xc, double __pyx_v_yc, double __pyx_v_zc, double __pyx_v_radius, double __pyx_v_density, PyArrayObject *__pyx_v_res) { unsigned int __pyx_v_l; CYTHON_UNUSED unsigned int __pyx_v_size; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_mass; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_r_sqr; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_x; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_y; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_v_z; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; __Pyx_LocalBuf_ND __pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject 
*__pyx_t_4 = NULL; __pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T __pyx_t_5; unsigned int __pyx_t_6; unsigned int __pyx_t_7; unsigned int __pyx_t_8; unsigned int __pyx_t_9; unsigned int __pyx_t_10; unsigned int __pyx_t_11; unsigned int __pyx_t_12; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("gzz", 0); __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = &__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = &__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject*)__pyx_v_xp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 286; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = __pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject*)__pyx_v_yp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 286; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject*)__pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 286; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject*)__pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 286; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* "fatiando/gravmag/_sphere.pyx":294 * cdef unsigned int l, size * cdef DTYPE_T mass, r_sqr, x, y, z * size = len(xp) # <<<<<<<<<<<<<< * mass = density*4.*numpy.pi*(radius**3)/3. 
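 * [editor's note, not part of the generated Cython output] The echoed line above is the
 * total mass of the sphere, density times (4/3) times pi times radius cubed; in the loop
 * below each res[l] is incremented by mass times kernelzz(x, y, z, r_sqr, r_sqr**(2.5)),
 * so the sphere is in effect treated as a point mass at its centre (xc, yc, zc).
 * A minimal usage sketch of the exposed Python function, assuming DTYPE_T is
 * numpy.float64; the coordinates, radius and density values are hypothetical:
 *     import numpy
 *     from fatiando.gravmag import _sphere
 *     xp = numpy.array([0.0]); yp = numpy.array([0.0]); zp = numpy.array([-1.0])
 *     res = numpy.zeros_like(xp)
 *     _sphere.gzz(xp, yp, zp, 0.0, 0.0, 100.0, 10.0, 2670.0, res)  # accumulates into res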
* with nogil: */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_xp)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 294; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_size = __pyx_t_1; /* "fatiando/gravmag/_sphere.pyx":295 * cdef DTYPE_T mass, r_sqr, x, y, z * size = len(xp) * mass = density*4.*numpy.pi*(radius**3)/3. # <<<<<<<<<<<<<< * with nogil: * for l in prange(size): */ __pyx_t_2 = PyFloat_FromDouble((__pyx_v_density * 4.)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 295; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 295; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_pi); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 295; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyNumber_Multiply(__pyx_t_2, __pyx_t_4); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 295; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyFloat_FromDouble(pow(__pyx_v_radius, 3.0)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 295; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_2 = PyNumber_Multiply(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 295; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyNumber_Divide(__pyx_t_2, __pyx_float_3_); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 295; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_5 = __pyx_PyFloat_AsDouble(__pyx_t_4); if (unlikely((__pyx_t_5 == (npy_double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 295; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_v_mass = __pyx_t_5; /* "fatiando/gravmag/_sphere.pyx":296 * size = len(xp) * mass = density*4.*numpy.pi*(radius**3)/3. * with nogil: # <<<<<<<<<<<<<< * for l in prange(size): * # First thing to do is make the computation point P the origin of */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { /* "fatiando/gravmag/_sphere.pyx":297 * mass = density*4.*numpy.pi*(radius**3)/3. 
* with nogil: * for l in prange(size): # <<<<<<<<<<<<<< * # First thing to do is make the computation point P the origin of * # the coordinate system */ __pyx_t_6 = __pyx_v_size; if (1 == 0) abort(); { #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_8 = (__pyx_t_6 - 0) / 1; if (__pyx_t_8 > 0) { #ifdef _OPENMP #pragma omp parallel private(__pyx_t_12, __pyx_t_11, __pyx_t_9, __pyx_t_10) #endif /* _OPENMP */ { #ifdef _OPENMP #pragma omp for firstprivate(__pyx_v_l) lastprivate(__pyx_v_l) lastprivate(__pyx_v_z) lastprivate(__pyx_v_y) lastprivate(__pyx_v_r_sqr) lastprivate(__pyx_v_x) #endif /* _OPENMP */ for (__pyx_t_7 = 0; __pyx_t_7 < __pyx_t_8; __pyx_t_7++){ { __pyx_v_l = 0 + 1 * __pyx_t_7; /* Initialize private variables to invalid values */ __pyx_v_z = ((__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T)__PYX_NAN()); __pyx_v_y = ((__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T)__PYX_NAN()); __pyx_v_r_sqr = ((__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T)__PYX_NAN()); __pyx_v_x = ((__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T)__PYX_NAN()); /* "fatiando/gravmag/_sphere.pyx":300 * # First thing to do is make the computation point P the origin of * # the coordinate system * x = xc - xp[l] # <<<<<<<<<<<<<< * y = yc - yp[l] * z = zc - zp[l] */ __pyx_t_9 = __pyx_v_l; __pyx_v_x = (__pyx_v_xc - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_sphere.pyx":301 * # the coordinate system * x = xc - xp[l] * y = yc - yp[l] # <<<<<<<<<<<<<< * z = zc - zp[l] * r_sqr = x**2 + y**2 + z**2 */ __pyx_t_10 = __pyx_v_l; __pyx_v_y = (__pyx_v_yc - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_sphere.pyx":302 * x = xc - xp[l] * y = yc - yp[l] * z = zc - zp[l] # <<<<<<<<<<<<<< * r_sqr = x**2 + y**2 + z**2 * res[l] += mass*kernelzz(x, y, z, r_sqr, r_sqr**(2.5)) */ __pyx_t_11 = __pyx_v_l; __pyx_v_z = (__pyx_v_zc - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_11, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_sphere.pyx":303 * y = yc - yp[l] * z = zc - zp[l] * r_sqr = x**2 + y**2 + z**2 # <<<<<<<<<<<<<< * res[l] += mass*kernelzz(x, y, z, r_sqr, r_sqr**(2.5)) */ __pyx_v_r_sqr = ((pow(__pyx_v_x, 2.0) + pow(__pyx_v_y, 2.0)) + pow(__pyx_v_z, 2.0)); /* "fatiando/gravmag/_sphere.pyx":304 * z = zc - zp[l] * r_sqr = x**2 + y**2 + z**2 * res[l] += mass*kernelzz(x, y, z, r_sqr, r_sqr**(2.5)) # <<<<<<<<<<<<<< */ __pyx_t_12 = __pyx_v_l; *__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_7_sphere_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_res.diminfo[0].strides) += (__pyx_v_mass * __pyx_f_8fatiando_7gravmag_7_sphere_kernelzz(__pyx_v_x, __pyx_v_y, __pyx_v_z, __pyx_v_r_sqr, pow(((double)__pyx_v_r_sqr), 2.5))); } } } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "fatiando/gravmag/_sphere.pyx":296 * size = len(xp) * mass = 
density*4.*numpy.pi*(radius**3)/3. * with nogil: # <<<<<<<<<<<<<< * for l in prange(size): * # First thing to do is make the computation point P the origin of */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L5:; } } /* "fatiando/gravmag/_sphere.pyx":286 * @cython.wraparound(False) * @cython.boundscheck(False) * def gzz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("fatiando.gravmag._sphere.gzz", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":194 * # experimental exception made for __getbuffer__ and __releasebuffer__ * # -- the details of this may change. * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< * # This implementation of getbuffer is geared towards Cython * # requirements, and does not yet fullfill the PEP. 
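 * [editor's note, not part of the generated Cython output] The __getbuffer__ code that
 * follows fills a Py_buffer from the ndarray: it copies or aliases the shape and strides,
 * and for simple (non-record) dtypes it selects a single struct-style format character
 * ("b", "B", "h", ..., "Zd", "O") from the dtype's type_num, rejecting non-native byte
 * order with ValueError. A rough Python-level analogue of that mapping, for illustration
 * only:
 *     import numpy
 *     assert numpy.dtype(numpy.float64).char == 'd'
 *     assert memoryview(numpy.zeros(3)).format == 'd'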
*/ /* Python wrapper */ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_pf_5numpy_7ndarray___getbuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_v_copy_shape; int __pyx_v_i; int __pyx_v_ndim; int __pyx_v_endian_detector; int __pyx_v_little_endian; int __pyx_v_t; char *__pyx_v_f; PyArray_Descr *__pyx_v_descr = 0; int __pyx_v_offset; int __pyx_v_hasfields; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; char *__pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getbuffer__", 0); if (__pyx_v_info != NULL) { __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":200 * # of flags * * if info == NULL: return # <<<<<<<<<<<<<< * * cdef int copy_shape, i, ndim */ __pyx_t_1 = ((__pyx_v_info == NULL) != 0); if (__pyx_t_1) { __pyx_r = 0; goto __pyx_L0; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":203 * * cdef int copy_shape, i, ndim * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) * */ __pyx_v_endian_detector = 1; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":204 * cdef int copy_shape, i, ndim * cdef int endian_detector = 1 * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * * ndim = PyArray_NDIM(self) */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":206 * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) * * ndim = PyArray_NDIM(self) # <<<<<<<<<<<<<< * * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_v_ndim = PyArray_NDIM(__pyx_v_self); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":208 * ndim = PyArray_NDIM(self) * * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * copy_shape = 1 * else: */ __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0); if (__pyx_t_1) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":209 * * if sizeof(npy_intp) != sizeof(Py_ssize_t): * copy_shape = 1 # <<<<<<<<<<<<<< * else: * copy_shape = 0 */ __pyx_v_copy_shape = 1; goto __pyx_L4; } /*else*/ { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":211 * copy_shape = 1 * else: * copy_shape = 0 # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) */ __pyx_v_copy_shape = 0; } __pyx_L4:; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":213 * copy_shape = 0 * * if ((flags & 
pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */ __pyx_t_1 = (((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS) != 0); if (__pyx_t_1) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":214 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not C contiguous") * */ __pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_C_CONTIGUOUS) != 0)) != 0); __pyx_t_3 = __pyx_t_2; } else { __pyx_t_3 = __pyx_t_1; } if (__pyx_t_3) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":215 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":217 * raise ValueError(u"ndarray is not C contiguous") * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */ __pyx_t_3 = (((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS) != 0); if (__pyx_t_3) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":218 * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not Fortran contiguous") * */ __pyx_t_1 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_F_CONTIGUOUS) != 0)) != 0); __pyx_t_2 = __pyx_t_1; } else { __pyx_t_2 = __pyx_t_3; } if (__pyx_t_2) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":219 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":221 * raise ValueError(u"ndarray is not Fortran contiguous") * * info.buf = PyArray_DATA(self) # <<<<<<<<<<<<<< * info.ndim = ndim * if copy_shape: */ __pyx_v_info->buf = PyArray_DATA(__pyx_v_self); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":222 * * info.buf = PyArray_DATA(self) * info.ndim = ndim # 
<<<<<<<<<<<<<< * if copy_shape: * # Allocate new buffer for strides and shape info. */ __pyx_v_info->ndim = __pyx_v_ndim; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":223 * info.buf = PyArray_DATA(self) * info.ndim = ndim * if copy_shape: # <<<<<<<<<<<<<< * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. */ __pyx_t_2 = (__pyx_v_copy_shape != 0); if (__pyx_t_2) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":226 * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. * info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2) # <<<<<<<<<<<<<< * info.shape = info.strides + ndim * for i in range(ndim): */ __pyx_v_info->strides = ((Py_ssize_t *)malloc((((sizeof(Py_ssize_t)) * ((size_t)__pyx_v_ndim)) * 2))); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":227 * # This is allocated as one block, strides first. * info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2) * info.shape = info.strides + ndim # <<<<<<<<<<<<<< * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] */ __pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":228 * info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2) * info.shape = info.strides + ndim * for i in range(ndim): # <<<<<<<<<<<<<< * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] */ __pyx_t_5 = __pyx_v_ndim; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":229 * info.shape = info.strides + ndim * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] # <<<<<<<<<<<<<< * info.shape[i] = PyArray_DIMS(self)[i] * else: */ (__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(__pyx_v_self)[__pyx_v_i]); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":230 * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] # <<<<<<<<<<<<<< * else: * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) */ (__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(__pyx_v_self)[__pyx_v_i]); } goto __pyx_L7; } /*else*/ { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":232 * info.shape[i] = PyArray_DIMS(self)[i] * else: * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) # <<<<<<<<<<<<<< * info.shape = <Py_ssize_t*>PyArray_DIMS(self) * info.suboffsets = NULL */ __pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(__pyx_v_self)); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":233 * else: * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) * info.shape = <Py_ssize_t*>PyArray_DIMS(self) # <<<<<<<<<<<<<< * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) */ __pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(__pyx_v_self)); } __pyx_L7:; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":234 * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) * info.shape = <Py_ssize_t*>PyArray_DIMS(self) * info.suboffsets = NULL # <<<<<<<<<<<<<< * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not 
PyArray_ISWRITEABLE(self) */ __pyx_v_info->suboffsets = NULL; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":235 * info.shape = <Py_ssize_t*>PyArray_DIMS(self) * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) # <<<<<<<<<<<<<< * info.readonly = not PyArray_ISWRITEABLE(self) * */ __pyx_v_info->itemsize = PyArray_ITEMSIZE(__pyx_v_self); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":236 * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) # <<<<<<<<<<<<<< * * cdef int t */ __pyx_v_info->readonly = (!(PyArray_ISWRITEABLE(__pyx_v_self) != 0)); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":239 * * cdef int t * cdef char* f = NULL # <<<<<<<<<<<<<< * cdef dtype descr = self.descr * cdef list stack */ __pyx_v_f = NULL; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":240 * cdef int t * cdef char* f = NULL * cdef dtype descr = self.descr # <<<<<<<<<<<<<< * cdef list stack * cdef int offset */ __pyx_t_4 = ((PyObject *)__pyx_v_self->descr); __Pyx_INCREF(__pyx_t_4); __pyx_v_descr = ((PyArray_Descr *)__pyx_t_4); __pyx_t_4 = 0; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":244 * cdef int offset * * cdef bint hasfields = PyDataType_HASFIELDS(descr) # <<<<<<<<<<<<<< * * if not hasfields and not copy_shape: */ __pyx_v_hasfields = PyDataType_HASFIELDS(__pyx_v_descr); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":246 * cdef bint hasfields = PyDataType_HASFIELDS(descr) * * if not hasfields and not copy_shape: # <<<<<<<<<<<<<< * # do not call releasebuffer * info.obj = None */ __pyx_t_2 = ((!(__pyx_v_hasfields != 0)) != 0); if (__pyx_t_2) { __pyx_t_3 = ((!(__pyx_v_copy_shape != 0)) != 0); __pyx_t_1 = __pyx_t_3; } else { __pyx_t_1 = __pyx_t_2; } if (__pyx_t_1) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":248 * if not hasfields and not copy_shape: * # do not call releasebuffer * info.obj = None # <<<<<<<<<<<<<< * else: * # need to call releasebuffer */ __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = Py_None; goto __pyx_L10; } /*else*/ { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":251 * else: * # need to call releasebuffer * info.obj = self # <<<<<<<<<<<<<< * * if not hasfields: */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); } __pyx_L10:; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":253 * info.obj = self * * if not hasfields: # <<<<<<<<<<<<<< * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or */ __pyx_t_1 = ((!(__pyx_v_hasfields != 0)) != 0); if (__pyx_t_1) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":254 * * if not hasfields: * t = descr.type_num # <<<<<<<<<<<<<< * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): */ __pyx_t_5 = __pyx_v_descr->type_num; __pyx_v_t = __pyx_t_5; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":255 * if not 
hasfields: * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_1 = ((__pyx_v_descr->byteorder == '>') != 0); if (__pyx_t_1) { __pyx_t_2 = (__pyx_v_little_endian != 0); } else { __pyx_t_2 = __pyx_t_1; } if (!__pyx_t_2) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":256 * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" */ __pyx_t_1 = ((__pyx_v_descr->byteorder == '<') != 0); if (__pyx_t_1) { __pyx_t_3 = ((!(__pyx_v_little_endian != 0)) != 0); __pyx_t_7 = __pyx_t_3; } else { __pyx_t_7 = __pyx_t_1; } __pyx_t_1 = __pyx_t_7; } else { __pyx_t_1 = __pyx_t_2; } if (__pyx_t_1) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":257 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":274 * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ switch (__pyx_v_t) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":258 * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" */ case NPY_BYTE: __pyx_v_f = __pyx_k_b; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":259 * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" */ case NPY_UBYTE: __pyx_v_f = __pyx_k_B; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":260 * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" */ case NPY_SHORT: __pyx_v_f = __pyx_k_h; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":261 * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" */ case NPY_USHORT: __pyx_v_f = __pyx_k_H; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":262 * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" # <<<<<<<<<<<<<< * 
elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" */ case NPY_INT: __pyx_v_f = __pyx_k_i; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":263 * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" */ case NPY_UINT: __pyx_v_f = __pyx_k_I; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":264 * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" */ case NPY_LONG: __pyx_v_f = __pyx_k_l; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":265 * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" */ case NPY_ULONG: __pyx_v_f = __pyx_k_L; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":266 * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" */ case NPY_LONGLONG: __pyx_v_f = __pyx_k_q; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":267 * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" */ case NPY_ULONGLONG: __pyx_v_f = __pyx_k_Q; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":268 * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" */ case NPY_FLOAT: __pyx_v_f = __pyx_k_f; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":269 * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" */ case NPY_DOUBLE: __pyx_v_f = __pyx_k_d; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":270 * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" */ case NPY_LONGDOUBLE: __pyx_v_f = __pyx_k_g; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":271 * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" */ case NPY_CFLOAT: __pyx_v_f = __pyx_k_Zf; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":272 * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" */ case NPY_CDOUBLE: __pyx_v_f = __pyx_k_Zd; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":273 * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" * elif t == 
NPY_CLONGDOUBLE: f = "Zg" # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f = "O" * else: */ case NPY_CLONGDOUBLE: __pyx_v_f = __pyx_k_Zg; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":274 * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ case NPY_OBJECT: __pyx_v_f = __pyx_k_O; break; default: /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":276 * elif t == NPY_OBJECT: f = "O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * info.format = f * return */ __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_t); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_8 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_t_4); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_8); __Pyx_GIVEREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_8, 0, 0, 0); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} break; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":277 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f # <<<<<<<<<<<<<< * return * else: */ __pyx_v_info->format = __pyx_v_f; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":278 * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f * return # <<<<<<<<<<<<<< * else: * info.format = <char*>stdlib.malloc(_buffer_format_string_len) */ __pyx_r = 0; goto __pyx_L0; } /*else*/ { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":280 * return * else: * info.format = <char*>stdlib.malloc(_buffer_format_string_len) # <<<<<<<<<<<<<< * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 */ __pyx_v_info->format = ((char *)malloc(255)); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":281 * else: * info.format = <char*>stdlib.malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment # <<<<<<<<<<<<<< * offset = 0 * f = _util_dtypestring(descr, info.format + 1, */ (__pyx_v_info->format[0]) = '^'; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":282 * info.format = <char*>stdlib.malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 # <<<<<<<<<<<<<< * f = _util_dtypestring(descr, info.format + 1, * info.format + _buffer_format_string_len, */ __pyx_v_offset = 0; /* 
"/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":283 * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 * f = _util_dtypestring(descr, info.format + 1, # <<<<<<<<<<<<<< * info.format + _buffer_format_string_len, * &offset) */ __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 255), (&__pyx_v_offset)); if (unlikely(__pyx_t_9 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 283; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_f = __pyx_t_9; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":286 * info.format + _buffer_format_string_len, * &offset) * f[0] = c'\0' # Terminate format string # <<<<<<<<<<<<<< * * def __releasebuffer__(ndarray self, Py_buffer* info): */ (__pyx_v_f[0]) = '\x00'; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":194 * # experimental exception made for __getbuffer__ and __releasebuffer__ * # -- the details of this may change. * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< * # This implementation of getbuffer is geared towards Cython * # requirements, and does not yet fullfill the PEP. */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) { __Pyx_GOTREF(Py_None); __Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL; } __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_descr); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":288 * f[0] = c'\0' # Terminate format string * * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< * if PyArray_HASFIELDS(self): * stdlib.free(info.format) */ /* Python wrapper */ static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/ static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__releasebuffer__ (wrapper)", 0); __pyx_pf_5numpy_7ndarray_2__releasebuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info) { __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__releasebuffer__", 0); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":289 * * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<< * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_t_1 = (PyArray_HASFIELDS(__pyx_v_self) != 0); if (__pyx_t_1) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":290 * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): * stdlib.free(info.format) # <<<<<<<<<<<<<< * if sizeof(npy_intp) != sizeof(Py_ssize_t): * 
stdlib.free(info.strides) */ free(__pyx_v_info->format); goto __pyx_L3; } __pyx_L3:; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":291 * if PyArray_HASFIELDS(self): * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * stdlib.free(info.strides) * # info.shape was stored after info.strides in the same block */ __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0); if (__pyx_t_1) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":292 * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): * stdlib.free(info.strides) # <<<<<<<<<<<<<< * # info.shape was stored after info.strides in the same block * */ free(__pyx_v_info->strides); goto __pyx_L4; } __pyx_L4:; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":288 * f[0] = c'\0' # Terminate format string * * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< * if PyArray_HASFIELDS(self): * stdlib.free(info.format) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":768 * ctypedef npy_cdouble complex_t * * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(1, <void*>a) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":769 * * cdef inline object PyArray_MultiIterNew1(a): * return PyArray_MultiIterNew(1, <void*>a) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew2(a, b): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 769; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":768 * ctypedef npy_cdouble complex_t * * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(1, <void*>a) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":771 * return PyArray_MultiIterNew(1, <void*>a) * * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(2, <void*>a, <void*>b) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":772 * * cdef inline object PyArray_MultiIterNew2(a, b): * return PyArray_MultiIterNew(2, <void*>a, <void*>b) # <<<<<<<<<<<<<< * * 
cdef inline object PyArray_MultiIterNew3(a, b, c): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 772; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":771 * return PyArray_MultiIterNew(1, <void*>a) * * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(2, <void*>a, <void*>b) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":774 * return PyArray_MultiIterNew(2, <void*>a, <void*>b) * * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":775 * * cdef inline object PyArray_MultiIterNew3(a, b, c): * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 775; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":774 * return PyArray_MultiIterNew(2, <void*>a, <void*>b) * * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":777 * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":778 * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): * return 
PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 778; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":777 * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":780 * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":781 * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) # <<<<<<<<<<<<<< * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 781; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":780 * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":783 * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< * # Recursive utility function used in __getbuffer__ to get format * # string. The new location in the format string is returned. 
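 * [editor's note, not part of the generated Cython output] _util_dtypestring walks
 * descr.fields and appends one format character per leaf field, emitting 'x' pad bytes
 * to cover gaps between field offsets; it raises RuntimeError when the preallocated
 * format buffer would run short and ValueError on non-native byte order. For
 * illustration only, a packed structured dtype such as
 *     numpy.dtype([('a', numpy.int32), ('b', numpy.float64)])
 * would end up with info.format reading something like "^id" (the leading '^' is
 * written by __getbuffer__ above; 'i' and 'd' come from the two fields).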
*/ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) { PyArray_Descr *__pyx_v_child = 0; int __pyx_v_endian_detector; int __pyx_v_little_endian; PyObject *__pyx_v_fields = 0; PyObject *__pyx_v_childname = NULL; PyObject *__pyx_v_new_offset = NULL; PyObject *__pyx_v_t = NULL; char *__pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; Py_ssize_t __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_t_7; int __pyx_t_8; int __pyx_t_9; long __pyx_t_10; char *__pyx_t_11; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_util_dtypestring", 0); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":790 * cdef int delta_offset * cdef tuple i * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) * cdef tuple fields */ __pyx_v_endian_detector = 1; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":791 * cdef tuple i * cdef int endian_detector = 1 * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * cdef tuple fields * */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":794 * cdef tuple fields * * for childname in descr.names: # <<<<<<<<<<<<<< * fields = descr.fields[childname] * child, new_offset = fields */ if (unlikely(__pyx_v_descr->names == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_1 = __pyx_v_descr->names; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; for (;;) { if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else __pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif __Pyx_XDECREF_SET(__pyx_v_childname, __pyx_t_3); __pyx_t_3 = 0; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":795 * * for childname in descr.names: * fields = descr.fields[childname] # <<<<<<<<<<<<<< * child, new_offset = fields * */ __pyx_t_3 = PyObject_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (unlikely(__pyx_t_3 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; __Pyx_GOTREF(__pyx_t_3); if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_t_3)->tp_name), 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_XDECREF_SET(__pyx_v_fields, ((PyObject*)__pyx_t_3)); __pyx_t_3 = 0; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":796 * for childname in descr.names: * fields = descr.fields[childname] * child, new_offset = fields # <<<<<<<<<<<<<< * * if (end - f) - <int>(new_offset - offset[0]) < 15: */ if 
(likely(__pyx_v_fields != Py_None)) { PyObject* sequence = __pyx_v_fields; #if CYTHON_COMPILING_IN_CPYTHON Py_ssize_t size = Py_SIZE(sequence); #else Py_ssize_t size = PySequence_Size(sequence); #endif if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); #else __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); #endif } else { __Pyx_RaiseNoneNotIterableError(); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_XDECREF_SET(__pyx_v_child, ((PyArray_Descr *)__pyx_t_3)); __pyx_t_3 = 0; __Pyx_XDECREF_SET(__pyx_v_new_offset, __pyx_t_4); __pyx_t_4 = 0; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":798 * child, new_offset = fields * * if (end - f) - <int>(new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * */ __pyx_t_4 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_4); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = ((((__pyx_v_end - __pyx_v_f) - ((int)__pyx_t_5)) < 15) != 0); if (__pyx_t_6) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":799 * * if (end - f) - <int>(new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":801 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (child.byteorder == c'<' 
and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_6 = ((__pyx_v_child->byteorder == '>') != 0); if (__pyx_t_6) { __pyx_t_7 = (__pyx_v_little_endian != 0); } else { __pyx_t_7 = __pyx_t_6; } if (!__pyx_t_7) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":802 * * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * # One could encode it in the format string and have Cython */ __pyx_t_6 = ((__pyx_v_child->byteorder == '<') != 0); if (__pyx_t_6) { __pyx_t_8 = ((!(__pyx_v_little_endian != 0)) != 0); __pyx_t_9 = __pyx_t_8; } else { __pyx_t_9 = __pyx_t_6; } __pyx_t_6 = __pyx_t_9; } else { __pyx_t_6 = __pyx_t_7; } if (__pyx_t_6) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":803 * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":813 * * # Output padding bytes * while offset[0] < new_offset: # <<<<<<<<<<<<<< * f[0] = 120 # "x"; pad byte * f += 1 */ while (1) { __pyx_t_3 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_t_3, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (!__pyx_t_6) break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":814 * # Output padding bytes * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte # <<<<<<<<<<<<<< * f += 1 * offset[0] += 1 */ (__pyx_v_f[0]) = 120; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":815 * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte * f += 1 # <<<<<<<<<<<<<< * offset[0] += 1 * */ __pyx_v_f = (__pyx_v_f + 1); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":816 * f[0] = 120 # "x"; pad byte * f += 1 * offset[0] += 1 # <<<<<<<<<<<<<< * * offset[0] += child.itemsize */ __pyx_t_10 = 0; (__pyx_v_offset[__pyx_t_10]) = ((__pyx_v_offset[__pyx_t_10]) + 1); } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":818 * offset[0] += 1 * * offset[0] += child.itemsize # <<<<<<<<<<<<<< * * if not PyDataType_HASFIELDS(child): */ 
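/* Bookkeeping note: the loop above wrote one 'x' (ASCII 120) pad character for every
 * byte between the current write offset and this field's starting offset (new_offset);
 * the statement below then advances offset[0] past the field itself using the child
 * descriptor's element size (child.itemsize in the .pxd source, ->elsize here). */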
__pyx_t_10 = 0; (__pyx_v_offset[__pyx_t_10]) = ((__pyx_v_offset[__pyx_t_10]) + __pyx_v_child->elsize); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":820 * offset[0] += child.itemsize * * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< * t = child.type_num * if end - f < 5: */ __pyx_t_6 = ((!(PyDataType_HASFIELDS(__pyx_v_child) != 0)) != 0); if (__pyx_t_6) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":821 * * if not PyDataType_HASFIELDS(child): * t = child.type_num # <<<<<<<<<<<<<< * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") */ __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_child->type_num); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 821; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_XDECREF_SET(__pyx_v_t, __pyx_t_4); __pyx_t_4 = 0; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":822 * if not PyDataType_HASFIELDS(child): * t = child.type_num * if end - f < 5: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short.") * */ __pyx_t_6 = (((__pyx_v_end - __pyx_v_f) < 5) != 0); if (__pyx_t_6) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":823 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":826 * * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" */ __pyx_t_4 = PyInt_FromLong(NPY_BYTE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 98; goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":827 * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" */ __pyx_t_3 = PyInt_FromLong(NPY_UBYTE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if 
(unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 66; goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":828 * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" */ __pyx_t_4 = PyInt_FromLong(NPY_SHORT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 104; goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":829 * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" */ __pyx_t_3 = PyInt_FromLong(NPY_USHORT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 72; goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":830 * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" */ __pyx_t_4 = PyInt_FromLong(NPY_INT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 105; goto __pyx_L11; } /* 
"/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":831 * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" */ __pyx_t_3 = PyInt_FromLong(NPY_UINT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 73; goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":832 * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" */ __pyx_t_4 = PyInt_FromLong(NPY_LONG); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 108; goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":833 * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" */ __pyx_t_3 = PyInt_FromLong(NPY_ULONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 76; goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":834 * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" */ __pyx_t_4 = PyInt_FromLong(NPY_LONGLONG); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); 
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 113; goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":835 * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" */ __pyx_t_3 = PyInt_FromLong(NPY_ULONGLONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 81; goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":836 * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" */ __pyx_t_4 = PyInt_FromLong(NPY_FLOAT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 102; goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":837 * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf */ __pyx_t_3 = PyInt_FromLong(NPY_DOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} 
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 100; goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":838 * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd */ __pyx_t_4 = PyInt_FromLong(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 103; goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":839 * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg */ __pyx_t_3 = PyInt_FromLong(NPY_CFLOAT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 102; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":840 * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" */ __pyx_t_4 = PyInt_FromLong(NPY_CDOUBLE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 100; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":841 * 
elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: */ __pyx_t_3 = PyInt_FromLong(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 103; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":842 * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ __pyx_t_4 = PyInt_FromLong(NPY_OBJECT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 79; goto __pyx_L11; } /*else*/ { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":844 * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * f += 1 * else: */ __pyx_t_3 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_v_t); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_L11:; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":845 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * f += 1 # <<<<<<<<<<<<<< * else: * # Cython ignores struct boundary information ("T{...}"), */ __pyx_v_f = 
(__pyx_v_f + 1); goto __pyx_L9; } /*else*/ { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":849 * # Cython ignores struct boundary information ("T{...}"), * # so don't output it * f = _util_dtypestring(child, f, end, offset) # <<<<<<<<<<<<<< * return f * */ __pyx_t_11 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_11 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 849; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_f = __pyx_t_11; } __pyx_L9:; } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":850 * # so don't output it * f = _util_dtypestring(child, f, end, offset) * return f # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_f; goto __pyx_L0; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":783 * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< * # Recursive utility function used in __getbuffer__ to get format * # string. The new location in the format string is returned. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_child); __Pyx_XDECREF(__pyx_v_fields); __Pyx_XDECREF(__pyx_v_childname); __Pyx_XDECREF(__pyx_v_new_offset); __Pyx_XDECREF(__pyx_v_t); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":966 * * * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< * cdef PyObject* baseptr * if base is None: */ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) { PyObject *__pyx_v_baseptr; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; __Pyx_RefNannySetupContext("set_array_base", 0); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":968 * cdef inline void set_array_base(ndarray arr, object base): * cdef PyObject* baseptr * if base is None: # <<<<<<<<<<<<<< * baseptr = NULL * else: */ __pyx_t_1 = (__pyx_v_base == Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":969 * cdef PyObject* baseptr * if base is None: * baseptr = NULL # <<<<<<<<<<<<<< * else: * Py_INCREF(base) # important to do this before decref below! */ __pyx_v_baseptr = NULL; goto __pyx_L3; } /*else*/ { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":971 * baseptr = NULL * else: * Py_INCREF(base) # important to do this before decref below! # <<<<<<<<<<<<<< * baseptr = <PyObject*>base * Py_XDECREF(arr.base) */ Py_INCREF(__pyx_v_base); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":972 * else: * Py_INCREF(base) # important to do this before decref below! 
* baseptr = <PyObject*>base # <<<<<<<<<<<<<< * Py_XDECREF(arr.base) * arr.base = baseptr */ __pyx_v_baseptr = ((PyObject *)__pyx_v_base); } __pyx_L3:; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":973 * Py_INCREF(base) # important to do this before decref below! * baseptr = <PyObject*>base * Py_XDECREF(arr.base) # <<<<<<<<<<<<<< * arr.base = baseptr * */ Py_XDECREF(__pyx_v_arr->base); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":974 * baseptr = <PyObject*>base * Py_XDECREF(arr.base) * arr.base = baseptr # <<<<<<<<<<<<<< * * cdef inline object get_array_base(ndarray arr): */ __pyx_v_arr->base = __pyx_v_baseptr; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":966 * * * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< * cdef PyObject* baseptr * if base is None: */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":976 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("get_array_base", 0); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":977 * * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: # <<<<<<<<<<<<<< * return None * else: */ __pyx_t_1 = ((__pyx_v_arr->base == NULL) != 0); if (__pyx_t_1) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":978 * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: * return None # <<<<<<<<<<<<<< * else: * return <object>arr.base */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_None); __pyx_r = Py_None; goto __pyx_L0; } /*else*/ { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":980 * return None * else: * return <object>arr.base # <<<<<<<<<<<<<< */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_arr->base)); __pyx_r = ((PyObject *)__pyx_v_arr->base); goto __pyx_L0; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":976 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyMethodDef __pyx_methods[] = { {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 static struct PyModuleDef __pyx_moduledef = { #if PY_VERSION_HEX < 0x03020000 { PyObject_HEAD_INIT(NULL) NULL, 0, NULL }, #else PyModuleDef_HEAD_INIT, #endif __Pyx_NAMESTR("_sphere"), __Pyx_DOCSTR(__pyx_k_Cython_implementation_of_the_gr), /* m_doc */ -1, /* m_size */ __pyx_methods /* m_methods */, NULL, /* m_reload */ NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_n_s_DTYPE, __pyx_k_DTYPE, sizeof(__pyx_k_DTYPE), 0, 0, 1, 1}, {&__pyx_kp_u_Format_string_allocated_too_shor, __pyx_k_Format_string_allocated_too_shor, sizeof(__pyx_k_Format_string_allocated_too_shor), 0, 1, 0, 0}, {&__pyx_kp_u_Format_string_allocated_too_shor_2, __pyx_k_Format_string_allocated_too_shor_2, sizeof(__pyx_k_Format_string_allocated_too_shor_2), 0, 
1, 0, 0}, {&__pyx_kp_u_Non_native_byte_order_not_suppor, __pyx_k_Non_native_byte_order_not_suppor, sizeof(__pyx_k_Non_native_byte_order_not_suppor), 0, 1, 0, 0}, {&__pyx_n_s_RuntimeError, __pyx_k_RuntimeError, sizeof(__pyx_k_RuntimeError), 0, 0, 1, 1}, {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1}, {&__pyx_n_s_bx, __pyx_k_bx, sizeof(__pyx_k_bx), 0, 0, 1, 1}, {&__pyx_n_s_by, __pyx_k_by, sizeof(__pyx_k_by), 0, 0, 1, 1}, {&__pyx_n_s_bz, __pyx_k_bz, sizeof(__pyx_k_bz), 0, 0, 1, 1}, {&__pyx_n_s_density, __pyx_k_density, sizeof(__pyx_k_density), 0, 0, 1, 1}, {&__pyx_n_s_fatiando_gravmag__sphere, __pyx_k_fatiando_gravmag__sphere, sizeof(__pyx_k_fatiando_gravmag__sphere), 0, 0, 1, 1}, {&__pyx_n_s_float, __pyx_k_float, sizeof(__pyx_k_float), 0, 0, 1, 1}, {&__pyx_n_s_fx, __pyx_k_fx, sizeof(__pyx_k_fx), 0, 0, 1, 1}, {&__pyx_n_s_fy, __pyx_k_fy, sizeof(__pyx_k_fy), 0, 0, 1, 1}, {&__pyx_n_s_fz, __pyx_k_fz, sizeof(__pyx_k_fz), 0, 0, 1, 1}, {&__pyx_n_s_gxx, __pyx_k_gxx, sizeof(__pyx_k_gxx), 0, 0, 1, 1}, {&__pyx_n_s_gxy, __pyx_k_gxy, sizeof(__pyx_k_gxy), 0, 0, 1, 1}, {&__pyx_n_s_gxz, __pyx_k_gxz, sizeof(__pyx_k_gxz), 0, 0, 1, 1}, {&__pyx_n_s_gyy, __pyx_k_gyy, sizeof(__pyx_k_gyy), 0, 0, 1, 1}, {&__pyx_n_s_gyz, __pyx_k_gyz, sizeof(__pyx_k_gyz), 0, 0, 1, 1}, {&__pyx_n_s_gz, __pyx_k_gz, sizeof(__pyx_k_gz), 0, 0, 1, 1}, {&__pyx_n_s_gzz, __pyx_k_gzz, sizeof(__pyx_k_gzz), 0, 0, 1, 1}, {&__pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_k_home_leo_src_fatiando_fatiando, sizeof(__pyx_k_home_leo_src_fatiando_fatiando), 0, 0, 1, 0}, {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, {&__pyx_n_s_l, __pyx_k_l, sizeof(__pyx_k_l), 0, 0, 1, 1}, {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, {&__pyx_n_s_mass, __pyx_k_mass, sizeof(__pyx_k_mass), 0, 0, 1, 1}, {&__pyx_n_s_mx, __pyx_k_mx, sizeof(__pyx_k_mx), 0, 0, 1, 1}, {&__pyx_n_s_my, __pyx_k_my, sizeof(__pyx_k_my), 0, 0, 1, 1}, {&__pyx_n_s_mz, __pyx_k_mz, sizeof(__pyx_k_mz), 0, 0, 1, 1}, {&__pyx_kp_u_ndarray_is_not_C_contiguous, __pyx_k_ndarray_is_not_C_contiguous, sizeof(__pyx_k_ndarray_is_not_C_contiguous), 0, 1, 0, 0}, {&__pyx_kp_u_ndarray_is_not_Fortran_contiguou, __pyx_k_ndarray_is_not_Fortran_contiguou, sizeof(__pyx_k_ndarray_is_not_Fortran_contiguou), 0, 1, 0, 0}, {&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1}, {&__pyx_n_s_pi, __pyx_k_pi, sizeof(__pyx_k_pi), 0, 0, 1, 1}, {&__pyx_n_s_pyx_getbuffer, __pyx_k_pyx_getbuffer, sizeof(__pyx_k_pyx_getbuffer), 0, 0, 1, 1}, {&__pyx_n_s_pyx_releasebuffer, __pyx_k_pyx_releasebuffer, sizeof(__pyx_k_pyx_releasebuffer), 0, 0, 1, 1}, {&__pyx_n_s_r_5, __pyx_k_r_5, sizeof(__pyx_k_r_5), 0, 0, 1, 1}, {&__pyx_n_s_r_cb, __pyx_k_r_cb, sizeof(__pyx_k_r_cb), 0, 0, 1, 1}, {&__pyx_n_s_r_sqr, __pyx_k_r_sqr, sizeof(__pyx_k_r_sqr), 0, 0, 1, 1}, {&__pyx_n_s_radius, __pyx_k_radius, sizeof(__pyx_k_radius), 0, 0, 1, 1}, {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, {&__pyx_n_s_res, __pyx_k_res, sizeof(__pyx_k_res), 0, 0, 1, 1}, {&__pyx_n_s_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 1}, {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, {&__pyx_n_s_tf, __pyx_k_tf, sizeof(__pyx_k_tf), 0, 0, 1, 1}, {&__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_k_unknown_dtype_code_in_numpy_pxd, sizeof(__pyx_k_unknown_dtype_code_in_numpy_pxd), 0, 1, 0, 0}, {&__pyx_n_s_v1, __pyx_k_v1, sizeof(__pyx_k_v1), 0, 0, 1, 1}, {&__pyx_n_s_v2, __pyx_k_v2, sizeof(__pyx_k_v2), 0, 0, 1, 1}, {&__pyx_n_s_v3, __pyx_k_v3, 
sizeof(__pyx_k_v3), 0, 0, 1, 1}, {&__pyx_n_s_v4, __pyx_k_v4, sizeof(__pyx_k_v4), 0, 0, 1, 1}, {&__pyx_n_s_v5, __pyx_k_v5, sizeof(__pyx_k_v5), 0, 0, 1, 1}, {&__pyx_n_s_v6, __pyx_k_v6, sizeof(__pyx_k_v6), 0, 0, 1, 1}, {&__pyx_n_s_volume, __pyx_k_volume, sizeof(__pyx_k_volume), 0, 0, 1, 1}, {&__pyx_n_s_x, __pyx_k_x, sizeof(__pyx_k_x), 0, 0, 1, 1}, {&__pyx_n_s_xc, __pyx_k_xc, sizeof(__pyx_k_xc), 0, 0, 1, 1}, {&__pyx_n_s_xp, __pyx_k_xp, sizeof(__pyx_k_xp), 0, 0, 1, 1}, {&__pyx_n_s_y, __pyx_k_y, sizeof(__pyx_k_y), 0, 0, 1, 1}, {&__pyx_n_s_yc, __pyx_k_yc, sizeof(__pyx_k_yc), 0, 0, 1, 1}, {&__pyx_n_s_yp, __pyx_k_yp, sizeof(__pyx_k_yp), 0, 0, 1, 1}, {&__pyx_n_s_z, __pyx_k_z, sizeof(__pyx_k_z), 0, 0, 1, 1}, {&__pyx_n_s_zc, __pyx_k_zc, sizeof(__pyx_k_zc), 0, 0, 1, 1}, {&__pyx_n_s_zp, __pyx_k_zp, sizeof(__pyx_k_zp), 0, 0, 1, 1}, {0, 0, 0, 0, 0, 0, 0} }; static int __Pyx_InitCachedBuiltins(void) { __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 228; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_builtin_RuntimeError = __Pyx_GetBuiltinName(__pyx_n_s_RuntimeError); if (!__pyx_builtin_RuntimeError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} return 0; __pyx_L1_error:; return -1; } static int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":215 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_C_contiguous); if (unlikely(!__pyx_tuple_)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple_); __Pyx_GIVEREF(__pyx_tuple_); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":219 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_Fortran_contiguou); if (unlikely(!__pyx_tuple__2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__2); __Pyx_GIVEREF(__pyx_tuple__2); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":257 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__3); __Pyx_GIVEREF(__pyx_tuple__3); /* 
"/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":799 * * if (end - f) - <int>(new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor); if (unlikely(!__pyx_tuple__4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__4); __Pyx_GIVEREF(__pyx_tuple__4); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":803 * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__5); __Pyx_GIVEREF(__pyx_tuple__5); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":823 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor_2); if (unlikely(!__pyx_tuple__6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__6); __Pyx_GIVEREF(__pyx_tuple__6); /* "fatiando/gravmag/_sphere.pyx":40 * @cython.wraparound(False) * @cython.boundscheck(False) * def tf(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__7 = PyTuple_Pack(31, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_xc, __pyx_n_s_yc, __pyx_n_s_zc, __pyx_n_s_radius, __pyx_n_s_mx, __pyx_n_s_my, __pyx_n_s_mz, __pyx_n_s_fx, __pyx_n_s_fy, __pyx_n_s_fz, __pyx_n_s_res, __pyx_n_s_l, __pyx_n_s_size, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z, __pyx_n_s_volume, __pyx_n_s_r_sqr, __pyx_n_s_r_5, __pyx_n_s_v1, __pyx_n_s_v2, __pyx_n_s_v3, __pyx_n_s_v4, __pyx_n_s_v5, __pyx_n_s_v6, __pyx_n_s_bx, __pyx_n_s_by, __pyx_n_s_bz); if (unlikely(!__pyx_tuple__7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 40; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__7); __Pyx_GIVEREF(__pyx_tuple__7); __pyx_codeobj__8 = (PyObject*)__Pyx_PyCode_New(14, 0, 31, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__7, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_tf, 40, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 40; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "fatiando/gravmag/_sphere.pyx":73 * @cython.wraparound(False) * @cython.boundscheck(False) * def bx(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__9 = PyTuple_Pack(22, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_xc, __pyx_n_s_yc, __pyx_n_s_zc, __pyx_n_s_radius, __pyx_n_s_mx, __pyx_n_s_my, __pyx_n_s_mz, 
__pyx_n_s_res, __pyx_n_s_l, __pyx_n_s_size, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z, __pyx_n_s_volume, __pyx_n_s_r_sqr, __pyx_n_s_r_5, __pyx_n_s_v1, __pyx_n_s_v2, __pyx_n_s_v3); if (unlikely(!__pyx_tuple__9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__9); __Pyx_GIVEREF(__pyx_tuple__9); __pyx_codeobj__10 = (PyObject*)__Pyx_PyCode_New(11, 0, 22, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__9, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_bx, 73, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "fatiando/gravmag/_sphere.pyx":100 * @cython.wraparound(False) * @cython.boundscheck(False) * def by(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__11 = PyTuple_Pack(22, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_xc, __pyx_n_s_yc, __pyx_n_s_zc, __pyx_n_s_radius, __pyx_n_s_mx, __pyx_n_s_my, __pyx_n_s_mz, __pyx_n_s_res, __pyx_n_s_l, __pyx_n_s_size, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z, __pyx_n_s_volume, __pyx_n_s_r_sqr, __pyx_n_s_r_5, __pyx_n_s_v2, __pyx_n_s_v4, __pyx_n_s_v5); if (unlikely(!__pyx_tuple__11)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 100; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__11); __Pyx_GIVEREF(__pyx_tuple__11); __pyx_codeobj__12 = (PyObject*)__Pyx_PyCode_New(11, 0, 22, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__11, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_by, 100, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 100; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "fatiando/gravmag/_sphere.pyx":127 * @cython.wraparound(False) * @cython.boundscheck(False) * def bz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__13 = PyTuple_Pack(22, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_xc, __pyx_n_s_yc, __pyx_n_s_zc, __pyx_n_s_radius, __pyx_n_s_mx, __pyx_n_s_my, __pyx_n_s_mz, __pyx_n_s_res, __pyx_n_s_l, __pyx_n_s_size, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z, __pyx_n_s_volume, __pyx_n_s_r_sqr, __pyx_n_s_r_5, __pyx_n_s_v3, __pyx_n_s_v5, __pyx_n_s_v6); if (unlikely(!__pyx_tuple__13)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 127; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__13); __Pyx_GIVEREF(__pyx_tuple__13); __pyx_codeobj__14 = (PyObject*)__Pyx_PyCode_New(11, 0, 22, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__13, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_bz, 127, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__14)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 127; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "fatiando/gravmag/_sphere.pyx":154 * @cython.wraparound(False) * @cython.boundscheck(False) * def gz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__15 = PyTuple_Pack(16, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_xc, __pyx_n_s_yc, __pyx_n_s_zc, __pyx_n_s_radius, 
__pyx_n_s_density, __pyx_n_s_res, __pyx_n_s_l, __pyx_n_s_size, __pyx_n_s_mass, __pyx_n_s_r_cb, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z); if (unlikely(!__pyx_tuple__15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 154; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__15); __Pyx_GIVEREF(__pyx_tuple__15); __pyx_codeobj__16 = (PyObject*)__Pyx_PyCode_New(9, 0, 16, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__15, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_gz, 154, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__16)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 154; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "fatiando/gravmag/_sphere.pyx":176 * @cython.wraparound(False) * @cython.boundscheck(False) * def gxx(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__17 = PyTuple_Pack(16, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_xc, __pyx_n_s_yc, __pyx_n_s_zc, __pyx_n_s_radius, __pyx_n_s_density, __pyx_n_s_res, __pyx_n_s_l, __pyx_n_s_size, __pyx_n_s_mass, __pyx_n_s_r_sqr, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z); if (unlikely(!__pyx_tuple__17)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 176; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__17); __Pyx_GIVEREF(__pyx_tuple__17); __pyx_codeobj__18 = (PyObject*)__Pyx_PyCode_New(9, 0, 16, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__17, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_gxx, 176, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__18)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 176; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "fatiando/gravmag/_sphere.pyx":198 * @cython.wraparound(False) * @cython.boundscheck(False) * def gxy(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__19 = PyTuple_Pack(16, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_xc, __pyx_n_s_yc, __pyx_n_s_zc, __pyx_n_s_radius, __pyx_n_s_density, __pyx_n_s_res, __pyx_n_s_l, __pyx_n_s_size, __pyx_n_s_mass, __pyx_n_s_r_sqr, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z); if (unlikely(!__pyx_tuple__19)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 198; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__19); __Pyx_GIVEREF(__pyx_tuple__19); __pyx_codeobj__20 = (PyObject*)__Pyx_PyCode_New(9, 0, 16, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__19, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_gxy, 198, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__20)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 198; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "fatiando/gravmag/_sphere.pyx":220 * @cython.wraparound(False) * @cython.boundscheck(False) * def gxz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__21 = PyTuple_Pack(16, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_xc, __pyx_n_s_yc, __pyx_n_s_zc, __pyx_n_s_radius, __pyx_n_s_density, __pyx_n_s_res, __pyx_n_s_l, __pyx_n_s_size, __pyx_n_s_mass, __pyx_n_s_r_sqr, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z); if (unlikely(!__pyx_tuple__21)) {__pyx_filename = __pyx_f[0]; 
__pyx_lineno = 220; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__21); __Pyx_GIVEREF(__pyx_tuple__21); __pyx_codeobj__22 = (PyObject*)__Pyx_PyCode_New(9, 0, 16, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__21, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_gxz, 220, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__22)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 220; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "fatiando/gravmag/_sphere.pyx":242 * @cython.wraparound(False) * @cython.boundscheck(False) * def gyy(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__23 = PyTuple_Pack(16, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_xc, __pyx_n_s_yc, __pyx_n_s_zc, __pyx_n_s_radius, __pyx_n_s_density, __pyx_n_s_res, __pyx_n_s_l, __pyx_n_s_size, __pyx_n_s_mass, __pyx_n_s_r_sqr, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z); if (unlikely(!__pyx_tuple__23)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 242; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__23); __Pyx_GIVEREF(__pyx_tuple__23); __pyx_codeobj__24 = (PyObject*)__Pyx_PyCode_New(9, 0, 16, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__23, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_gyy, 242, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__24)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 242; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "fatiando/gravmag/_sphere.pyx":264 * @cython.wraparound(False) * @cython.boundscheck(False) * def gyz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__25 = PyTuple_Pack(16, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_xc, __pyx_n_s_yc, __pyx_n_s_zc, __pyx_n_s_radius, __pyx_n_s_density, __pyx_n_s_res, __pyx_n_s_l, __pyx_n_s_size, __pyx_n_s_mass, __pyx_n_s_r_sqr, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z); if (unlikely(!__pyx_tuple__25)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 264; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__25); __Pyx_GIVEREF(__pyx_tuple__25); __pyx_codeobj__26 = (PyObject*)__Pyx_PyCode_New(9, 0, 16, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__25, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_gyz, 264, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__26)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 264; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "fatiando/gravmag/_sphere.pyx":286 * @cython.wraparound(False) * @cython.boundscheck(False) * def gzz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__27 = PyTuple_Pack(16, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_xc, __pyx_n_s_yc, __pyx_n_s_zc, __pyx_n_s_radius, __pyx_n_s_density, __pyx_n_s_res, __pyx_n_s_l, __pyx_n_s_size, __pyx_n_s_mass, __pyx_n_s_r_sqr, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z); if (unlikely(!__pyx_tuple__27)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 286; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__27); __Pyx_GIVEREF(__pyx_tuple__27); __pyx_codeobj__28 = (PyObject*)__Pyx_PyCode_New(9, 0, 16, 0, 0, 
__pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__27, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_gzz, 286, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__28)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 286; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_InitGlobals(void) { /* InitThreads.init */ #ifdef WITH_THREAD PyEval_InitThreads(); #endif if (unlikely(PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; __pyx_float_3_ = PyFloat_FromDouble(3.); if (unlikely(!__pyx_float_3_)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_float_4_ = PyFloat_FromDouble(4.); if (unlikely(!__pyx_float_4_)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} return 0; __pyx_L1_error:; return -1; } #if PY_MAJOR_VERSION < 3 PyMODINIT_FUNC init_sphere(void); /*proto*/ PyMODINIT_FUNC init_sphere(void) #else PyMODINIT_FUNC PyInit__sphere(void); /*proto*/ PyMODINIT_FUNC PyInit__sphere(void) #endif { PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannyDeclarations #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit__sphere(void)", 0); if ( __Pyx_check_binary_version() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #ifdef __Pyx_CyFunction_USED if (__Pyx_CyFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif #ifdef __Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif #ifdef __Pyx_Generator_USED if (__pyx_Generator_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS #ifdef WITH_THREAD /* Python build with threading support? 
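   If so, the GIL machinery is initialised here before the module object is created.
   The code that follows builds the _sphere extension module itself, runs the cached
   string/builtin/constant initialisation defined above, and imports the numpy
   'dtype', 'flatiter' and 'broadcast' extension types used by the generated code.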
*/ PyEval_InitThreads(); #endif #endif /*--- Module creation code ---*/ #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4(__Pyx_NAMESTR("_sphere"), __pyx_methods, __Pyx_DOCSTR(__pyx_k_Cython_implementation_of_the_gr), 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} Py_INCREF(__pyx_d); __pyx_b = PyImport_AddModule(__Pyx_NAMESTR(__Pyx_BUILTIN_MODULE_NAME)); if (unlikely(!__pyx_b)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #if CYTHON_COMPILING_IN_PYPY Py_INCREF(__pyx_b); #endif if (__Pyx_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; /*--- Initialize various global constants etc. ---*/ if (unlikely(__Pyx_InitGlobals() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) if (__Pyx_init_sys_getdefaultencoding_params() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif if (__pyx_module_is_main_fatiando__gravmag___sphere) { if (__Pyx_SetAttrString(__pyx_m, "__name__", __pyx_n_s_main) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; } #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (!PyDict_GetItemString(modules, "fatiando.gravmag._sphere")) { if (unlikely(PyDict_SetItemString(modules, "fatiando.gravmag._sphere", __pyx_m) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } } #endif /*--- Builtin init code ---*/ if (unlikely(__Pyx_InitCachedBuiltins() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Constants init code ---*/ if (unlikely(__Pyx_InitCachedConstants() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Global init code ---*/ /*--- Variable export code ---*/ /*--- Function export code ---*/ /*--- Type init code ---*/ /*--- Type import code ---*/ __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "type", #if CYTHON_COMPILING_IN_PYPY sizeof(PyTypeObject), #else sizeof(PyHeapTypeObject), #endif 0); if (unlikely(!__pyx_ptype_7cpython_4type_type)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_dtype = __Pyx_ImportType("numpy", "dtype", sizeof(PyArray_Descr), 0); if (unlikely(!__pyx_ptype_5numpy_dtype)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 155; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_flatiter)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if 
(unlikely(!__pyx_ptype_5numpy_broadcast)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 169; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_5numpy_ndarray)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 178; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); if (unlikely(!__pyx_ptype_5numpy_ufunc)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 861; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Variable import code ---*/ /*--- Function import code ---*/ /*--- Execution code ---*/ /* "fatiando/gravmag/_sphere.pyx":6 * """ * from __future__ import division * import numpy # <<<<<<<<<<<<<< * from cython.parallel cimport prange, parallel * */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 6; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_numpy, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 6; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "fatiando/gravmag/_sphere.pyx":14 * cimport openmp * * DTYPE = numpy.float # <<<<<<<<<<<<<< * ctypedef numpy.float_t DTYPE_T * */ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_float); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (PyDict_SetItem(__pyx_d, __pyx_n_s_DTYPE, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_sphere.pyx":40 * @cython.wraparound(False) * @cython.boundscheck(False) * def tf(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_7_sphere_1tf, NULL, __pyx_n_s_fatiando_gravmag__sphere); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 40; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_tf, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 40; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_sphere.pyx":73 * @cython.wraparound(False) * @cython.boundscheck(False) * def bx(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_7_sphere_3bx, NULL, __pyx_n_s_fatiando_gravmag__sphere); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_bx, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_sphere.pyx":100 * @cython.wraparound(False) * 
@cython.boundscheck(False) * def by(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_7_sphere_5by, NULL, __pyx_n_s_fatiando_gravmag__sphere); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 100; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_by, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 100; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_sphere.pyx":127 * @cython.wraparound(False) * @cython.boundscheck(False) * def bz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_7_sphere_7bz, NULL, __pyx_n_s_fatiando_gravmag__sphere); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 127; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_bz, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 127; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_sphere.pyx":154 * @cython.wraparound(False) * @cython.boundscheck(False) * def gz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_7_sphere_9gz, NULL, __pyx_n_s_fatiando_gravmag__sphere); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 154; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_gz, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 154; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_sphere.pyx":176 * @cython.wraparound(False) * @cython.boundscheck(False) * def gxx(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_7_sphere_11gxx, NULL, __pyx_n_s_fatiando_gravmag__sphere); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 176; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_gxx, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 176; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_sphere.pyx":198 * @cython.wraparound(False) * @cython.boundscheck(False) * def gxy(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_7_sphere_13gxy, NULL, __pyx_n_s_fatiando_gravmag__sphere); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 198; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_gxy, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 198; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; 
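/* Each registration block in this init function follows the same idiom: PyCFunction_NewEx() wraps the
 * compiled implementation of a Cython `def` (the __pyx_mdef_8fatiando_7gravmag_7_sphere_* descriptors)
 * in a Python-callable object, and PyDict_SetItem() publishes it in the module dict under its Python
 * name, which is what lets `from fatiando.gravmag._sphere import gxz` (and the other kernels) resolve
 * at run time. A minimal hand-written sketch of the same pattern, with purely illustrative names
 * (hello, hello_def, mod_name, mod_dict, fn_name) that are not part of this module:
 *
 *     static PyObject *hello(PyObject *self, PyObject *args) { Py_RETURN_NONE; }
 *     static PyMethodDef hello_def = {"hello", hello, METH_VARARGS, "do nothing and return None"};
 *     PyObject *fn = PyCFunction_NewEx(&hello_def, NULL, mod_name);  // mod_name: module name string
 *     if (fn != NULL && PyDict_SetItem(mod_dict, fn_name, fn) == 0) {
 *         // <module>.hello is now importable; the generated code below does this for gxz, gyy, ...
 *     }
 *     Py_XDECREF(fn);  // the module dict holds its own reference
 */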
/* "fatiando/gravmag/_sphere.pyx":220 * @cython.wraparound(False) * @cython.boundscheck(False) * def gxz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_7_sphere_15gxz, NULL, __pyx_n_s_fatiando_gravmag__sphere); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 220; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_gxz, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 220; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_sphere.pyx":242 * @cython.wraparound(False) * @cython.boundscheck(False) * def gyy(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_7_sphere_17gyy, NULL, __pyx_n_s_fatiando_gravmag__sphere); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 242; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_gyy, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 242; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_sphere.pyx":264 * @cython.wraparound(False) * @cython.boundscheck(False) * def gyz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_7_sphere_19gyz, NULL, __pyx_n_s_fatiando_gravmag__sphere); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 264; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_gyz, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 264; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_sphere.pyx":286 * @cython.wraparound(False) * @cython.boundscheck(False) * def gzz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_7_sphere_21gzz, NULL, __pyx_n_s_fatiando_gravmag__sphere); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 286; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_gzz, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 286; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_sphere.pyx":1 * #cython: embedsignature=True # <<<<<<<<<<<<<< * """ * Cython implementation of the gravity and magnetic fields of spheres. 
*/ __pyx_t_2 = PyDict_New(); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":976 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); if (__pyx_m) { __Pyx_AddTraceback("init fatiando.gravmag._sphere", __pyx_clineno, __pyx_lineno, __pyx_filename); Py_DECREF(__pyx_m); __pyx_m = 0; } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init fatiando.gravmag._sphere"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if PY_MAJOR_VERSION < 3 return; #else return __pyx_m; #endif } /* Runtime support code */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule((char *)modname); if (!m) goto end; p = PyObject_GetAttrString(m, (char *)"RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* CYTHON_REFNANNY */ static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb) { #if CYTHON_COMPILING_IN_CPYTHON PyObject *tmp_type, *tmp_value, *tmp_tb; PyThreadState *tstate = PyThreadState_GET(); tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_Restore(type, value, tb); #endif } static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb) { #if CYTHON_COMPILING_IN_CPYTHON PyThreadState *tstate = PyThreadState_GET(); *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(type, value, tb); #endif } static void __Pyx_WriteUnraisable(const char *name, CYTHON_UNUSED int clineno, CYTHON_UNUSED int lineno, CYTHON_UNUSED const char *filename, int full_traceback) { PyObject *old_exc, *old_val, *old_tb; PyObject *ctx; __Pyx_ErrFetch(&old_exc, &old_val, &old_tb); if (full_traceback) { Py_XINCREF(old_exc); Py_XINCREF(old_val); Py_XINCREF(old_tb); __Pyx_ErrRestore(old_exc, old_val, old_tb); PyErr_PrintEx(1); } #if PY_MAJOR_VERSION < 3 ctx = PyString_FromString(name); #else ctx = PyUnicode_FromString(name); #endif __Pyx_ErrRestore(old_exc, old_val, old_tb); if (!ctx) { PyErr_WriteUnraisable(Py_None); } else { PyErr_WriteUnraisable(ctx); Py_DECREF(ctx); } } static void __Pyx_RaiseArgtupleInvalid( const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found) { Py_ssize_t num_expected; const char *more_or_less; if (num_found < num_min) { num_expected = num_min; more_or_less = "at least"; } else { num_expected = num_max; more_or_less = "at most"; } if (exact) { more_or_less = "exactly"; } PyErr_Format(PyExc_TypeError, "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", func_name, 
more_or_less, num_expected, (num_expected == 1) ? "" : "s", num_found); } static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AsString(kw_name)); #endif } static int __Pyx_ParseOptionalKeywords( PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject*** name; PyObject*** first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) name++; if (*name) { values[name-argnames] = value; continue; } name = first_kw_arg; #if PY_MAJOR_VERSION < 3 if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) { while (*name) { if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) && _PyString_Eq(**name, key)) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { if ((**argname == key) || ( (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) && _PyString_Eq(**argname, key))) { goto arg_passed_twice; } argname++; } } } else #endif if (likely(PyUnicode_Check(key))) { while (*name) { int cmp = (**name == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 : #endif PyUnicode_Compare(**name, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { int cmp = (**argname == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 
1 : #endif PyUnicode_Compare(**argname, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) goto arg_passed_twice; argname++; } } } else goto invalid_keyword_type; if (kwds2) { if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, key); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%.200s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%.200s() got an unexpected keyword argument '%.200s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } static void __Pyx_RaiseArgumentTypeInvalid(const char* name, PyObject *obj, PyTypeObject *type) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)", name, type->tp_name, Py_TYPE(obj)->tp_name); } static CYTHON_INLINE int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, const char *name, int exact) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } if (none_allowed && obj == Py_None) return 1; else if (exact) { if (likely(Py_TYPE(obj) == type)) return 1; #if PY_MAJOR_VERSION == 2 else if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1; #endif } else { if (likely(PyObject_TypeCheck(obj, type))) return 1; } __Pyx_RaiseArgumentTypeInvalid(name, obj, type); return 0; } static CYTHON_INLINE int __Pyx_IsLittleEndian(void) { unsigned int n = 1; return *(unsigned char*)(&n) != 0; } static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type) { stack[0].field = &ctx->root; stack[0].parent_offset = 0; ctx->root.type = type; ctx->root.name = "buffer dtype"; ctx->root.offset = 0; ctx->head = stack; ctx->head->field = &ctx->root; ctx->fmt_offset = 0; ctx->head->parent_offset = 0; ctx->new_packmode = '@'; ctx->enc_packmode = '@'; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->is_complex = 0; ctx->is_valid_array = 0; ctx->struct_alignment = 0; while (type->typegroup == 'S') { ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = 0; type = type->fields->type; } } static int __Pyx_BufFmt_ParseNumber(const char** ts) { int count; const char* t = *ts; if (*t < '0' || *t > '9') { return -1; } else { count = *t++ - '0'; while (*t >= '0' && *t < '9') { count *= 10; count += *t++ - '0'; } } *ts = t; return count; } static int __Pyx_BufFmt_ExpectNumber(const char **ts) { int number = __Pyx_BufFmt_ParseNumber(ts); if (number == -1) /* First char was not a digit */ PyErr_Format(PyExc_ValueError,\ "Does not understand character buffer dtype format string ('%c')", **ts); return number; } static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { PyErr_Format(PyExc_ValueError, "Unexpected format string character: '%c'", ch); } static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) { switch (ch) { case 'c': return "'char'"; case 'b': return "'signed char'"; case 'B': return "'unsigned char'"; case 'h': return "'short'"; case 'H': return "'unsigned short'"; case 'i': return "'int'"; case 'I': return "'unsigned int'"; case 'l': return "'long'"; case 'L': return "'unsigned long'"; case 'q': return "'long long'"; case 'Q': return "'unsigned long long'"; case 'f': return (is_complex ? 
"'complex float'" : "'float'"); case 'd': return (is_complex ? "'complex double'" : "'double'"); case 'g': return (is_complex ? "'complex long double'" : "'long double'"); case 'T': return "a struct"; case 'O': return "Python object"; case 'P': return "a pointer"; case 's': case 'p': return "a string"; case 0: return "end"; default: return "unparseable format string"; } } static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return 2; case 'i': case 'I': case 'l': case 'L': return 4; case 'q': case 'Q': return 8; case 'f': return (is_complex ? 8 : 4); case 'd': return (is_complex ? 16 : 8); case 'g': { PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').."); return 0; } case 'O': case 'P': return sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { switch (ch) { case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(short); case 'i': case 'I': return sizeof(int); case 'l': case 'L': return sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(float) * (is_complex ? 2 : 1); case 'd': return sizeof(double) * (is_complex ? 2 : 1); case 'g': return sizeof(long double) * (is_complex ? 2 : 1); case 'O': case 'P': return sizeof(void*); default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } typedef struct { char c; short x; } __Pyx_st_short; typedef struct { char c; int x; } __Pyx_st_int; typedef struct { char c; long x; } __Pyx_st_long; typedef struct { char c; float x; } __Pyx_st_float; typedef struct { char c; double x; } __Pyx_st_double; typedef struct { char c; long double x; } __Pyx_st_longdouble; typedef struct { char c; void *x; } __Pyx_st_void_p; #ifdef HAVE_LONG_LONG typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_st_float) - sizeof(float); case 'd': return sizeof(__Pyx_st_double) - sizeof(double); case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } /* These are for computing the padding at the end of the struct to align on the first member of the struct. This will probably the same as above, but we don't have any guarantees. 
*/ typedef struct { short x; char c; } __Pyx_pad_short; typedef struct { int x; char c; } __Pyx_pad_int; typedef struct { long x; char c; } __Pyx_pad_long; typedef struct { float x; char c; } __Pyx_pad_float; typedef struct { double x; char c; } __Pyx_pad_double; typedef struct { long double x; char c; } __Pyx_pad_longdouble; typedef struct { void *x; char c; } __Pyx_pad_void_p; #ifdef HAVE_LONG_LONG typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_pad_float) - sizeof(float); case 'd': return sizeof(__Pyx_pad_double) - sizeof(double); case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { switch (ch) { case 'c': return 'H'; case 'b': case 'h': case 'i': case 'l': case 'q': case 's': case 'p': return 'I'; case 'B': case 'H': case 'I': case 'L': case 'Q': return 'U'; case 'f': case 'd': case 'g': return (is_complex ? 'C' : 'R'); case 'O': return 'O'; case 'P': return 'P'; default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { if (ctx->head == NULL || ctx->head->field == &ctx->root) { const char* expected; const char* quote; if (ctx->head == NULL) { expected = "end"; quote = ""; } else { expected = ctx->head->field->type->name; quote = "'"; } PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected %s%s%s but got %s", quote, expected, quote, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); } else { __Pyx_StructField* field = ctx->head->field; __Pyx_StructField* parent = (ctx->head - 1)->field; PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), parent->type->name, field->name); } } static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { char group; size_t size, offset, arraysize = 1; if (ctx->enc_type == 0) return 0; if (ctx->head->field->type->arraysize[0]) { int i, ndim = 0; if (ctx->enc_type == 's' || ctx->enc_type == 'p') { ctx->is_valid_array = ctx->head->field->type->ndim == 1; ndim = 1; if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %zu", ctx->head->field->type->arraysize[0], ctx->enc_count); return -1; } } if (!ctx->is_valid_array) { PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", ctx->head->field->type->ndim, ndim); return -1; } for (i = 0; i < ctx->head->field->type->ndim; i++) { arraysize *= ctx->head->field->type->arraysize[i]; } ctx->is_valid_array = 0; ctx->enc_count = 1; } group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex); do { __Pyx_StructField* field = ctx->head->field; __Pyx_TypeInfo* type = field->type; if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { size = 
__Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); } else { size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); } if (ctx->enc_packmode == '@') { size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); size_t align_mod_offset; if (align_at == 0) return -1; align_mod_offset = ctx->fmt_offset % align_at; if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; if (ctx->struct_alignment == 0) ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, ctx->is_complex); } if (type->size != size || type->typegroup != group) { if (type->typegroup == 'C' && type->fields != NULL) { size_t parent_offset = ctx->head->parent_offset + field->offset; ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = parent_offset; continue; } if ((type->typegroup == 'H' || group == 'H') && type->size == size) { } else { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } } offset = ctx->head->parent_offset + field->offset; if (ctx->fmt_offset != offset) { PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); return -1; } ctx->fmt_offset += size; if (arraysize) ctx->fmt_offset += (arraysize - 1) * size; --ctx->enc_count; /* Consume from buffer string */ while (1) { if (field == &ctx->root) { ctx->head = NULL; if (ctx->enc_count != 0) { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } break; /* breaks both loops as ctx->enc_count == 0 */ } ctx->head->field = ++field; if (field->type == NULL) { --ctx->head; field = ctx->head->field; continue; } else if (field->type->typegroup == 'S') { size_t parent_offset = ctx->head->parent_offset + field->offset; if (field->type->fields->type == NULL) continue; /* empty struct */ field = field->type->fields; ++ctx->head; ctx->head->field = field; ctx->head->parent_offset = parent_offset; break; } else { break; } } } while (ctx->enc_count); ctx->enc_type = 0; ctx->is_complex = 0; return 0; } static CYTHON_INLINE PyObject * __pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp) { const char *ts = *tsp; int i = 0, number; int ndim = ctx->head->field->type->ndim; ; ++ts; if (ctx->new_count != 1) { PyErr_SetString(PyExc_ValueError, "Cannot handle repeated arrays in format string"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; while (*ts && *ts != ')') { switch (*ts) { case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': continue; default: break; /* not a 'break' in the loop */ } number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i]) return PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %d", ctx->head->field->type->arraysize[i], number); if (*ts != ',' && *ts != ')') return PyErr_Format(PyExc_ValueError, "Expected a comma in format string, got '%c'", *ts); if (*ts == ',') ts++; i++; } if (i != ndim) return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d", ctx->head->field->type->ndim, i); if (!*ts) { PyErr_SetString(PyExc_ValueError, "Unexpected end of format string, expected ')'"); return NULL; } ctx->is_valid_array = 1; ctx->new_count = 1; *tsp = ++ts; return Py_None; } static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) { int got_Z = 0; while (1) { switch(*ts) { case 0: if (ctx->enc_type != 0 && ctx->head == NULL) { 
__Pyx_BufFmt_RaiseExpected(ctx); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; if (ctx->head != NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } return ts; case ' ': case 10: case 13: ++ts; break; case '<': if (!__Pyx_IsLittleEndian()) { PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '>': case '!': if (__Pyx_IsLittleEndian()) { PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '=': case '@': case '^': ctx->new_packmode = *ts++; break; case 'T': /* substruct */ { const char* ts_after_sub; size_t i, struct_count = ctx->new_count; size_t struct_alignment = ctx->struct_alignment; ctx->new_count = 1; ++ts; if (*ts != '{') { PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; /* Erase processed last struct element */ ctx->enc_count = 0; ctx->struct_alignment = 0; ++ts; ts_after_sub = ts; for (i = 0; i != struct_count; ++i) { ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); if (!ts_after_sub) return NULL; } ts = ts_after_sub; if (struct_alignment) ctx->struct_alignment = struct_alignment; } break; case '}': /* end of substruct; either repeat or move on */ { size_t alignment = ctx->struct_alignment; ++ts; if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; /* Erase processed last struct element */ if (alignment && ctx->fmt_offset % alignment) { ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment); } } return ts; case 'x': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->fmt_offset += ctx->new_count; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->enc_packmode = ctx->new_packmode; ++ts; break; case 'Z': got_Z = 1; ++ts; if (*ts != 'f' && *ts != 'd' && *ts != 'g') { __Pyx_BufFmt_RaiseUnexpectedChar('Z'); return NULL; } /* fall through */ case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': case 'l': case 'L': case 'q': case 'Q': case 'f': case 'd': case 'g': case 'O': case 's': case 'p': if (ctx->enc_type == *ts && got_Z == ctx->is_complex && ctx->enc_packmode == ctx->new_packmode) { ctx->enc_count += ctx->new_count; } else { if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_count = ctx->new_count; ctx->enc_packmode = ctx->new_packmode; ctx->enc_type = *ts; ctx->is_complex = got_Z; } ++ts; ctx->new_count = 1; got_Z = 0; break; case ':': ++ts; while(*ts != ':') ++ts; ++ts; break; case '(': if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL; break; default: { int number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; ctx->new_count = (size_t)number; } } } } static CYTHON_INLINE void __Pyx_ZeroBuffer(Py_buffer* buf) { buf->buf = NULL; buf->obj = NULL; buf->strides = __Pyx_zeros; buf->shape = __Pyx_zeros; buf->suboffsets = __Pyx_minusones; } static CYTHON_INLINE int __Pyx_GetBufferAndValidate( Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack) { if (obj == Py_None || obj == NULL) { __Pyx_ZeroBuffer(buf); return 0; } buf->buf = NULL; if (__Pyx_GetBuffer(obj, buf, flags) == -1) goto fail; if (buf->ndim != nd) { PyErr_Format(PyExc_ValueError, "Buffer has wrong number of dimensions (expected %d, got %d)", nd, buf->ndim); goto fail; } if (!cast) { __Pyx_BufFmt_Context 
ctx; __Pyx_BufFmt_Init(&ctx, stack, dtype); if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail; } if ((unsigned)buf->itemsize != dtype->size) { PyErr_Format(PyExc_ValueError, "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "d byte%s) does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "d byte%s)", buf->itemsize, (buf->itemsize > 1) ? "s" : "", dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? "s" : ""); goto fail; } if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones; return 0; fail:; __Pyx_ZeroBuffer(buf); return -1; } static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) { if (info->buf == NULL) return; if (info->suboffsets == __Pyx_minusones) info->suboffsets = NULL; __Pyx_ReleaseBuffer(info); } static PyObject *__Pyx_GetBuiltinName(PyObject *name) { PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); if (unlikely(!result)) { PyErr_Format(PyExc_NameError, #if PY_MAJOR_VERSION >= 3 "name '%U' is not defined", name); #else "name '%.200s' is not defined", PyString_AS_STRING(name)); #endif } return result; } static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name) { PyObject *result; #if CYTHON_COMPILING_IN_CPYTHON result = PyDict_GetItem(__pyx_d, name); if (result) { Py_INCREF(result); } else { #else result = PyObject_GetItem(__pyx_d, name); if (!result) { PyErr_Clear(); #endif result = __Pyx_GetBuiltinName(name); } return result; } #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { PyObject *result; ternaryfunc call = func->ob_type->tp_call; if (unlikely(!call)) return PyObject_Call(func, arg, kw); #if PY_VERSION_HEX >= 0x02060000 if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; #endif result = (*call)(func, arg, kw); #if PY_VERSION_HEX >= 0x02060000 Py_LeaveRecursiveCall(); #endif if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } } #if PY_VERSION_HEX < 0x02050000 if (PyClass_Check(type)) { #else if (PyType_Check(type)) { #endif #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } value = type; #if PY_VERSION_HEX < 0x02050000 if (PyInstance_Check(type)) { type = (PyObject*) ((PyInstanceObject*)type)->in_class; Py_INCREF(type); } else { type = 0; PyErr_SetString(PyExc_TypeError, "raise: exception must be an old-style class or instance"); goto raise_error; } #else type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } #endif } __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else /* Python 3+ */ static void __Pyx_Raise(PyObject 
*type, PyObject *value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject *instance_class = NULL; if (value && PyExceptionInstance_Check(value)) { instance_class = (PyObject*) Py_TYPE(value); if (instance_class != type) { if (PyObject_IsSubclass(instance_class, type)) { type = instance_class; } else { instance_class = NULL; } } } if (!instance_class) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyObject_Call(type, args, NULL); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if (!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } #if PY_VERSION_HEX >= 0x03030000 if (cause) { #else if (cause && cause != Py_None) { #endif PyObject *fixed_cause; if (cause == Py_None) { fixed_cause = NULL; } else if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto bad; } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { PyThreadState *tstate = PyThreadState_GET(); PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } } bad: Py_XDECREF(owned_instance); return; } #endif static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { PyErr_Format(PyExc_ValueError, "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); } static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { PyErr_Format(PyExc_ValueError, "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", index, (index == 1) ? 
"" : "s"); } static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); } static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } if (likely(PyObject_TypeCheck(obj, type))) return 1; PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", Py_TYPE(obj)->tp_name, type->tp_name); return 0; } #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { #if PY_VERSION_HEX >= 0x02060000 if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); #endif if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) return __pyx_pw_5numpy_7ndarray_1__getbuffer__(obj, view, flags); #if PY_VERSION_HEX < 0x02060000 if (obj->ob_type->tp_dict) { PyObject *getbuffer_cobj = PyObject_GetItem( obj->ob_type->tp_dict, __pyx_n_s_pyx_getbuffer); if (getbuffer_cobj) { getbufferproc func = (getbufferproc) PyCObject_AsVoidPtr(getbuffer_cobj); Py_DECREF(getbuffer_cobj); if (!func) goto fail; return func(obj, view, flags); } else { PyErr_Clear(); } } #endif PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name); #if PY_VERSION_HEX < 0x02060000 fail: #endif return -1; } static void __Pyx_ReleaseBuffer(Py_buffer *view) { PyObject *obj = view->obj; if (!obj) return; #if PY_VERSION_HEX >= 0x02060000 if (PyObject_CheckBuffer(obj)) { PyBuffer_Release(view); return; } #endif if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) { __pyx_pw_5numpy_7ndarray_3__releasebuffer__(obj, view); return; } #if PY_VERSION_HEX < 0x02060000 if (obj->ob_type->tp_dict) { PyObject *releasebuffer_cobj = PyObject_GetItem( obj->ob_type->tp_dict, __pyx_n_s_pyx_releasebuffer); if (releasebuffer_cobj) { releasebufferproc func = (releasebufferproc) PyCObject_AsVoidPtr(releasebuffer_cobj); Py_DECREF(releasebuffer_cobj); if (!func) goto fail; func(obj, view); return; } else { PyErr_Clear(); } } #endif goto nofail; #if PY_VERSION_HEX < 0x02060000 fail: #endif PyErr_WriteUnraisable(obj); nofail: Py_DECREF(obj); view->obj = NULL; } #endif /* PY_MAJOR_VERSION < 3 */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; #if PY_VERSION_HEX < 0x03030000 PyObject *py_import; py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); if (!py_import) goto bad; #endif if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if (!empty_dict) goto bad; #if PY_VERSION_HEX >= 0x02050000 { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if (strchr(__Pyx_MODULE_NAME, '.')) { #if PY_VERSION_HEX < 0x03030000 PyObject *py_level = PyInt_FromLong(1); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, 1); #endif if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; /* try absolute import on failure */ } #endif if (!module) { #if PY_VERSION_HEX < 0x03030000 PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = 
PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, level); #endif } } #else if (level>0) { PyErr_SetString(PyExc_RuntimeError, "Relative import is not supported for Python <=2.4."); goto bad; } module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, NULL); #endif bad: #if PY_VERSION_HEX < 0x03030000 Py_XDECREF(py_import); #endif Py_XDECREF(empty_list); Py_XDECREF(empty_dict); return module; } #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return ::std::complex< float >(x, y); } #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return x + y*(__pyx_t_float_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { __pyx_t_float_complex z; z.real = x; z.imag = y; return z; } #endif #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex a, __pyx_t_float_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float denom = b.real * b.real + b.imag * b.imag; z.real = (a.real * b.real + a.imag * b.imag) / denom; z.imag = (a.imag * b.real - a.real * b.imag) / denom; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrtf(z.real*z.real + z.imag*z.imag); #else return hypotf(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { float denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(a, a); case 3: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(z, a); case 4: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } r = a.real; theta = 0; } else { r = __Pyx_c_absf(a); theta = 
atan2f(a.imag, a.real); } lnr = logf(r); z_r = expf(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cosf(z_theta); z.imag = z_r * sinf(z_theta); return z; } #endif #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return ::std::complex< double >(x, y); } #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return x + y*(__pyx_t_double_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { __pyx_t_double_complex z; z.real = x; z.imag = y; return z; } #endif #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex a, __pyx_t_double_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double denom = b.real * b.real + b.imag * b.imag; z.real = (a.real * b.real + a.imag * b.imag) / denom; z.imag = (a.imag * b.real - a.real * b.imag) / denom; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrt(z.real*z.real + z.imag*z.imag); #else return hypot(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { double denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(a, a); case 3: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(z, a); case 4: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } r = a.real; theta = 0; } else { r = __Pyx_c_abs(a); theta = atan2(a.imag, a.real); } lnr = log(r); z_r = exp(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cos(z_theta); z.imag = z_r * sin(z_theta); return z; } #endif #endif static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { const int neg_one = (int) -1, const_zero = 0; const int is_unsigned = neg_one 
> const_zero; if (is_unsigned) { if (sizeof(int) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(int) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); } else if (sizeof(int) <= sizeof(unsigned long long)) { return PyLong_FromUnsignedLongLong((unsigned long long) value); } } else { if (sizeof(int) <= sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(int) <= sizeof(long long)) { return PyLong_FromLongLong((long long) value); } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(int), little, !is_unsigned); } } #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func) \ { \ func_type value = func(x); \ if (sizeof(target_type) < sizeof(func_type)) { \ if (unlikely(value != (func_type) (target_type) value)) { \ func_type zero = 0; \ PyErr_SetString(PyExc_OverflowError, \ (is_unsigned && unlikely(value < zero)) ? \ "can't convert negative value to " #target_type : \ "value too large to convert to " #target_type); \ return (target_type) -1; \ } \ } \ return (target_type) value; \ } #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS #include "longintrepr.h" #endif #endif static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { const int neg_one = (int) -1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(int) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to int"); return (int) -1; } return (int) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS if (sizeof(digit) <= sizeof(int)) { switch (Py_SIZE(x)) { case 0: return 0; case 1: return (int) ((PyLongObject*)x)->ob_digit[0]; } } #endif #endif if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to int"); return (int) -1; } if (sizeof(int) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT(int, unsigned long, PyLong_AsUnsignedLong) } else if (sizeof(int) <= sizeof(unsigned long long)) { __PYX_VERIFY_RETURN_INT(int, unsigned long long, PyLong_AsUnsignedLongLong) } } else { #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS if (sizeof(digit) <= sizeof(int)) { switch (Py_SIZE(x)) { case 0: return 0; case 1: return +(int) ((PyLongObject*)x)->ob_digit[0]; case -1: return -(int) ((PyLongObject*)x)->ob_digit[0]; } } #endif #endif if (sizeof(int) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT(int, long, PyLong_AsLong) } else if (sizeof(int) <= sizeof(long long)) { __PYX_VERIFY_RETURN_INT(int, long long, PyLong_AsLongLong) } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else int val; PyObject *v = __Pyx_PyNumber_Int(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if 
(likely(!ret)) return val; } #endif return (int) -1; } } else { int val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (int) -1; val = __Pyx_PyInt_As_int(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { const long neg_one = (long) -1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(long) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(long) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); } else if (sizeof(long) <= sizeof(unsigned long long)) { return PyLong_FromUnsignedLongLong((unsigned long long) value); } } else { if (sizeof(long) <= sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(long) <= sizeof(long long)) { return PyLong_FromLongLong((long long) value); } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(long), little, !is_unsigned); } } #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS #include "longintrepr.h" #endif #endif static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { const long neg_one = (long) -1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(long) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long) -1; } return (long) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS if (sizeof(digit) <= sizeof(long)) { switch (Py_SIZE(x)) { case 0: return 0; case 1: return (long) ((PyLongObject*)x)->ob_digit[0]; } } #endif #endif if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long) -1; } if (sizeof(long) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT(long, unsigned long, PyLong_AsUnsignedLong) } else if (sizeof(long) <= sizeof(unsigned long long)) { __PYX_VERIFY_RETURN_INT(long, unsigned long long, PyLong_AsUnsignedLongLong) } } else { #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS if (sizeof(digit) <= sizeof(long)) { switch (Py_SIZE(x)) { case 0: return 0; case 1: return +(long) ((PyLongObject*)x)->ob_digit[0]; case -1: return -(long) ((PyLongObject*)x)->ob_digit[0]; } } #endif #endif if (sizeof(long) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT(long, long, PyLong_AsLong) } else if (sizeof(long) <= sizeof(long long)) { __PYX_VERIFY_RETURN_INT(long, long long, PyLong_AsLongLong) } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else long val; PyObject *v = __Pyx_PyNumber_Int(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (long) -1; } } else { long val; PyObject *tmp = 
__Pyx_PyNumber_Int(x); if (!tmp) return (long) -1; val = __Pyx_PyInt_As_long(tmp); Py_DECREF(tmp); return val; } } static int __Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { char message[200]; PyOS_snprintf(message, sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); #if PY_VERSION_HEX < 0x02050000 return PyErr_Warn(NULL, message); #else return PyErr_WarnEx(NULL, message, 1); #endif } return 0; } #ifndef __PYX_HAVE_RT_ImportModule #define __PYX_HAVE_RT_ImportModule static PyObject *__Pyx_ImportModule(const char *name) { PyObject *py_name = 0; PyObject *py_module = 0; py_name = __Pyx_PyIdentifier_FromString(name); if (!py_name) goto bad; py_module = PyImport_Import(py_name); Py_DECREF(py_name); return py_module; bad: Py_XDECREF(py_name); return 0; } #endif #ifndef __PYX_HAVE_RT_ImportType #define __PYX_HAVE_RT_ImportType static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict) { PyObject *py_module = 0; PyObject *result = 0; PyObject *py_name = 0; char warning[200]; Py_ssize_t basicsize; #ifdef Py_LIMITED_API PyObject *py_basicsize; #endif py_module = __Pyx_ImportModule(module_name); if (!py_module) goto bad; py_name = __Pyx_PyIdentifier_FromString(class_name); if (!py_name) goto bad; result = PyObject_GetAttr(py_module, py_name); Py_DECREF(py_name); py_name = 0; Py_DECREF(py_module); py_module = 0; if (!result) goto bad; if (!PyType_Check(result)) { PyErr_Format(PyExc_TypeError, "%.200s.%.200s is not a type object", module_name, class_name); goto bad; } #ifndef Py_LIMITED_API basicsize = ((PyTypeObject *)result)->tp_basicsize; #else py_basicsize = PyObject_GetAttrString(result, "__basicsize__"); if (!py_basicsize) goto bad; basicsize = PyLong_AsSsize_t(py_basicsize); Py_DECREF(py_basicsize); py_basicsize = 0; if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred()) goto bad; #endif if (!strict && (size_t)basicsize > size) { PyOS_snprintf(warning, sizeof(warning), "%s.%s size changed, may indicate binary incompatibility", module_name, class_name); #if PY_VERSION_HEX < 0x02050000 if (PyErr_Warn(NULL, warning) < 0) goto bad; #else if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; #endif } else if ((size_t)basicsize != size) { PyErr_Format(PyExc_ValueError, "%.200s.%.200s has the wrong size, try recompiling", module_name, class_name); goto bad; } return (PyTypeObject *)result; bad: Py_XDECREF(py_module); Py_XDECREF(result); return NULL; } #endif static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { int start = 0, mid = 0, end = count - 1; if (end >= 0 && code_line > entries[end].code_line) { return count; } while (start < end) { mid = (start + end) / 2; if (code_line < entries[mid].code_line) { end = mid; } else if (code_line > entries[mid].code_line) { start = mid + 1; } else { return mid; } } if (code_line <= entries[mid].code_line) { return mid; } else { return mid + 1; } } static PyCodeObject *__pyx_find_code_object(int code_line) { PyCodeObject* code_object; int pos; if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { return NULL; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if (unlikely(pos >= __pyx_code_cache.count) || 
unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { return NULL; } code_object = __pyx_code_cache.entries[pos].code_object; Py_INCREF(code_object); return code_object; } static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { int pos, i; __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; if (unlikely(!code_line)) { return; } if (unlikely(!entries)) { entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); if (likely(entries)) { __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = 64; __pyx_code_cache.count = 1; entries[0].code_line = code_line; entries[0].code_object = code_object; Py_INCREF(code_object); } return; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { PyCodeObject* tmp = entries[pos].code_object; entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( __pyx_code_cache.entries, new_max*sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for (i=__pyx_code_cache.count; i>pos; i--) { entries[i] = entries[i-1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } #include "compile.h" #include "frameobject.h" #include "traceback.h" static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_srcfile = 0; PyObject *py_funcname = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(filename); #else py_srcfile = PyUnicode_FromString(filename); #endif if (!py_srcfile) goto bad; if (c_line) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromString(funcname); #else py_funcname = PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_code = __Pyx_PyCode_New( 0, /*int argcount,*/ 0, /*int kwonlyargcount,*/ 0, /*int nlocals,*/ 0, /*int stacksize,*/ 0, /*int flags,*/ __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, /*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ py_line, /*int firstlineno,*/ __pyx_empty_bytes /*PyObject *lnotab*/ ); Py_DECREF(py_srcfile); Py_DECREF(py_funcname); return py_code; bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); return NULL; } static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_globals = 0; PyFrameObject *py_frame = 0; py_code = __pyx_find_code_object(c_line ? c_line : py_line); if (!py_code) { py_code = __Pyx_CreateCodeObjectForTraceback( funcname, c_line, py_line, filename); if (!py_code) goto bad; __pyx_insert_code_object(c_line ? 
c_line : py_line, py_code); } py_globals = PyModule_GetDict(__pyx_m); if (!py_globals) goto bad; py_frame = PyFrame_New( PyThreadState_GET(), /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ py_globals, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; py_frame->f_lineno = py_line; PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); Py_XDECREF(py_frame); } static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else /* Python 3+ has unicode identifiers */ if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; ++t; } return 0; } static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(char* c_str) { return __Pyx_PyUnicode_FromStringAndSize(c_str, strlen(c_str)); } static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject* o) { Py_ssize_t ignore; return __Pyx_PyObject_AsStringAndSize(o, &ignore); } static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT if ( #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII __Pyx_sys_getdefaultencoding_not_ascii && #endif PyUnicode_Check(o)) { #if PY_VERSION_HEX < 0x03030000 char* defenc_c; PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); if (!defenc) return NULL; defenc_c = PyBytes_AS_STRING(defenc); #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII { char* end = defenc_c + PyBytes_GET_SIZE(defenc); char* c; for (c = defenc_c; c < end; c++) { if ((unsigned char) (*c) >= 128) { PyUnicode_AsASCIIString(o); return NULL; } } } #endif /*__PYX_DEFAULT_STRING_ENCODING_IS_ASCII*/ *length = PyBytes_GET_SIZE(defenc); return defenc_c; #else /* PY_VERSION_HEX < 0x03030000 */ if (PyUnicode_READY(o) == -1) return NULL; #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII if (PyUnicode_IS_ASCII(o)) { *length = PyUnicode_GET_DATA_SIZE(o); return PyUnicode_AsUTF8(o); } else { PyUnicode_AsASCIIString(o); return NULL; } #else /* __PYX_DEFAULT_STRING_ENCODING_IS_ASCII */ return PyUnicode_AsUTF8AndSize(o, length); #endif /* __PYX_DEFAULT_STRING_ENCODING_IS_ASCII */ #endif /* PY_VERSION_HEX < 0x03030000 */ } else #endif /* __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT */ #if !CYTHON_COMPILING_IN_PYPY #if PY_VERSION_HEX >= 0x02060000 if (PyByteArray_Check(o)) { *length = PyByteArray_GET_SIZE(o); return PyByteArray_AS_STRING(o); } else #endif #endif { char* result; int r = PyBytes_AsStringAndSize(o, &result, length); if (unlikely(r < 0)) { return NULL; } else { return result; } } } static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == Py_True; if (is_true | (x == Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x) { PyNumberMethods *m; const char *name = NULL; PyObject *res = NULL; #if PY_MAJOR_VERSION < 3 if (PyInt_Check(x) || PyLong_Check(x)) #else if (PyLong_Check(x)) #endif return Py_INCREF(x), x; m = Py_TYPE(x)->tp_as_number; #if 
PY_MAJOR_VERSION < 3 if (m && m->nb_int) { name = "int"; res = PyNumber_Int(x); } else if (m && m->nb_long) { name = "long"; res = PyNumber_Long(x); } #else if (m && m->nb_int) { name = "int"; res = PyNumber_Long(x); } #endif if (res) { #if PY_MAJOR_VERSION < 3 if (!PyInt_Check(res) && !PyLong_Check(res)) { #else if (!PyLong_Check(res)) { #endif PyErr_Format(PyExc_TypeError, "__%.4s__ returned non-%.4s (type %.200s)", name, name, Py_TYPE(res)->tp_name); Py_DECREF(res); return NULL; } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS #include "longintrepr.h" #endif #endif static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject *x; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(b))) return PyInt_AS_LONG(b); #endif if (likely(PyLong_CheckExact(b))) { #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS switch (Py_SIZE(b)) { case -1: return -(sdigit)((PyLongObject*)b)->ob_digit[0]; case 0: return 0; case 1: return ((PyLongObject*)b)->ob_digit[0]; } #endif #endif #if PY_VERSION_HEX < 0x02060000 return PyInt_AsSsize_t(b); #else return PyLong_AsSsize_t(b); #endif } x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { #if PY_VERSION_HEX < 0x02050000 if (ival <= LONG_MAX) return PyInt_FromLong((long)ival); else { unsigned char *bytes = (unsigned char *) &ival; int one = 1; int little = (int)*(unsigned char*)&one; return _PyLong_FromByteArray(bytes, sizeof(size_t), little, 0); } #else return PyInt_FromSize_t(ival); #endif } #endif /* Py_PYTHON_H */
example-omp.c
// PWR004: Declare OpenMP scoping for all variables // https://www.appentra.com/knowledge/checks/pwr004 void example(int* result, unsigned size) { int factor = 42; // No data scoping is specified #pragma omp parallel for for (int i = 0; i < size; i++) { result[i] = factor * i; } }
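A possible fix for the PWR004 example above, offered as a hedged sketch rather than as the knowledge-base answer: declare the scoping of every variable explicitly so the compiler rejects anything left unscoped. The function name example_scoped and the clause choices are assumptions for illustration only.

// Sketch: explicit data scoping for the PWR004 example (assumed fix, not from the original file).
// default(none) forces every referenced variable to appear in a clause; the loop index is
// predetermined private by the OpenMP rules for the 'for' construct.
void example_scoped(int* result, unsigned size) {
  int factor = 42;
  #pragma omp parallel for default(none) shared(result, size, factor)
  for (unsigned i = 0; i < size; i++) {
    result[i] = factor * (int)i;
  }
}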
task_tied_threadid.c
// RUN: %libomp-compile-and-run // REQUIRES: abt #include "omp_testsuite.h" #include <string.h> #include <stdio.h> int test_task_tied_threadid(int num_threads) { int i, vals[NUM_TASKS]; memset(vals, 0, sizeof(vals)); #pragma omp parallel num_threads(num_threads) { #pragma omp master { for (i = 0; i < NUM_TASKS; i++) { #pragma omp task firstprivate(i) { int omp_thread_id = omp_get_thread_num(); ABT_thread abt_thread; ABT_EXIT_IF_FAIL(ABT_thread_self(&abt_thread)); // Context switching in OpenMP. #pragma omp taskyield int omp_thread_id2 = omp_get_thread_num(); if (omp_thread_id == omp_thread_id2) { vals[i] += 1; } ABT_thread abt_thread2; ABT_EXIT_IF_FAIL(ABT_thread_self(&abt_thread2)); ABT_bool abt_thread_equal; ABT_EXIT_IF_FAIL(ABT_thread_equal(abt_thread, abt_thread2, &abt_thread_equal)); if (abt_thread_equal == ABT_TRUE) { vals[i] += 2; } // Context switching in Argobots. ABT_EXIT_IF_FAIL(ABT_thread_yield()); int omp_thread_id3 = omp_get_thread_num(); if (omp_thread_id2 == omp_thread_id3) { vals[i] += 4; } } } } } for (i = 0; i < NUM_TASKS; i++) { if (vals[i] != 7) { printf("vals[%d] == %d\n", i, vals[i]); return 0; } } return 1; } int main() { int i, num_failed = 0; for (i = 0; i < REPETITIONS; i++) { if (!test_task_tied_threadid(i + 1)) { num_failed++; } } return num_failed; }
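The test above depends on the Argobots handles (ABT_thread_self, ABT_thread_equal) and on helpers from omp_testsuite.h. For readers without that runtime, a minimal, self-contained sketch of the same tied-task property, namely that a tied task may only be resumed by the thread that started it, so omp_get_thread_num() is stable across a taskyield, might look as follows; NUM_TASKS and the pass/fail reporting here are illustrative assumptions, not part of the original test.

// Sketch: a tied task must see the same thread id on both sides of a task scheduling point.
#include <omp.h>
#include <stdio.h>
#define NUM_TASKS 25

int main(void) {
  int ok = 1;
  #pragma omp parallel
  #pragma omp master
  for (int i = 0; i < NUM_TASKS; i++) {
    #pragma omp task firstprivate(i) shared(ok)
    {
      int before = omp_get_thread_num();
      // Task scheduling point: a tied task may suspend here,
      // but only the same thread is allowed to resume it.
      #pragma omp taskyield
      int after = omp_get_thread_num();
      if (before != after) {
        #pragma omp atomic write
        ok = 0;
      }
    }
  }
  /* The implicit barrier at the end of the parallel region guarantees all tasks finished. */
  printf(ok ? "tied-task check passed\n" : "tied-task check FAILED\n");
  return ok ? 0 : 1;
}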
openmp_error.c
#include <stdio.h> #include <omp.h> #define N 10000 int main (int argc, char **argv) { int a[N]; int j = N; // [...] initialize array #pragma omp parallel for for (int i = 0; i < N - 2; i++) { #pragma omp critical a[i] = a[i] + a[j]; j--; } }
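openmp_error.c above packs two classic mistakes into one loop: the critical construct covers only the assignment, so the shared counter j is decremented by every thread without synchronization, and j starts at N, so the very first read of a[j] is already out of bounds. One way to repair it, shown here only as a hedged sketch because the file does not state the intended pairing of elements, is to derive the second index from the loop variable and keep reads and writes in separate arrays so no iteration touches data that another iteration writes:

// Sketch of a race-free variant (the a[i] += b[N - 1 - i] pairing and the separate
// input array b are assumptions about the intent, not taken from the original file).
#include <omp.h>
#define N 10000

void example_fixed(const int b[N], int a[N]) {
  #pragma omp parallel for
  for (int i = 0; i < N - 2; i++) {
    int j = N - 1 - i;      // computed from i: no shared loop-carried counter, always in bounds
    a[i] = a[i] + b[j];     // each iteration writes only a[i]; reads come from the read-only b
  }
}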
c_qsort.c
/* *********************************************************************** This program is part of the OpenMP Source Code Repository http://www.pcg.ull.es/ompscr/ e-mail: ompscr@etsii.ull.es This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License (LICENSE file) along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA FILE: c_qsort.c VERSION: 1.0 DATE: May 2004 AUTHOR: F. de Sande COMMENTS TO: sande@csi.ull.es DESCRIPTION: Parallel implementation of Quicksort using OpenMP Sorts an integer array COMMENTS: The code requires nested Parallelism. REFERENCES: C. A. R. Hoare, ACM Algorithm 64}: Quicksort", Communications of the ACM", vol. 4, no. 7, pg. 321. Jul 1961 http://en.wikipedia.org/wiki/Quicksort BASIC PRAGMAS: parallel for USAGE: ./c_qsort.par 2000000 INPUT: The size (in K) of the vector to sort OUTPUT: The code tests that the vector is sorted FILE FORMATS: - RESTRICTIONS: - REVISION HISTORY: **************************************************************************/ //#include "OmpSCR.h" #include <omp.h> #define NUM_ARGS 1 #define NUM_TIMERS 1 #define KILO (1024) #define MEGA (1024 * 1024) #define DEFAULT_SIZE (2 * MEGA) #define MAXSIZE (9 * MEGA) #define NUM_STEPS 10 /* No. of iterations (number of vectors to sort) */ #define SIZEINIT 128 char USAGE_STR[] = "<size_in_Kb>"; int SIZE; int array[MAXSIZE]; /* ----------------------------------------------------------------------- PROTOTYPES * ----------------------------------------------------------------------- */ void initialize(int *v, int seed); void testit(int *v); void qs(int *v, int first, int last); /* ----------------------------------------------------------------------- IMPLEMENTATION * ----------------------------------------------------------------------- */ /* ----------------------------------------------------------------------- Sets randomly the values for the array * ----------------------------------------------------------------------- */ void initialize(int *v, int seed) { unsigned i; srandom(seed); for(i = 0; i < SIZE; i++) v[i] = (int)random(); } /* ----------------------------------------------------------------------- Tests the result * ----------------------------------------------------------------------- */ void testit(int *v) { register int k; int not_sorted = 0; for (k = 0; k < SIZE - 1; k++) if (v[k] > v[k + 1]) { not_sorted = 1; break; } if (not_sorted) printf("Array NOT sorted.\n"); else printf("Array sorted.\n"); } /* ----------------------------------------------------------------------- */ void qs(int *v, int first, int last) { int start[2], end[2], pivot, i, temp; if (first < last) { start[1] = first; end[0] = last; pivot = v[(first + last) / 2]; while (start[1] <= end[0]) { while (v[start[1]] < pivot) start[1]++; while (pivot < v[end[0]]) end[0]--; if (start[1] <= end[0]) { temp = v[start[1]]; v[start[1]] = v[end[0]]; v[end[0]] = temp; start[1]++; end[0]--; } } start[0] = first; end[1] = last; #pragma omp parallel { #pragma omp for nowait for(i = 0; i <= 
1; i++) { qs(v, start[i], end[i]); } } } } /* ----------------------------------------------------------------------- */ int main(int argc, char *argv[]) { int STEP, NUMTHREADS; double total_time; char *PARAM_NAMES[NUM_ARGS] = {"Size (in K)"}; char *TIMERS_NAMES[NUM_TIMERS] = {"Total_time" }; char *DEFAULT_VALUES[NUM_ARGS] = {"2048 K"}; NUMTHREADS = 1; //omp_get_num_threads(); //OSCR_init (NUMTHREADS, "Quicksort", "Use 'qsort' <size (in K)>", NUM_ARGS, // PARAM_NAMES, DEFAULT_VALUES , NUM_TIMERS, NUM_TIMERS, TIMERS_NAMES, // argc, argv); SIZE = SIZEINIT; //OSCR_getarg_int(1); if (SIZE > MAXSIZE) { printf("Size: %d Maximum size: %d\n", SIZE, MAXSIZE); exit(-1); } /* Default: DEFAULT_SIZE */ for (STEP = 0; STEP < NUM_STEPS; STEP++) { initialize(array, STEP); //OSCR_timer_start(0); qs(array, 0, SIZE-1); //OSCR_timer_stop(0); testit(array); } total_time = 1; //OSCR_timer_read(0); //OSCR_report(1, TIMERS_NAMES); printf("\n \t# THREADS \tSIZE \tSTEPS \tTIME (secs.) \n"); printf("\t%d \t\t%d \t%d \t%14.6lf \n", NUMTHREADS, SIZE, NUM_STEPS, total_time); } /* main */ /* * vim:ts=2:sw=2: */
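The header of c_qsort.c notes that "The code requires nested Parallelism": every recursive qs() call opens its own parallel region, so unless nesting is enabled the inner regions run with a single thread. A hedged sketch of the run-time switches follows; the level cap of 4 is an arbitrary illustration, and the same effect can be obtained from the environment (OMP_NESTED=true, OMP_MAX_ACTIVE_LEVELS). On OpenMP 3.0+ runtimes an alternative restructuring is to spawn the two recursive calls as tasks inside a single outer parallel region, which avoids nested regions altogether.

// Sketch: enable nested parallel regions before the first call to qs().
// omp_set_nested() is deprecated since OpenMP 5.0 in favour of the max-active-levels setting.
#include <omp.h>

void enable_nesting(void) {
  omp_set_nested(1);              /* legacy switch, still widely honoured by runtimes */
  omp_set_max_active_levels(4);   /* cap the nesting depth; 4 is an arbitrary example */
}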
gimple.h
/* Modula-3: modified */ /* Gimple IR definitions. Copyright 2007, 2008, 2009, 2010 Free Software Foundation, Inc. Contributed by Aldy Hernandez <aldyh@redhat.com> This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ #ifndef GCC_GIMPLE_H #define GCC_GIMPLE_H #include "pointer-set.h" #include "vec.h" #include "vecprim.h" #include "vecir.h" #include "ggc.h" #include "basic-block.h" #include "tree-ssa-operands.h" #include "tree-ssa-alias.h" EXTERN_C_START struct gimple_seq_node_d; typedef struct gimple_seq_node_d *gimple_seq_node; typedef const struct gimple_seq_node_d *const_gimple_seq_node; /* For each block, the PHI nodes that need to be rewritten are stored into these vectors. */ typedef VEC(gimple, heap) *gimple_vec; DEF_VEC_P (gimple_vec); DEF_VEC_ALLOC_P (gimple_vec, heap); enum gimple_code { #define DEFGSCODE(SYM, STRING, STRUCT) SYM, #include "gimple.def" #undef DEFGSCODE LAST_AND_UNUSED_GIMPLE_CODE }; extern const char *const gimple_code_name[]; extern const unsigned char gimple_rhs_class_table[]; /* Error out if a gimple tuple is addressed incorrectly. */ #if defined ENABLE_GIMPLE_CHECKING #define gcc_gimple_checking_assert(EXPR) gcc_assert (EXPR) extern void gimple_check_failed (const_gimple, const char *, int, \ const char *, enum gimple_code, \ enum tree_code) ATTRIBUTE_NORETURN; #define GIMPLE_CHECK(GS, CODE) \ do { \ const_gimple __gs = (GS); \ if (gimple_code (__gs) != (CODE)) \ gimple_check_failed (__gs, __FILE__, __LINE__, __FUNCTION__, \ (CODE), ERROR_MARK); \ } while (0) #else /* not ENABLE_GIMPLE_CHECKING */ #define gcc_gimple_checking_assert(EXPR) ((void)(0 && (EXPR))) #define GIMPLE_CHECK(GS, CODE) (void)0 #endif /* Class of GIMPLE expressions suitable for the RHS of assignments. See get_gimple_rhs_class. */ enum gimple_rhs_class { GIMPLE_INVALID_RHS, /* The expression cannot be used on the RHS. */ GIMPLE_TERNARY_RHS, /* The expression is a ternary operation. */ GIMPLE_BINARY_RHS, /* The expression is a binary operation. */ GIMPLE_UNARY_RHS, /* The expression is a unary operation. */ GIMPLE_SINGLE_RHS /* The expression is a single object (an SSA name, a _DECL, a _REF, etc. */ }; /* Specific flags for individual GIMPLE statements. These flags are always stored in gimple_statement_base.subcode and they may only be defined for statement codes that do not use sub-codes. Values for the masks can overlap as long as the overlapping values are never used in the same statement class. The maximum mask value that can be defined is 1 << 15 (i.e., each statement code can hold up to 16 bitflags). Keep this list sorted. */ enum gf_mask { GF_ASM_INPUT = 1 << 0, GF_ASM_VOLATILE = 1 << 1, GF_CALL_CANNOT_INLINE = 1 << 0, GF_CALL_FROM_THUNK = 1 << 1, GF_CALL_RETURN_SLOT_OPT = 1 << 2, GF_CALL_TAILCALL = 1 << 3, GF_CALL_VA_ARG_PACK = 1 << 4, GF_CALL_NOTHROW = 1 << 5, GF_OMP_PARALLEL_COMBINED = 1 << 0, /* True on an GIMPLE_OMP_RETURN statement if the return does not require a thread synchronization via some sort of barrier. 
The exact barrier that would otherwise be emitted is dependent on the OMP statement with which this return is associated. */ GF_OMP_RETURN_NOWAIT = 1 << 0, GF_OMP_SECTION_LAST = 1 << 0, GF_PREDICT_TAKEN = 1 << 15 }; /* Currently, there's only one type of gimple debug stmt. Others are envisioned, for example, to enable the generation of is_stmt notes in line number information, to mark sequence points, etc. This subcode is to be used to tell them apart. */ enum gimple_debug_subcode { GIMPLE_DEBUG_BIND = 0 }; /* Masks for selecting a pass local flag (PLF) to work on. These masks are used by gimple_set_plf and gimple_plf. */ enum plf_mask { GF_PLF_1 = 1 << 0, GF_PLF_2 = 1 << 1 }; /* A node in a gimple_seq_d. */ struct GTY((chain_next ("%h.next"), chain_prev ("%h.prev"))) gimple_seq_node_d { gimple stmt; struct gimple_seq_node_d *prev; struct gimple_seq_node_d *next; }; /* A double-linked sequence of gimple statements. */ struct GTY ((chain_next ("%h.next_free"))) gimple_seq_d { /* First and last statements in the sequence. */ gimple_seq_node first; gimple_seq_node last; /* Sequences are created/destroyed frequently. To minimize allocation activity, deallocated sequences are kept in a pool of available sequences. This is the pointer to the next free sequence in the pool. */ gimple_seq next_free; }; /* Return the first node in GIMPLE sequence S. */ static inline gimple_seq_node gimple_seq_first (const_gimple_seq s) { return s ? s->first : NULL; } /* Return the first statement in GIMPLE sequence S. */ static inline gimple gimple_seq_first_stmt (const_gimple_seq s) { gimple_seq_node n = gimple_seq_first (s); return (n) ? n->stmt : NULL; } /* Return the last node in GIMPLE sequence S. */ static inline gimple_seq_node gimple_seq_last (const_gimple_seq s) { return s ? s->last : NULL; } /* Return the last statement in GIMPLE sequence S. */ static inline gimple gimple_seq_last_stmt (const_gimple_seq s) { gimple_seq_node n = gimple_seq_last (s); return (n) ? n->stmt : NULL; } /* Set the last node in GIMPLE sequence S to LAST. */ static inline void gimple_seq_set_last (gimple_seq s, gimple_seq_node last) { s->last = last; } /* Set the first node in GIMPLE sequence S to FIRST. */ static inline void gimple_seq_set_first (gimple_seq s, gimple_seq_node first) { s->first = first; } /* Return true if GIMPLE sequence S is empty. */ static inline bool gimple_seq_empty_p (const_gimple_seq s) { return s == NULL || s->first == NULL; } void gimple_seq_add_stmt (gimple_seq *, gimple); /* Link gimple statement GS to the end of the sequence *SEQ_P. If *SEQ_P is NULL, a new sequence is allocated. This function is similar to gimple_seq_add_stmt, but does not scan the operands. During gimplification, we need to manipulate statement sequences before the def/use vectors have been constructed. */ void gimplify_seq_add_stmt (gimple_seq *, gimple); /* Allocate a new sequence and initialize its first element with STMT. */ static inline gimple_seq gimple_seq_alloc_with_stmt (gimple stmt) { gimple_seq seq = NULL; gimple_seq_add_stmt (&seq, stmt); return seq; } /* Returns the sequence of statements in BB. */ static inline gimple_seq bb_seq (const_basic_block bb) { return (!(bb->flags & BB_RTL) && bb->il.gimple) ? bb->il.gimple->seq : NULL; } /* Sets the sequence of statements in BB to SEQ. */ static inline void set_bb_seq (basic_block bb, gimple_seq seq) { gcc_checking_assert (!(bb->flags & BB_RTL)); bb->il.gimple->seq = seq; } /* Iterator object for GIMPLE statement sequences. 
*/ typedef struct { /* Sequence node holding the current statement. */ gimple_seq_node ptr; /* Sequence and basic block holding the statement. These fields are necessary to handle edge cases such as when statement is added to an empty basic block or when the last statement of a block/sequence is removed. */ gimple_seq seq; basic_block bb; } gimple_stmt_iterator; /* Data structure definitions for GIMPLE tuples. NOTE: word markers are for 64 bit hosts. */ struct GTY(()) gimple_statement_base { /* [ WORD 1 ] Main identifying code for a tuple. */ ENUM_BITFIELD(gimple_code, code, 8); /* Nonzero if a warning should not be emitted on this tuple. */ unsigned int no_warning : 1; /* Nonzero if this tuple has been visited. Passes are responsible for clearing this bit before using it. */ unsigned int visited : 1; /* Nonzero if this tuple represents a non-temporal move. */ unsigned int nontemporal_move : 1; /* Pass local flags. These flags are free for any pass to use as they see fit. Passes should not assume that these flags contain any useful value when the pass starts. Any initial state that the pass requires should be set on entry to the pass. See gimple_set_plf and gimple_plf for usage. */ unsigned int plf : 2; /* Nonzero if this statement has been modified and needs to have its operands rescanned. */ unsigned modified : 1; /* Nonzero if this statement contains volatile operands. */ unsigned has_volatile_ops : 1; /* Padding to get subcode to 16 bit alignment. */ unsigned pad : 1; /* The SUBCODE field can be used for tuple-specific flags for tuples that do not require subcodes. Note that SUBCODE should be at least as wide as tree codes, as several tuples store tree codes in there. */ unsigned int subcode : 16; /* UID of this statement. This is used by passes that want to assign IDs to statements. It must be assigned and used by each pass. By default it should be assumed to contain garbage. */ unsigned uid; /* [ WORD 2 ] Locus information for debug info. */ location_t location; /* Number of operands in this tuple. */ unsigned num_ops; /* [ WORD 3 ] Basic block holding this statement. */ struct basic_block_def *bb; /* [ WORD 4 ] Lexical block holding this statement. */ tree block; }; /* Base structure for tuples with operands. */ struct GTY(()) gimple_statement_with_ops_base { /* [ WORD 1-4 ] */ struct gimple_statement_base gsbase; /* [ WORD 5-6 ] SSA operand vectors. NOTE: It should be possible to amalgamate these vectors with the operand vector OP. However, the SSA operand vectors are organized differently and contain more information (like immediate use chaining). */ struct def_optype_d GTY((skip (""))) *def_ops; struct use_optype_d GTY((skip (""))) *use_ops; }; /* Statements that take register operands. */ struct GTY(()) gimple_statement_with_ops { /* [ WORD 1-6 ] */ struct gimple_statement_with_ops_base opbase; /* [ WORD 7 ] Operand vector. NOTE! This must always be the last field of this structure. In particular, this means that this structure cannot be embedded inside another one. */ tree GTY((length ("%h.opbase.gsbase.num_ops"))) op[1]; }; /* Base for statements that take both memory and register operands. */ struct GTY(()) gimple_statement_with_memory_ops_base { /* [ WORD 1-6 ] */ struct gimple_statement_with_ops_base opbase; /* [ WORD 7-8 ] Virtual operands for this statement. The GC will pick them up via the ssa_names array. */ tree GTY((skip (""))) vdef; tree GTY((skip (""))) vuse; }; /* Statements that take both memory and register operands. 
*/ struct GTY(()) gimple_statement_with_memory_ops { /* [ WORD 1-8 ] */ struct gimple_statement_with_memory_ops_base membase; /* [ WORD 9 ] Operand vector. NOTE! This must always be the last field of this structure. In particular, this means that this structure cannot be embedded inside another one. */ tree GTY((length ("%h.membase.opbase.gsbase.num_ops"))) op[1]; }; /* Call statements that take both memory and register operands. */ struct GTY(()) gimple_statement_call { /* [ WORD 1-8 ] */ struct gimple_statement_with_memory_ops_base membase; /* [ WORD 9-12 ] */ struct pt_solution call_used; struct pt_solution call_clobbered; /* [ WORD 13 ] Operand vector. NOTE! This must always be the last field of this structure. In particular, this means that this structure cannot be embedded inside another one. */ tree GTY((length ("%h.membase.opbase.gsbase.num_ops"))) op[1]; }; /* OpenMP statements (#pragma omp). */ struct GTY(()) gimple_statement_omp { /* [ WORD 1-4 ] */ struct gimple_statement_base gsbase; /* [ WORD 5 ] */ gimple_seq body; }; /* GIMPLE_BIND */ struct GTY(()) gimple_statement_bind { /* [ WORD 1-4 ] */ struct gimple_statement_base gsbase; /* [ WORD 5 ] Variables declared in this scope. */ tree vars; /* [ WORD 6 ] This is different than the BLOCK field in gimple_statement_base, which is analogous to TREE_BLOCK (i.e., the lexical block holding this statement). This field is the equivalent of BIND_EXPR_BLOCK in tree land (i.e., the lexical scope defined by this bind). See gimple-low.c. */ tree block; /* [ WORD 7 ] */ gimple_seq body; }; /* GIMPLE_CATCH */ struct GTY(()) gimple_statement_catch { /* [ WORD 1-4 ] */ struct gimple_statement_base gsbase; /* [ WORD 5 ] */ tree types; /* [ WORD 6 ] */ gimple_seq handler; }; /* GIMPLE_EH_FILTER */ struct GTY(()) gimple_statement_eh_filter { /* [ WORD 1-4 ] */ struct gimple_statement_base gsbase; /* [ WORD 5 ] Filter types. */ tree types; /* [ WORD 6 ] Failure actions. */ gimple_seq failure; }; /* GIMPLE_EH_MUST_NOT_THROW */ struct GTY(()) gimple_statement_eh_mnt { /* [ WORD 1-4 ] */ struct gimple_statement_base gsbase; /* [ WORD 5 ] Abort function decl. */ tree fndecl; }; /* GIMPLE_PHI */ struct GTY(()) gimple_statement_phi { /* [ WORD 1-4 ] */ struct gimple_statement_base gsbase; /* [ WORD 5 ] */ unsigned capacity; unsigned nargs; /* [ WORD 6 ] */ tree result; /* [ WORD 7 ] */ struct phi_arg_d GTY ((length ("%h.nargs"))) args[1]; }; /* GIMPLE_RESX, GIMPLE_EH_DISPATCH */ struct GTY(()) gimple_statement_eh_ctrl { /* [ WORD 1-4 ] */ struct gimple_statement_base gsbase; /* [ WORD 5 ] Exception region number. */ int region; }; /* GIMPLE_TRY */ struct GTY(()) gimple_statement_try { /* [ WORD 1-4 ] */ struct gimple_statement_base gsbase; /* [ WORD 5 ] Expression to evaluate. */ gimple_seq eval; /* [ WORD 6 ] Cleanup expression. */ gimple_seq cleanup; }; /* Kind of GIMPLE_TRY statements. */ enum gimple_try_flags { /* A try/catch. */ GIMPLE_TRY_CATCH = 1 << 0, /* A try/finally. */ GIMPLE_TRY_FINALLY = 1 << 1, GIMPLE_TRY_KIND = GIMPLE_TRY_CATCH | GIMPLE_TRY_FINALLY, /* Analogous to TRY_CATCH_IS_CLEANUP. */ GIMPLE_TRY_CATCH_IS_CLEANUP = 1 << 2 }; /* GIMPLE_WITH_CLEANUP_EXPR */ struct GTY(()) gimple_statement_wce { /* [ WORD 1-4 ] */ struct gimple_statement_base gsbase; /* Subcode: CLEANUP_EH_ONLY. True if the cleanup should only be executed if an exception is thrown, not on normal exit of its scope. This flag is analogous to the CLEANUP_EH_ONLY flag in TARGET_EXPRs. */ /* [ WORD 5 ] Cleanup expression. 
*/ gimple_seq cleanup; }; /* GIMPLE_ASM */ struct GTY(()) gimple_statement_asm { /* [ WORD 1-8 ] */ struct gimple_statement_with_memory_ops_base membase; /* [ WORD 9 ] __asm__ statement. */ const char *string; /* [ WORD 10 ] Number of inputs, outputs, clobbers, labels. */ unsigned char ni; unsigned char no; unsigned char nc; unsigned char nl; /* [ WORD 11 ] Operand vector. NOTE! This must always be the last field of this structure. In particular, this means that this structure cannot be embedded inside another one. */ tree GTY((length ("%h.membase.opbase.gsbase.num_ops"))) op[1]; }; /* GIMPLE_OMP_CRITICAL */ struct GTY(()) gimple_statement_omp_critical { /* [ WORD 1-5 ] */ struct gimple_statement_omp omp; /* [ WORD 6 ] Critical section name. */ tree name; }; struct GTY(()) gimple_omp_for_iter { /* Condition code. */ enum tree_code cond; /* Index variable. */ tree index; /* Initial value. */ tree initial; /* Final value. */ tree final; /* Increment. */ tree incr; }; /* GIMPLE_OMP_FOR */ struct GTY(()) gimple_statement_omp_for { /* [ WORD 1-5 ] */ struct gimple_statement_omp omp; /* [ WORD 6 ] */ tree clauses; /* [ WORD 7 ] Number of elements in iter array. */ size_t collapse; /* [ WORD 8 ] */ struct gimple_omp_for_iter * GTY((length ("%h.collapse"))) iter; /* [ WORD 9 ] Pre-body evaluated before the loop body begins. */ gimple_seq pre_body; }; /* GIMPLE_OMP_PARALLEL */ struct GTY(()) gimple_statement_omp_parallel { /* [ WORD 1-5 ] */ struct gimple_statement_omp omp; /* [ WORD 6 ] Clauses. */ tree clauses; /* [ WORD 7 ] Child function holding the body of the parallel region. */ tree child_fn; /* [ WORD 8 ] Shared data argument. */ tree data_arg; }; /* GIMPLE_OMP_TASK */ struct GTY(()) gimple_statement_omp_task { /* [ WORD 1-8 ] */ struct gimple_statement_omp_parallel par; /* [ WORD 9 ] Child function holding firstprivate initialization if needed. */ tree copy_fn; /* [ WORD 10-11 ] Size and alignment in bytes of the argument data block. */ tree arg_size; tree arg_align; }; /* GIMPLE_OMP_SECTION */ /* Uses struct gimple_statement_omp. */ /* GIMPLE_OMP_SECTIONS */ struct GTY(()) gimple_statement_omp_sections { /* [ WORD 1-5 ] */ struct gimple_statement_omp omp; /* [ WORD 6 ] */ tree clauses; /* [ WORD 7 ] The control variable used for deciding which of the sections to execute. */ tree control; }; /* GIMPLE_OMP_CONTINUE. Note: This does not inherit from gimple_statement_omp, because we do not need the body field. */ struct GTY(()) gimple_statement_omp_continue { /* [ WORD 1-4 ] */ struct gimple_statement_base gsbase; /* [ WORD 5 ] */ tree control_def; /* [ WORD 6 ] */ tree control_use; }; /* GIMPLE_OMP_SINGLE */ struct GTY(()) gimple_statement_omp_single { /* [ WORD 1-5 ] */ struct gimple_statement_omp omp; /* [ WORD 6 ] */ tree clauses; }; /* GIMPLE_OMP_ATOMIC_LOAD. Note: This is based on gimple_statement_base, not g_s_omp, because g_s_omp contains a sequence, which we don't need here. */ struct GTY(()) gimple_statement_omp_atomic_load { /* [ WORD 1-4 ] */ struct gimple_statement_base gsbase; /* [ WORD 5-6 ] */ tree rhs, lhs; }; /* GIMPLE_OMP_ATOMIC_STORE. See note on GIMPLE_OMP_ATOMIC_LOAD. */ struct GTY(()) gimple_statement_omp_atomic_store { /* [ WORD 1-4 ] */ struct gimple_statement_base gsbase; /* [ WORD 5 ] */ tree val; }; #define DEFGSSTRUCT(SYM, STRUCT, HAS_TREE_OP) SYM, enum gimple_statement_structure_enum { #include "gsstruct.def" LAST_GSS_ENUM }; #undef DEFGSSTRUCT /* Define the overall contents of a gimple tuple. 
It may be any of the structures declared above for various types of tuples. */ union GTY ((desc ("gimple_statement_structure (&%h)"), variable_size)) gimple_statement_d { struct gimple_statement_base GTY ((tag ("GSS_BASE"))) gsbase; struct gimple_statement_with_ops GTY ((tag ("GSS_WITH_OPS"))) gsops; struct gimple_statement_with_memory_ops_base GTY ((tag ("GSS_WITH_MEM_OPS_BASE"))) gsmembase; struct gimple_statement_with_memory_ops GTY ((tag ("GSS_WITH_MEM_OPS"))) gsmem; struct gimple_statement_call GTY ((tag ("GSS_CALL"))) gimple_call; struct gimple_statement_omp GTY ((tag ("GSS_OMP"))) omp; struct gimple_statement_bind GTY ((tag ("GSS_BIND"))) gimple_bind; struct gimple_statement_catch GTY ((tag ("GSS_CATCH"))) gimple_catch; struct gimple_statement_eh_filter GTY ((tag ("GSS_EH_FILTER"))) gimple_eh_filter; struct gimple_statement_eh_mnt GTY ((tag ("GSS_EH_MNT"))) gimple_eh_mnt; struct gimple_statement_phi GTY ((tag ("GSS_PHI"))) gimple_phi; struct gimple_statement_eh_ctrl GTY ((tag ("GSS_EH_CTRL"))) gimple_eh_ctrl; struct gimple_statement_try GTY ((tag ("GSS_TRY"))) gimple_try; struct gimple_statement_wce GTY ((tag ("GSS_WCE"))) gimple_wce; struct gimple_statement_asm GTY ((tag ("GSS_ASM"))) gimple_asm; struct gimple_statement_omp_critical GTY ((tag ("GSS_OMP_CRITICAL"))) gimple_omp_critical; struct gimple_statement_omp_for GTY ((tag ("GSS_OMP_FOR"))) gimple_omp_for; struct gimple_statement_omp_parallel GTY ((tag ("GSS_OMP_PARALLEL"))) gimple_omp_parallel; struct gimple_statement_omp_task GTY ((tag ("GSS_OMP_TASK"))) gimple_omp_task; struct gimple_statement_omp_sections GTY ((tag ("GSS_OMP_SECTIONS"))) gimple_omp_sections; struct gimple_statement_omp_single GTY ((tag ("GSS_OMP_SINGLE"))) gimple_omp_single; struct gimple_statement_omp_continue GTY ((tag ("GSS_OMP_CONTINUE"))) gimple_omp_continue; struct gimple_statement_omp_atomic_load GTY ((tag ("GSS_OMP_ATOMIC_LOAD"))) gimple_omp_atomic_load; struct gimple_statement_omp_atomic_store GTY ((tag ("GSS_OMP_ATOMIC_STORE"))) gimple_omp_atomic_store; }; /* In gimple.c. */ /* Offset in bytes to the location of the operand vector. Zero if there is no operand vector for this tuple structure. */ extern size_t const gimple_ops_offset_[]; /* Map GIMPLE codes to GSS codes. */ extern enum gimple_statement_structure_enum const gss_for_code_[]; /* This variable holds the currently expanded gimple statement for purposes of comminucating the profile info to the builtin expanders. 
*/ extern gimple currently_expanding_gimple_stmt; gimple gimple_build_return (tree); gimple gimple_build_assign_stat (tree, tree MEM_STAT_DECL); #define gimple_build_assign(l,r) gimple_build_assign_stat (l, r MEM_STAT_INFO) void extract_ops_from_tree_1 (tree, enum tree_code *, tree *, tree *, tree *); gimple gimple_build_assign_with_ops_stat (enum tree_code, tree, tree, tree, tree MEM_STAT_DECL); #define gimple_build_assign_with_ops(c,o1,o2,o3) \ gimple_build_assign_with_ops_stat (c, o1, o2, o3, NULL_TREE MEM_STAT_INFO) #define gimple_build_assign_with_ops3(c,o1,o2,o3,o4) \ gimple_build_assign_with_ops_stat (c, o1, o2, o3, o4 MEM_STAT_INFO) gimple gimple_build_debug_bind_stat (tree, tree, gimple MEM_STAT_DECL); #define gimple_build_debug_bind(var,val,stmt) \ gimple_build_debug_bind_stat ((var), (val), (stmt) MEM_STAT_INFO) gimple gimple_build_call_vec (tree, VEC(tree, heap) *); gimple gimple_build_call (tree, unsigned, ...); gimple gimple_build_call_from_tree (tree); gimple gimplify_assign (tree, tree, gimple_seq *); gimple gimple_build_cond (enum tree_code, tree, tree, tree, tree); gimple gimple_build_label (tree label); gimple gimple_build_goto (tree dest); gimple gimple_build_nop (void); gimple gimple_build_bind (tree, gimple_seq, tree); gimple gimple_build_asm_vec (const char *, VEC(tree,gc) *, VEC(tree,gc) *, VEC(tree,gc) *, VEC(tree,gc) *); gimple gimple_build_catch (tree, gimple_seq); gimple gimple_build_eh_filter (tree, gimple_seq); gimple gimple_build_eh_must_not_throw (tree); gimple gimple_build_try (gimple_seq, gimple_seq, enum gimple_try_flags); gimple gimple_build_wce (gimple_seq); gimple gimple_build_resx (int); gimple gimple_build_eh_dispatch (int); gimple gimple_build_switch_nlabels (unsigned, tree, tree); gimple gimple_build_switch (unsigned, tree, tree, ...); gimple gimple_build_switch_vec (tree, tree, VEC(tree,heap) *); gimple gimple_build_omp_parallel (gimple_seq, tree, tree, tree); gimple gimple_build_omp_task (gimple_seq, tree, tree, tree, tree, tree, tree); gimple gimple_build_omp_for (gimple_seq, tree, size_t, gimple_seq); gimple gimple_build_omp_critical (gimple_seq, tree); gimple gimple_build_omp_section (gimple_seq); gimple gimple_build_omp_continue (tree, tree); gimple gimple_build_omp_master (gimple_seq); gimple gimple_build_omp_return (bool); gimple gimple_build_omp_ordered (gimple_seq); gimple gimple_build_omp_sections (gimple_seq, tree); gimple gimple_build_omp_sections_switch (void); gimple gimple_build_omp_single (gimple_seq, tree); gimple gimple_build_cdt (tree, tree); gimple gimple_build_omp_atomic_load (tree, tree); gimple gimple_build_omp_atomic_store (tree); gimple gimple_build_predict (enum br_predictor, enum prediction); enum gimple_statement_structure_enum gss_for_assign (enum tree_code); void sort_case_labels (VEC(tree,heap) *); void gimple_set_body (tree, gimple_seq); gimple_seq gimple_body (tree); bool gimple_has_body_p (tree); gimple_seq gimple_seq_alloc (void); void gimple_seq_free (gimple_seq); void gimple_seq_add_seq (gimple_seq *, gimple_seq); gimple_seq gimple_seq_copy (gimple_seq); int gimple_call_flags (const_gimple); int gimple_call_return_flags (const_gimple); int gimple_call_arg_flags (const_gimple, unsigned); void gimple_call_reset_alias_info (gimple); bool gimple_assign_copy_p (gimple); bool gimple_assign_ssa_name_copy_p (gimple); bool gimple_assign_unary_nop_p (gimple); void gimple_set_bb (gimple, struct basic_block_def *); void gimple_assign_set_rhs_from_tree (gimple_stmt_iterator *, tree); void gimple_assign_set_rhs_with_ops_1 
(gimple_stmt_iterator *, enum tree_code, tree, tree, tree); tree gimple_get_lhs (const_gimple); void gimple_set_lhs (gimple, tree); void gimple_replace_lhs (gimple, tree); gimple gimple_copy (gimple); void gimple_set_modified (gimple, bool); void gimple_cond_get_ops_from_tree (tree, enum tree_code *, tree *, tree *); gimple gimple_build_cond_from_tree (tree, tree, tree); void gimple_cond_set_condition_from_tree (gimple, tree); bool gimple_has_side_effects (const_gimple); bool gimple_rhs_has_side_effects (const_gimple); bool gimple_could_trap_p (gimple); bool gimple_could_trap_p_1 (gimple, bool, bool); bool gimple_assign_rhs_could_trap_p (gimple); void gimple_regimplify_operands (gimple, gimple_stmt_iterator *); bool empty_body_p (gimple_seq); unsigned get_gimple_rhs_num_ops (enum tree_code); #define gimple_alloc(c, n) gimple_alloc_stat (c, n MEM_STAT_INFO) gimple gimple_alloc_stat (enum gimple_code, unsigned MEM_STAT_DECL); const char *gimple_decl_printable_name (tree, int); bool gimple_fold_call (gimple_stmt_iterator *gsi, bool inplace); tree gimple_get_virt_mehtod_for_binfo (HOST_WIDE_INT, tree, tree *, bool); void gimple_adjust_this_by_delta (gimple_stmt_iterator *, tree); /* Returns true iff T is a valid GIMPLE statement. */ extern bool is_gimple_stmt (tree); /* Returns true iff TYPE is a valid type for a scalar register variable. */ extern bool is_gimple_reg_type (tree); /* Returns true iff T is a scalar register variable. */ extern bool is_gimple_reg (tree); /* Returns true iff T is any sort of variable. */ extern bool is_gimple_variable (tree); /* Returns true iff T is any sort of symbol. */ extern bool is_gimple_id (tree); /* Returns true iff T is a variable or an INDIRECT_REF (of a variable). */ extern bool is_gimple_min_lval (tree); /* Returns true iff T is something whose address can be taken. */ extern bool is_gimple_addressable (tree); /* Returns true iff T is any valid GIMPLE lvalue. */ extern bool is_gimple_lvalue (tree); /* Returns true iff T is a GIMPLE address. */ bool is_gimple_address (const_tree); /* Returns true iff T is a GIMPLE invariant address. */ bool is_gimple_invariant_address (const_tree); /* Returns true iff T is a GIMPLE invariant address at interprocedural level. */ bool is_gimple_ip_invariant_address (const_tree); /* Returns true iff T is a valid GIMPLE constant. */ bool is_gimple_constant (const_tree); /* Returns true iff T is a GIMPLE restricted function invariant. */ extern bool is_gimple_min_invariant (const_tree); /* Returns true iff T is a GIMPLE restricted interprecodural invariant. */ extern bool is_gimple_ip_invariant (const_tree); /* Returns true iff T is a GIMPLE rvalue. */ extern bool is_gimple_val (tree); /* Returns true iff T is a GIMPLE asm statement input. */ extern bool is_gimple_asm_val (tree); /* Returns true iff T is a valid address operand of a MEM_REF. */ bool is_gimple_mem_ref_addr (tree); /* Returns true iff T is a valid rhs for a MODIFY_EXPR where the LHS is a GIMPLE temporary, a renamed user variable, or something else, respectively. */ extern bool is_gimple_reg_rhs (tree); extern bool is_gimple_mem_rhs (tree); /* Returns true iff T is a valid if-statement condition. */ extern bool is_gimple_condexpr (tree); /* Returns true iff T is a variable that does not need to live in memory. */ extern bool is_gimple_non_addressable (tree t); /* Returns true iff T is a valid call address expression. */ extern bool is_gimple_call_addr (tree); /* If T makes a function call, returns the CALL_EXPR operand. 
*/ extern tree get_call_expr_in (tree t); extern void recalculate_side_effects (tree); extern bool gimple_compare_field_offset (tree, tree); extern tree gimple_register_type (tree); extern tree gimple_register_canonical_type (tree); enum gtc_mode { GTC_MERGE = 0, GTC_DIAG = 1 }; extern bool gimple_types_compatible_p (tree, tree, enum gtc_mode); extern void print_gimple_types_stats (void); extern void free_gimple_type_tables (void); extern tree gimple_unsigned_type (tree); extern tree gimple_signed_type (tree); extern alias_set_type gimple_get_alias_set (tree); extern void count_uses_and_derefs (tree, gimple, unsigned *, unsigned *, unsigned *); extern bool walk_stmt_load_store_addr_ops (gimple, void *, bool (*)(gimple, tree, void *), bool (*)(gimple, tree, void *), bool (*)(gimple, tree, void *)); extern bool walk_stmt_load_store_ops (gimple, void *, bool (*)(gimple, tree, void *), bool (*)(gimple, tree, void *)); extern bool gimple_ior_addresses_taken (bitmap, gimple); extern bool gimple_call_builtin_p (gimple, enum built_in_function); /* In gimplify.c */ extern tree create_tmp_var_raw (tree, const char *); extern tree create_tmp_var_name (const char *); extern tree create_tmp_var (tree, const char *); extern tree create_tmp_reg (tree, const char *); extern tree get_initialized_tmp_var (tree, gimple_seq *, gimple_seq *); extern tree get_formal_tmp_var (tree, gimple_seq *); extern void declare_vars (tree, gimple, bool); extern void annotate_all_with_location (gimple_seq, location_t); /* Validation of GIMPLE expressions. Note that these predicates only check the basic form of the expression, they don't recurse to make sure that underlying nodes are also of the right form. */ typedef bool (*gimple_predicate)(tree); /* FIXME we should deduce this from the predicate. */ enum fallback { fb_none = 0, /* Do not generate a temporary. */ fb_rvalue = 1, /* Generate an rvalue to hold the result of a gimplified expression. */ fb_lvalue = 2, /* Generate an lvalue to hold the result of a gimplified expression. */ fb_mayfail = 4, /* Gimplification may fail. Error issued afterwards. */ fb_either= fb_rvalue | fb_lvalue }; typedef int fallback_t; enum gimplify_status { GS_ERROR = -2, /* Something Bad Seen. */ GS_UNHANDLED = -1, /* A langhook result for "I dunno". */ GS_OK = 0, /* We did something, maybe more to do. */ GS_ALL_DONE = 1 /* The expression is fully gimplified. */ }; struct gimplify_ctx { struct gimplify_ctx *prev_context; VEC(gimple,heap) *bind_expr_stack; tree temps; gimple_seq conditional_cleanups; tree exit_label; tree return_temp; VEC(tree,heap) *case_labels; /* The formal temporary table. Should this be persistent? */ htab_t temp_htab; int conditions; bool save_stack; bool into_ssa; bool allow_rhs_cond_expr; }; extern enum gimplify_status gimplify_expr (tree *, gimple_seq *, gimple_seq *, bool (*) (tree), fallback_t); extern void gimplify_type_sizes (tree, gimple_seq *); extern void gimplify_one_sizepos (tree *, gimple_seq *); extern bool gimplify_stmt (tree *, gimple_seq *); extern gimple gimplify_body (tree *, tree, bool); extern void push_gimplify_context (struct gimplify_ctx *); extern void pop_gimplify_context (gimple); extern void gimplify_and_add (tree, gimple_seq *); /* Miscellaneous helpers. 
*/ extern void gimple_add_tmp_var (tree); extern gimple gimple_current_bind_expr (void); extern VEC(gimple, heap) *gimple_bind_expr_stack (void); extern tree voidify_wrapper_expr (tree, tree); extern tree build_and_jump (tree *); extern tree force_labels_r (tree *, int *, void *); extern enum gimplify_status gimplify_va_arg_expr (tree *, gimple_seq *, gimple_seq *); struct gimplify_omp_ctx; extern void omp_firstprivatize_variable (struct gimplify_omp_ctx *, tree); extern tree gimple_boolify (tree); extern gimple_predicate rhs_predicate_for (tree); extern tree canonicalize_cond_expr_cond (tree); /* In omp-low.c. */ extern tree omp_reduction_init (tree, tree); /* In tree-nested.c. */ extern void lower_nested_functions (tree); extern void insert_field_into_struct (tree, tree); /* In gimplify.c. */ extern void gimplify_function_tree (tree); /* In cfgexpand.c. */ extern tree gimple_assign_rhs_to_tree (gimple); /* In builtins.c */ extern bool validate_gimple_arglist (const_gimple, ...); /* In tree-ssa.c */ extern bool tree_ssa_useless_type_conversion (tree); extern tree tree_ssa_strip_useless_type_conversions (tree); extern bool useless_type_conversion_p (tree, tree); extern bool types_compatible_p (tree, tree); /* Return the code for GIMPLE statement G. */ static inline enum gimple_code gimple_code (const_gimple g) { return g->gsbase.code; } /* Return the GSS code used by a GIMPLE code. */ static inline enum gimple_statement_structure_enum gss_for_code (enum gimple_code code) { gcc_gimple_checking_assert ((unsigned int)code < LAST_AND_UNUSED_GIMPLE_CODE); return gss_for_code_[code]; } /* Return which GSS code is used by GS. */ static inline enum gimple_statement_structure_enum gimple_statement_structure (gimple gs) { return gss_for_code (gimple_code (gs)); } /* Return true if statement G has sub-statements. This is only true for High GIMPLE statements. */ static inline bool gimple_has_substatements (gimple g) { switch (gimple_code (g)) { case GIMPLE_BIND: case GIMPLE_CATCH: case GIMPLE_EH_FILTER: case GIMPLE_TRY: case GIMPLE_OMP_FOR: case GIMPLE_OMP_MASTER: case GIMPLE_OMP_ORDERED: case GIMPLE_OMP_SECTION: case GIMPLE_OMP_PARALLEL: case GIMPLE_OMP_TASK: case GIMPLE_OMP_SECTIONS: case GIMPLE_OMP_SINGLE: case GIMPLE_OMP_CRITICAL: case GIMPLE_WITH_CLEANUP_EXPR: return true; default: return false; } } /* Return the basic block holding statement G. */ static inline struct basic_block_def * gimple_bb (const_gimple g) { return g->gsbase.bb; } /* Return the lexical scope block holding statement G. */ static inline tree gimple_block (const_gimple g) { return g->gsbase.block; } /* Set BLOCK to be the lexical scope block holding statement G. */ static inline void gimple_set_block (gimple g, tree block) { g->gsbase.block = block; } /* Return location information for statement G. */ static inline location_t gimple_location (const_gimple g) { return g->gsbase.location; } /* Return pointer to location information for statement G. */ static inline const location_t * gimple_location_ptr (const_gimple g) { return &g->gsbase.location; } /* Set location information for statement G. */ static inline void gimple_set_location (gimple g, location_t location) { g->gsbase.location = location; } /* Return true if G contains location information. */ static inline bool gimple_has_location (const_gimple g) { return gimple_location (g) != UNKNOWN_LOCATION; } /* Return the file name of the location of STMT. 
*/ static inline const char * gimple_filename (const_gimple stmt) { return LOCATION_FILE (gimple_location (stmt)); } /* Return the line number of the location of STMT. */ static inline int gimple_lineno (const_gimple stmt) { return LOCATION_LINE (gimple_location (stmt)); } /* Determine whether SEQ is a singleton. */ static inline bool gimple_seq_singleton_p (gimple_seq seq) { return ((gimple_seq_first (seq) != NULL) && (gimple_seq_first (seq) == gimple_seq_last (seq))); } /* Return true if no warnings should be emitted for statement STMT. */ static inline bool gimple_no_warning_p (const_gimple stmt) { return stmt->gsbase.no_warning; } /* Set the no_warning flag of STMT to NO_WARNING. */ static inline void gimple_set_no_warning (gimple stmt, bool no_warning) { stmt->gsbase.no_warning = (unsigned) no_warning; } /* Set the visited status on statement STMT to VISITED_P. */ static inline void gimple_set_visited (gimple stmt, bool visited_p) { stmt->gsbase.visited = (unsigned) visited_p; } /* Return the visited status for statement STMT. */ static inline bool gimple_visited_p (gimple stmt) { return stmt->gsbase.visited; } /* Set pass local flag PLF on statement STMT to VAL_P. */ static inline void gimple_set_plf (gimple stmt, enum plf_mask plf, bool val_p) { if (val_p) stmt->gsbase.plf |= (unsigned int) plf; else stmt->gsbase.plf &= ~((unsigned int) plf); } /* Return the value of pass local flag PLF on statement STMT. */ static inline unsigned int gimple_plf (gimple stmt, enum plf_mask plf) { return stmt->gsbase.plf & ((unsigned int) plf); } /* Set the UID of statement. */ static inline void gimple_set_uid (gimple g, unsigned uid) { g->gsbase.uid = uid; } /* Return the UID of statement. */ static inline unsigned gimple_uid (const_gimple g) { return g->gsbase.uid; } /* Return true if GIMPLE statement G has register or memory operands. */ static inline bool gimple_has_ops (const_gimple g) { return gimple_code (g) >= GIMPLE_COND && gimple_code (g) <= GIMPLE_RETURN; } /* Return true if GIMPLE statement G has memory operands. */ static inline bool gimple_has_mem_ops (const_gimple g) { return gimple_code (g) >= GIMPLE_ASSIGN && gimple_code (g) <= GIMPLE_RETURN; } /* Return the set of DEF operands for statement G. */ static inline struct def_optype_d * gimple_def_ops (const_gimple g) { if (!gimple_has_ops (g)) return NULL; return g->gsops.opbase.def_ops; } /* Set DEF to be the set of DEF operands for statement G. */ static inline void gimple_set_def_ops (gimple g, struct def_optype_d *def) { gcc_gimple_checking_assert (gimple_has_ops (g)); g->gsops.opbase.def_ops = def; } /* Return the set of USE operands for statement G. */ static inline struct use_optype_d * gimple_use_ops (const_gimple g) { if (!gimple_has_ops (g)) return NULL; return g->gsops.opbase.use_ops; } /* Set USE to be the set of USE operands for statement G. */ static inline void gimple_set_use_ops (gimple g, struct use_optype_d *use) { gcc_gimple_checking_assert (gimple_has_ops (g)); g->gsops.opbase.use_ops = use; } /* Return the set of VUSE operand for statement G. */ static inline use_operand_p gimple_vuse_op (const_gimple g) { struct use_optype_d *ops; if (!gimple_has_mem_ops (g)) return NULL_USE_OPERAND_P; ops = g->gsops.opbase.use_ops; if (ops && USE_OP_PTR (ops)->use == &g->gsmembase.vuse) return USE_OP_PTR (ops); return NULL_USE_OPERAND_P; } /* Return the set of VDEF operand for statement G. 
*/ static inline def_operand_p gimple_vdef_op (const_gimple g) { struct def_optype_d *ops; if (!gimple_has_mem_ops (g)) return NULL_DEF_OPERAND_P; ops = g->gsops.opbase.def_ops; if (ops && DEF_OP_PTR (ops) == &g->gsmembase.vdef) return DEF_OP_PTR (ops); return NULL_DEF_OPERAND_P; } /* Return the single VUSE operand of the statement G. */ static inline tree gimple_vuse (const_gimple g) { if (!gimple_has_mem_ops (g)) return NULL_TREE; return g->gsmembase.vuse; } /* Return the single VDEF operand of the statement G. */ static inline tree gimple_vdef (const_gimple g) { if (!gimple_has_mem_ops (g)) return NULL_TREE; return g->gsmembase.vdef; } /* Return the single VUSE operand of the statement G. */ static inline tree * gimple_vuse_ptr (gimple g) { if (!gimple_has_mem_ops (g)) return NULL; return &g->gsmembase.vuse; } /* Return the single VDEF operand of the statement G. */ static inline tree * gimple_vdef_ptr (gimple g) { if (!gimple_has_mem_ops (g)) return NULL; return &g->gsmembase.vdef; } /* Set the single VUSE operand of the statement G. */ static inline void gimple_set_vuse (gimple g, tree vuse) { gcc_gimple_checking_assert (gimple_has_mem_ops (g)); g->gsmembase.vuse = vuse; } /* Set the single VDEF operand of the statement G. */ static inline void gimple_set_vdef (gimple g, tree vdef) { gcc_gimple_checking_assert (gimple_has_mem_ops (g)); g->gsmembase.vdef = vdef; } /* Return true if statement G has operands and the modified field has been set. */ static inline bool gimple_modified_p (const_gimple g) { return (gimple_has_ops (g)) ? (bool) g->gsbase.modified : false; } /* Return the tree code for the expression computed by STMT. This is only valid for GIMPLE_COND, GIMPLE_CALL and GIMPLE_ASSIGN. For GIMPLE_CALL, return CALL_EXPR as the expression code for consistency. This is useful when the caller needs to deal with the three kinds of computation that GIMPLE supports. */ static inline enum tree_code gimple_expr_code (const_gimple stmt) { enum gimple_code code = gimple_code (stmt); if (code == GIMPLE_ASSIGN || code == GIMPLE_COND) return (enum tree_code) stmt->gsbase.subcode; else { gcc_gimple_checking_assert (code == GIMPLE_CALL); return CALL_EXPR; } } /* Mark statement S as modified, and update it. */ static inline void update_stmt (gimple s) { if (gimple_has_ops (s)) { gimple_set_modified (s, true); update_stmt_operands (s); } } /* Update statement S if it has been optimized. */ static inline void update_stmt_if_modified (gimple s) { if (gimple_modified_p (s)) update_stmt_operands (s); } /* Return true if statement STMT contains volatile operands. */ static inline bool gimple_has_volatile_ops (const_gimple stmt) { if (gimple_has_mem_ops (stmt)) return stmt->gsbase.has_volatile_ops; else return false; } /* Set the HAS_VOLATILE_OPS flag to VOLATILEP. */ static inline void gimple_set_has_volatile_ops (gimple stmt, bool volatilep) { if (gimple_has_mem_ops (stmt)) stmt->gsbase.has_volatile_ops = (unsigned) volatilep; } /* Return true if statement STMT may access memory. */ static inline bool gimple_references_memory_p (gimple stmt) { return gimple_has_mem_ops (stmt) && gimple_vuse (stmt); } /* Return the subcode for OMP statement S. */ static inline unsigned gimple_omp_subcode (const_gimple s) { gcc_gimple_checking_assert (gimple_code (s) >= GIMPLE_OMP_ATOMIC_LOAD && gimple_code (s) <= GIMPLE_OMP_SINGLE); return s->gsbase.subcode; } /* Set the subcode for OMP statement S to SUBCODE. 
*/ static inline void gimple_omp_set_subcode (gimple s, unsigned int subcode) { /* We only have 16 bits for the subcode. Assert that we are not overflowing it. */ gcc_gimple_checking_assert (subcode < (1 << 16)); s->gsbase.subcode = subcode; } /* Set the nowait flag on OMP_RETURN statement S. */ static inline void gimple_omp_return_set_nowait (gimple s) { GIMPLE_CHECK (s, GIMPLE_OMP_RETURN); s->gsbase.subcode |= GF_OMP_RETURN_NOWAIT; } /* Return true if OMP return statement G has the GF_OMP_RETURN_NOWAIT flag set. */ static inline bool gimple_omp_return_nowait_p (const_gimple g) { GIMPLE_CHECK (g, GIMPLE_OMP_RETURN); return (gimple_omp_subcode (g) & GF_OMP_RETURN_NOWAIT) != 0; } /* Return true if OMP section statement G has the GF_OMP_SECTION_LAST flag set. */ static inline bool gimple_omp_section_last_p (const_gimple g) { GIMPLE_CHECK (g, GIMPLE_OMP_SECTION); return (gimple_omp_subcode (g) & GF_OMP_SECTION_LAST) != 0; } /* Set the GF_OMP_SECTION_LAST flag on G. */ static inline void gimple_omp_section_set_last (gimple g) { GIMPLE_CHECK (g, GIMPLE_OMP_SECTION); g->gsbase.subcode |= GF_OMP_SECTION_LAST; } /* Return true if OMP parallel statement G has the GF_OMP_PARALLEL_COMBINED flag set. */ static inline bool gimple_omp_parallel_combined_p (const_gimple g) { GIMPLE_CHECK (g, GIMPLE_OMP_PARALLEL); return (gimple_omp_subcode (g) & GF_OMP_PARALLEL_COMBINED) != 0; } /* Set the GF_OMP_PARALLEL_COMBINED field in G depending on the boolean value of COMBINED_P. */ static inline void gimple_omp_parallel_set_combined_p (gimple g, bool combined_p) { GIMPLE_CHECK (g, GIMPLE_OMP_PARALLEL); if (combined_p) g->gsbase.subcode |= GF_OMP_PARALLEL_COMBINED; else g->gsbase.subcode &= ~GF_OMP_PARALLEL_COMBINED; } /* Return the number of operands for statement GS. */ static inline unsigned gimple_num_ops (const_gimple gs) { return gs->gsbase.num_ops; } /* Set the number of operands for statement GS. */ static inline void gimple_set_num_ops (gimple gs, unsigned num_ops) { gs->gsbase.num_ops = num_ops; } /* Return the array of operands for statement GS. */ static inline tree * gimple_ops (gimple gs) { size_t off; /* All the tuples have their operand vector at the very bottom of the structure. Note that those structures that do not have an operand vector have a zero offset. */ off = gimple_ops_offset_[gimple_statement_structure (gs)]; gcc_gimple_checking_assert (off != 0); return (tree *) ((char *) gs + off); } /* Return operand I for statement GS. */ static inline tree gimple_op (const_gimple gs, unsigned i) { if (gimple_has_ops (gs)) { gcc_gimple_checking_assert (i < gimple_num_ops (gs)); return gimple_ops (CONST_CAST_GIMPLE (gs))[i]; } else return NULL_TREE; } /* Return a pointer to operand I for statement GS. */ static inline tree * gimple_op_ptr (const_gimple gs, unsigned i) { if (gimple_has_ops (gs)) { gcc_gimple_checking_assert (i < gimple_num_ops (gs)); return gimple_ops (CONST_CAST_GIMPLE (gs)) + i; } else return NULL; } /* Set operand I of statement GS to OP. */ static inline void gimple_set_op (gimple gs, unsigned i, tree op) { gcc_gimple_checking_assert (gimple_has_ops (gs) && i < gimple_num_ops (gs)); /* Note. It may be tempting to assert that OP matches is_gimple_operand, but that would be wrong. Different tuples accept slightly different sets of tree operands. Each caller should perform its own validation. */ gimple_ops (gs)[i] = op; } /* Return true if GS is a GIMPLE_ASSIGN. 
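   (The definition follows the illustrative sketch below.)  */

/* Illustrative usage sketch, not part of the original header: walking the
   generic operand vector with gimple_num_ops and gimple_op.  The helper
   name example_count_nonnull_ops is hypothetical.  */

static inline unsigned
example_count_nonnull_ops (const_gimple stmt)
{
  unsigned i, count = 0;

  /* Slots within gimple_num_ops may still hold NULL_TREE (for instance the
     LHS slot of a call with no result), so count only the non-empty ones.  */
  for (i = 0; i < gimple_num_ops (stmt); i++)
    if (gimple_op (stmt, i) != NULL_TREE)
      count++;
  return count;
}

/* Return true if GS is a GIMPLE_ASSIGN.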
*/ static inline bool is_gimple_assign (const_gimple gs) { return gimple_code (gs) == GIMPLE_ASSIGN; } /* Determine if expression CODE is one of the valid expressions that can be used on the RHS of GIMPLE assignments. */ static inline enum gimple_rhs_class get_gimple_rhs_class (enum tree_code code) { return (enum gimple_rhs_class) gimple_rhs_class_table[(int) code]; } /* Return the LHS of assignment statement GS. */ static inline tree gimple_assign_lhs (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_ASSIGN); return gimple_op (gs, 0); } /* Return a pointer to the LHS of assignment statement GS. */ static inline tree * gimple_assign_lhs_ptr (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_ASSIGN); return gimple_op_ptr (gs, 0); } /* Set LHS to be the LHS operand of assignment statement GS. */ static inline void gimple_assign_set_lhs (gimple gs, tree lhs) { GIMPLE_CHECK (gs, GIMPLE_ASSIGN); gimple_set_op (gs, 0, lhs); if (lhs && TREE_CODE (lhs) == SSA_NAME) SSA_NAME_DEF_STMT (lhs) = gs; } /* Return the first operand on the RHS of assignment statement GS. */ static inline tree gimple_assign_rhs1 (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_ASSIGN); return gimple_op (gs, 1); } /* Return a pointer to the first operand on the RHS of assignment statement GS. */ static inline tree * gimple_assign_rhs1_ptr (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_ASSIGN); return gimple_op_ptr (gs, 1); } /* Set RHS to be the first operand on the RHS of assignment statement GS. */ static inline void gimple_assign_set_rhs1 (gimple gs, tree rhs) { GIMPLE_CHECK (gs, GIMPLE_ASSIGN); gimple_set_op (gs, 1, rhs); } /* Return the second operand on the RHS of assignment statement GS. If GS does not have two operands, NULL is returned instead. */ static inline tree gimple_assign_rhs2 (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_ASSIGN); if (gimple_num_ops (gs) >= 3) return gimple_op (gs, 2); else return NULL_TREE; } /* Return a pointer to the second operand on the RHS of assignment statement GS. */ static inline tree * gimple_assign_rhs2_ptr (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_ASSIGN); return gimple_op_ptr (gs, 2); } /* Set RHS to be the second operand on the RHS of assignment statement GS. */ static inline void gimple_assign_set_rhs2 (gimple gs, tree rhs) { GIMPLE_CHECK (gs, GIMPLE_ASSIGN); gimple_set_op (gs, 2, rhs); } /* Return the third operand on the RHS of assignment statement GS. If GS does not have two operands, NULL is returned instead. */ static inline tree gimple_assign_rhs3 (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_ASSIGN); if (gimple_num_ops (gs) >= 4) return gimple_op (gs, 3); else return NULL_TREE; } /* Return a pointer to the third operand on the RHS of assignment statement GS. */ static inline tree * gimple_assign_rhs3_ptr (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_ASSIGN); return gimple_op_ptr (gs, 3); } /* Set RHS to be the third operand on the RHS of assignment statement GS. */ static inline void gimple_assign_set_rhs3 (gimple gs, tree rhs) { GIMPLE_CHECK (gs, GIMPLE_ASSIGN); gimple_set_op (gs, 3, rhs); } /* A wrapper around gimple_assign_set_rhs_with_ops_1, for callers which expect to see only a maximum of two operands. */ static inline void gimple_assign_set_rhs_with_ops (gimple_stmt_iterator *gsi, enum tree_code code, tree op1, tree op2) { gimple_assign_set_rhs_with_ops_1 (gsi, code, op1, op2, NULL); } /* A wrapper around extract_ops_from_tree_1, for callers which expect to see only a maximum of two operands. 
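   (The definition follows the illustrative sketch below.)  */

/* Illustrative usage sketch, not part of the original header: pulling the
   LHS and the first two RHS operands out of a GIMPLE_ASSIGN.  The helper
   name example_assign_operands is hypothetical; STMT must be an
   assignment.  */

static inline void
example_assign_operands (const_gimple stmt, tree *lhs, tree *rhs1, tree *rhs2)
{
  *lhs = gimple_assign_lhs (stmt);
  *rhs1 = gimple_assign_rhs1 (stmt);
  /* Unary and single-RHS assignments have no second operand, in which
     case gimple_assign_rhs2 hands back NULL_TREE.  */
  *rhs2 = gimple_assign_rhs2 (stmt);
}

/* A wrapper around extract_ops_from_tree_1, for callers which expect to see
   only a maximum of two operands.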
*/ static inline void extract_ops_from_tree (tree expr, enum tree_code *code, tree *op0, tree *op1) { tree op2; extract_ops_from_tree_1 (expr, code, op0, op1, &op2); gcc_assert (op2 == NULL_TREE); } /* Returns true if GS is a nontemporal move. */ static inline bool gimple_assign_nontemporal_move_p (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_ASSIGN); return gs->gsbase.nontemporal_move; } /* Sets nontemporal move flag of GS to NONTEMPORAL. */ static inline void gimple_assign_set_nontemporal_move (gimple gs, bool nontemporal) { GIMPLE_CHECK (gs, GIMPLE_ASSIGN); gs->gsbase.nontemporal_move = nontemporal; } /* Return the code of the expression computed on the rhs of assignment statement GS. In case that the RHS is a single object, returns the tree code of the object. */ static inline enum tree_code gimple_assign_rhs_code (const_gimple gs) { enum tree_code code; GIMPLE_CHECK (gs, GIMPLE_ASSIGN); code = (enum tree_code) gs->gsbase.subcode; /* While we initially set subcode to the TREE_CODE of the rhs for GIMPLE_SINGLE_RHS assigns we do not update that subcode to stay in sync when we rewrite stmts into SSA form or do SSA propagations. */ if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS) code = TREE_CODE (gimple_assign_rhs1 (gs)); return code; } /* Set CODE to be the code for the expression computed on the RHS of assignment S. */ static inline void gimple_assign_set_rhs_code (gimple s, enum tree_code code) { GIMPLE_CHECK (s, GIMPLE_ASSIGN); s->gsbase.subcode = code; } /* Return the gimple rhs class of the code of the expression computed on the rhs of assignment statement GS. This will never return GIMPLE_INVALID_RHS. */ static inline enum gimple_rhs_class gimple_assign_rhs_class (const_gimple gs) { return get_gimple_rhs_class (gimple_assign_rhs_code (gs)); } /* Return true if GS is an assignment with a singleton RHS, i.e., there is no operator associated with the assignment itself. Unlike gimple_assign_copy_p, this predicate returns true for any RHS operand, including those that perform an operation and do not have the semantics of a copy, such as COND_EXPR. */ static inline bool gimple_assign_single_p (gimple gs) { return (is_gimple_assign (gs) && gimple_assign_rhs_class (gs) == GIMPLE_SINGLE_RHS); } /* Return true if S is a type-cast assignment. */ static inline bool gimple_assign_cast_p (gimple s) { if (is_gimple_assign (s)) { enum tree_code sc = gimple_assign_rhs_code (s); return CONVERT_EXPR_CODE_P (sc) || sc == VIEW_CONVERT_EXPR || sc == FIX_TRUNC_EXPR; } return false; } /* Return true if GS is a GIMPLE_CALL. */ static inline bool is_gimple_call (const_gimple gs) { return gimple_code (gs) == GIMPLE_CALL; } /* Return the LHS of call statement GS. */ static inline tree gimple_call_lhs (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_CALL); return gimple_op (gs, 0); } /* Return a pointer to the LHS of call statement GS. */ static inline tree * gimple_call_lhs_ptr (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_CALL); return gimple_op_ptr (gs, 0); } /* Set LHS to be the LHS operand of call statement GS. */ static inline void gimple_call_set_lhs (gimple gs, tree lhs) { GIMPLE_CHECK (gs, GIMPLE_CALL); gimple_set_op (gs, 0, lhs); if (lhs && TREE_CODE (lhs) == SSA_NAME) SSA_NAME_DEF_STMT (lhs) = gs; } /* Return the tree node representing the function called by call statement GS. */ static inline tree gimple_call_fn (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_CALL); return gimple_op (gs, 1); } /* Return a pointer to the tree node representing the function called by call statement GS. 
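   (The definition follows the illustrative sketch below.)  */

/* Illustrative usage sketch, not part of the original header: a hypothetical
   predicate for plain SSA-to-SSA copies, built from gimple_assign_single_p
   and gimple_assign_rhs1.  */

static inline bool
example_ssa_copy_p (gimple stmt)
{
  /* Single-RHS assignments carry no operator of their own; when the lone
     RHS operand is an SSA name the statement is a plain copy.  */
  return gimple_assign_single_p (stmt)
         && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME;
}

/* Return a pointer to the tree node representing the function called by call
   statement GS.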
*/ static inline tree * gimple_call_fn_ptr (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_CALL); return gimple_op_ptr (gs, 1); } /* Set FN to be the function called by call statement GS. */ static inline void gimple_call_set_fn (gimple gs, tree fn) { GIMPLE_CHECK (gs, GIMPLE_CALL); gimple_set_op (gs, 1, fn); } /* Set FNDECL to be the function called by call statement GS. */ static inline void gimple_call_set_fndecl (gimple gs, tree decl) { GIMPLE_CHECK (gs, GIMPLE_CALL); gimple_set_op (gs, 1, build_fold_addr_expr_loc (gimple_location (gs), decl)); } /* If a given GIMPLE_CALL's callee is a FUNCTION_DECL, return it. Otherwise return NULL. This function is analogous to get_callee_fndecl in tree land. */ static inline tree gimple_call_fndecl (const_gimple gs) { tree addr = gimple_call_fn (gs); if (TREE_CODE (addr) == ADDR_EXPR) { tree fndecl = TREE_OPERAND (addr, 0); if (TREE_CODE (fndecl) == MEM_REF) { if (TREE_CODE (TREE_OPERAND (fndecl, 0)) == ADDR_EXPR && integer_zerop (TREE_OPERAND (fndecl, 1))) return TREE_OPERAND (TREE_OPERAND (fndecl, 0), 0); else return NULL_TREE; } return TREE_OPERAND (addr, 0); } return NULL_TREE; } /* Return the type returned by call statement GS. */ static inline tree gimple_call_return_type (const_gimple gs) { tree fn = gimple_call_fn (gs); tree type = TREE_TYPE (fn); /* See through the pointer. */ type = TREE_TYPE (type); /* The type returned by a FUNCTION_DECL is the type of its function type. */ return TREE_TYPE (type); } /* Return the static chain for call statement GS. */ static inline tree gimple_call_chain (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_CALL); return gimple_op (gs, 2); } /* Return a pointer to the static chain for call statement GS. */ static inline tree * gimple_call_chain_ptr (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_CALL); return gimple_op_ptr (gs, 2); } /* Set CHAIN to be the static chain for call statement GS. */ static inline void gimple_call_set_chain (gimple gs, tree chain) { GIMPLE_CHECK (gs, GIMPLE_CALL); gimple_set_op (gs, 2, chain); } /* Return the number of arguments used by call statement GS. */ static inline unsigned gimple_call_num_args (const_gimple gs) { unsigned num_ops; GIMPLE_CHECK (gs, GIMPLE_CALL); num_ops = gimple_num_ops (gs); return num_ops - 3; } /* Return the argument at position INDEX for call statement GS. */ static inline tree gimple_call_arg (const_gimple gs, unsigned index) { GIMPLE_CHECK (gs, GIMPLE_CALL); return gimple_op (gs, index + 3); } /* Return a pointer to the argument at position INDEX for call statement GS. */ static inline tree * gimple_call_arg_ptr (const_gimple gs, unsigned index) { GIMPLE_CHECK (gs, GIMPLE_CALL); return gimple_op_ptr (gs, index + 3); } /* Set ARG to be the argument at position INDEX for call statement GS. */ static inline void gimple_call_set_arg (gimple gs, unsigned index, tree arg) { GIMPLE_CHECK (gs, GIMPLE_CALL); gimple_set_op (gs, index + 3, arg); } /* If TAIL_P is true, mark call statement S as being a tail call (i.e., a call just before the exit of a function). These calls are candidate for tail call optimization. */ static inline void gimple_call_set_tail (gimple s, bool tail_p) { GIMPLE_CHECK (s, GIMPLE_CALL); if (tail_p) s->gsbase.subcode |= GF_CALL_TAILCALL; else s->gsbase.subcode &= ~GF_CALL_TAILCALL; } /* Return true if GIMPLE_CALL S is marked as a tail call. */ static inline bool gimple_call_tail_p (gimple s) { GIMPLE_CHECK (s, GIMPLE_CALL); return (s->gsbase.subcode & GF_CALL_TAILCALL) != 0; } /* Set the inlinable status of GIMPLE_CALL S to INLINABLE_P. 
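   (The definition follows the illustrative sketch below.)  */

/* Illustrative usage sketch, not part of the original header: iterating over
   the actual arguments of a GIMPLE_CALL with gimple_call_num_args and
   gimple_call_arg.  The helper name is hypothetical.  */

static inline bool
example_call_has_ssa_arg_p (const_gimple call)
{
  unsigned i;

  /* The arguments live in the operand vector starting at slot 3; the
     accessors hide that offset from callers.  */
  for (i = 0; i < gimple_call_num_args (call); i++)
    if (TREE_CODE (gimple_call_arg (call, i)) == SSA_NAME)
      return true;
  return false;
}

/* Set the inlinable status of GIMPLE_CALL S to INLINABLE_P.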
*/ static inline void gimple_call_set_cannot_inline (gimple s, bool inlinable_p) { GIMPLE_CHECK (s, GIMPLE_CALL); if (inlinable_p) s->gsbase.subcode |= GF_CALL_CANNOT_INLINE; else s->gsbase.subcode &= ~GF_CALL_CANNOT_INLINE; } /* Return true if GIMPLE_CALL S cannot be inlined. */ static inline bool gimple_call_cannot_inline_p (gimple s) { GIMPLE_CHECK (s, GIMPLE_CALL); return (s->gsbase.subcode & GF_CALL_CANNOT_INLINE) != 0; } /* If RETURN_SLOT_OPT_P is true mark GIMPLE_CALL S as valid for return slot optimization. This transformation uses the target of the call expansion as the return slot for calls that return in memory. */ static inline void gimple_call_set_return_slot_opt (gimple s, bool return_slot_opt_p) { GIMPLE_CHECK (s, GIMPLE_CALL); if (return_slot_opt_p) s->gsbase.subcode |= GF_CALL_RETURN_SLOT_OPT; else s->gsbase.subcode &= ~GF_CALL_RETURN_SLOT_OPT; } /* Return true if S is marked for return slot optimization. */ static inline bool gimple_call_return_slot_opt_p (gimple s) { GIMPLE_CHECK (s, GIMPLE_CALL); return (s->gsbase.subcode & GF_CALL_RETURN_SLOT_OPT) != 0; } /* If FROM_THUNK_P is true, mark GIMPLE_CALL S as being the jump from a thunk to the thunked-to function. */ static inline void gimple_call_set_from_thunk (gimple s, bool from_thunk_p) { GIMPLE_CHECK (s, GIMPLE_CALL); if (from_thunk_p) s->gsbase.subcode |= GF_CALL_FROM_THUNK; else s->gsbase.subcode &= ~GF_CALL_FROM_THUNK; } /* Return true if GIMPLE_CALL S is a jump from a thunk. */ static inline bool gimple_call_from_thunk_p (gimple s) { GIMPLE_CHECK (s, GIMPLE_CALL); return (s->gsbase.subcode & GF_CALL_FROM_THUNK) != 0; } /* If PASS_ARG_PACK_P is true, GIMPLE_CALL S is a stdarg call that needs the argument pack in its argument list. */ static inline void gimple_call_set_va_arg_pack (gimple s, bool pass_arg_pack_p) { GIMPLE_CHECK (s, GIMPLE_CALL); if (pass_arg_pack_p) s->gsbase.subcode |= GF_CALL_VA_ARG_PACK; else s->gsbase.subcode &= ~GF_CALL_VA_ARG_PACK; } /* Return true if GIMPLE_CALL S is a stdarg call that needs the argument pack in its argument list. */ static inline bool gimple_call_va_arg_pack_p (gimple s) { GIMPLE_CHECK (s, GIMPLE_CALL); return (s->gsbase.subcode & GF_CALL_VA_ARG_PACK) != 0; } /* Return true if S is a noreturn call. */ static inline bool gimple_call_noreturn_p (gimple s) { GIMPLE_CHECK (s, GIMPLE_CALL); return (gimple_call_flags (s) & ECF_NORETURN) != 0; } /* If NOTHROW_P is true, GIMPLE_CALL S is a call that is known to not throw even if the called function can throw in other cases. */ static inline void gimple_call_set_nothrow (gimple s, bool nothrow_p) { GIMPLE_CHECK (s, GIMPLE_CALL); if (nothrow_p) s->gsbase.subcode |= GF_CALL_NOTHROW; else s->gsbase.subcode &= ~GF_CALL_NOTHROW; } /* Return true if S is a nothrow call. */ static inline bool gimple_call_nothrow_p (gimple s) { GIMPLE_CHECK (s, GIMPLE_CALL); return (gimple_call_flags (s) & ECF_NOTHROW) != 0; } /* Copy all the GF_CALL_* flags from ORIG_CALL to DEST_CALL. */ static inline void gimple_call_copy_flags (gimple dest_call, gimple orig_call) { GIMPLE_CHECK (dest_call, GIMPLE_CALL); GIMPLE_CHECK (orig_call, GIMPLE_CALL); dest_call->gsbase.subcode = orig_call->gsbase.subcode; } /* Return a pointer to the points-to solution for the set of call-used variables of the call CALL. */ static inline struct pt_solution * gimple_call_use_set (gimple call) { GIMPLE_CHECK (call, GIMPLE_CALL); return &call->gimple_call.call_used; } /* Return a pointer to the points-to solution for the set of call-used variables of the call CALL. 
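   (The definition follows the illustrative sketch below.)  */

/* Illustrative usage sketch, not part of the original header: combining two
   of the call flag predicates above.  The helper name is hypothetical.  */

static inline bool
example_call_ends_control_flow_p (gimple call)
{
  /* A call that can neither return normally nor throw terminates its
     basic block for good, e.g. a call to abort.  */
  return gimple_call_noreturn_p (call) && gimple_call_nothrow_p (call);
}

/* Return a pointer to the points-to solution for the set of call-clobbered
   variables of the call CALL.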
*/ static inline struct pt_solution * gimple_call_clobber_set (gimple call) { GIMPLE_CHECK (call, GIMPLE_CALL); return &call->gimple_call.call_clobbered; } /* Returns true if this is a GIMPLE_ASSIGN or a GIMPLE_CALL with a non-NULL lhs. */ static inline bool gimple_has_lhs (gimple stmt) { return (is_gimple_assign (stmt) || (is_gimple_call (stmt) && gimple_call_lhs (stmt) != NULL_TREE)); } /* Return the code of the predicate computed by conditional statement GS. */ static inline enum tree_code gimple_cond_code (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_COND); return (enum tree_code) gs->gsbase.subcode; } /* Set CODE to be the predicate code for the conditional statement GS. */ static inline void gimple_cond_set_code (gimple gs, enum tree_code code) { GIMPLE_CHECK (gs, GIMPLE_COND); gs->gsbase.subcode = code; } /* Return the LHS of the predicate computed by conditional statement GS. */ static inline tree gimple_cond_lhs (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_COND); return gimple_op (gs, 0); } /* Return the pointer to the LHS of the predicate computed by conditional statement GS. */ static inline tree * gimple_cond_lhs_ptr (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_COND); return gimple_op_ptr (gs, 0); } /* Set LHS to be the LHS operand of the predicate computed by conditional statement GS. */ static inline void gimple_cond_set_lhs (gimple gs, tree lhs) { GIMPLE_CHECK (gs, GIMPLE_COND); gimple_set_op (gs, 0, lhs); } /* Return the RHS operand of the predicate computed by conditional GS. */ static inline tree gimple_cond_rhs (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_COND); return gimple_op (gs, 1); } /* Return the pointer to the RHS operand of the predicate computed by conditional GS. */ static inline tree * gimple_cond_rhs_ptr (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_COND); return gimple_op_ptr (gs, 1); } /* Set RHS to be the RHS operand of the predicate computed by conditional statement GS. */ static inline void gimple_cond_set_rhs (gimple gs, tree rhs) { GIMPLE_CHECK (gs, GIMPLE_COND); gimple_set_op (gs, 1, rhs); } /* Return the label used by conditional statement GS when its predicate evaluates to true. */ static inline tree gimple_cond_true_label (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_COND); return gimple_op (gs, 2); } /* Set LABEL to be the label used by conditional statement GS when its predicate evaluates to true. */ static inline void gimple_cond_set_true_label (gimple gs, tree label) { GIMPLE_CHECK (gs, GIMPLE_COND); gimple_set_op (gs, 2, label); } /* Set LABEL to be the label used by conditional statement GS when its predicate evaluates to false. */ static inline void gimple_cond_set_false_label (gimple gs, tree label) { GIMPLE_CHECK (gs, GIMPLE_COND); gimple_set_op (gs, 3, label); } /* Return the label used by conditional statement GS when its predicate evaluates to false. */ static inline tree gimple_cond_false_label (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_COND); return gimple_op (gs, 3); } /* Set the conditional COND_STMT to be of the form 'if (1 == 0)'. */ static inline void gimple_cond_make_false (gimple gs) { gimple_cond_set_lhs (gs, boolean_true_node); gimple_cond_set_rhs (gs, boolean_false_node); gs->gsbase.subcode = EQ_EXPR; } /* Set the conditional COND_STMT to be of the form 'if (1 == 1)'. 
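   (The definition follows the illustrative sketch below.)  */

/* Illustrative usage sketch, not part of the original header: exchanging the
   outcome labels of a GIMPLE_COND.  The helper name is hypothetical; a real
   transformation would also invert the predicate (for instance via
   gimple_cond_set_code) so that semantics are preserved.  */

static inline void
example_cond_swap_labels (gimple cond)
{
  tree t = gimple_cond_true_label (cond);

  /* Only the label accessors are exercised here.  */
  gimple_cond_set_true_label (cond, gimple_cond_false_label (cond));
  gimple_cond_set_false_label (cond, t);
}

/* Set the conditional COND_STMT to be of the form 'if (1 == 1)'.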
*/ static inline void gimple_cond_make_true (gimple gs) { gimple_cond_set_lhs (gs, boolean_true_node); gimple_cond_set_rhs (gs, boolean_true_node); gs->gsbase.subcode = EQ_EXPR; } /* Check if conditional statemente GS is of the form 'if (1 == 1)', 'if (0 == 0)', 'if (1 != 0)' or 'if (0 != 1)' */ static inline bool gimple_cond_true_p (const_gimple gs) { tree lhs = gimple_cond_lhs (gs); tree rhs = gimple_cond_rhs (gs); enum tree_code code = gimple_cond_code (gs); if (lhs != boolean_true_node && lhs != boolean_false_node) return false; if (rhs != boolean_true_node && rhs != boolean_false_node) return false; if (code == NE_EXPR && lhs != rhs) return true; if (code == EQ_EXPR && lhs == rhs) return true; return false; } /* Check if conditional statement GS is of the form 'if (1 != 1)', 'if (0 != 0)', 'if (1 == 0)' or 'if (0 == 1)' */ static inline bool gimple_cond_false_p (const_gimple gs) { tree lhs = gimple_cond_lhs (gs); tree rhs = gimple_cond_rhs (gs); enum tree_code code = gimple_cond_code (gs); if (lhs != boolean_true_node && lhs != boolean_false_node) return false; if (rhs != boolean_true_node && rhs != boolean_false_node) return false; if (code == NE_EXPR && lhs == rhs) return true; if (code == EQ_EXPR && lhs != rhs) return true; return false; } /* Check if conditional statement GS is of the form 'if (var != 0)' or 'if (var == 1)' */ static inline bool gimple_cond_single_var_p (gimple gs) { if (gimple_cond_code (gs) == NE_EXPR && gimple_cond_rhs (gs) == boolean_false_node) return true; if (gimple_cond_code (gs) == EQ_EXPR && gimple_cond_rhs (gs) == boolean_true_node) return true; return false; } /* Set the code, LHS and RHS of GIMPLE_COND STMT from CODE, LHS and RHS. */ static inline void gimple_cond_set_condition (gimple stmt, enum tree_code code, tree lhs, tree rhs) { gimple_cond_set_code (stmt, code); gimple_cond_set_lhs (stmt, lhs); gimple_cond_set_rhs (stmt, rhs); } /* Return the LABEL_DECL node used by GIMPLE_LABEL statement GS. */ static inline tree gimple_label_label (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_LABEL); return gimple_op (gs, 0); } /* Set LABEL to be the LABEL_DECL node used by GIMPLE_LABEL statement GS. */ static inline void gimple_label_set_label (gimple gs, tree label) { GIMPLE_CHECK (gs, GIMPLE_LABEL); gimple_set_op (gs, 0, label); } /* Return the destination of the unconditional jump GS. */ static inline tree gimple_goto_dest (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_GOTO); return gimple_op (gs, 0); } /* Set DEST to be the destination of the unconditonal jump GS. */ static inline void gimple_goto_set_dest (gimple gs, tree dest) { GIMPLE_CHECK (gs, GIMPLE_GOTO); gimple_set_op (gs, 0, dest); } /* Return the variables declared in the GIMPLE_BIND statement GS. */ static inline tree gimple_bind_vars (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_BIND); return gs->gimple_bind.vars; } /* Set VARS to be the set of variables declared in the GIMPLE_BIND statement GS. */ static inline void gimple_bind_set_vars (gimple gs, tree vars) { GIMPLE_CHECK (gs, GIMPLE_BIND); gs->gimple_bind.vars = vars; } /* Append VARS to the set of variables declared in the GIMPLE_BIND statement GS. */ static inline void gimple_bind_append_vars (gimple gs, tree vars) { GIMPLE_CHECK (gs, GIMPLE_BIND); gs->gimple_bind.vars = chainon (gs->gimple_bind.vars, vars); } /* Return the GIMPLE sequence contained in the GIMPLE_BIND statement GS. 
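   (The definition follows the illustrative sketch below.)  */

/* Illustrative usage sketch, not part of the original header: a hypothetical
   predicate for conditions that are already decided at compile time, built
   from the two constant-condition checks above.  */

static inline bool
example_cond_constant_p (const_gimple cond)
{
  /* True when the condition compares boolean constants with EQ_EXPR or
     NE_EXPR, so the branch direction is known statically.  */
  return gimple_cond_true_p (cond) || gimple_cond_false_p (cond);
}

/* Return the GIMPLE sequence contained in the GIMPLE_BIND statement GS.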
*/ static inline gimple_seq gimple_bind_body (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_BIND); return gs->gimple_bind.body; } /* Set SEQ to be the GIMPLE sequence contained in the GIMPLE_BIND statement GS. */ static inline void gimple_bind_set_body (gimple gs, gimple_seq seq) { GIMPLE_CHECK (gs, GIMPLE_BIND); gs->gimple_bind.body = seq; } /* Append a statement to the end of a GIMPLE_BIND's body. */ static inline void gimple_bind_add_stmt (gimple gs, gimple stmt) { GIMPLE_CHECK (gs, GIMPLE_BIND); gimple_seq_add_stmt (&gs->gimple_bind.body, stmt); } /* Append a sequence of statements to the end of a GIMPLE_BIND's body. */ static inline void gimple_bind_add_seq (gimple gs, gimple_seq seq) { GIMPLE_CHECK (gs, GIMPLE_BIND); gimple_seq_add_seq (&gs->gimple_bind.body, seq); } /* Return the TREE_BLOCK node associated with GIMPLE_BIND statement GS. This is analogous to the BIND_EXPR_BLOCK field in trees. */ static inline tree gimple_bind_block (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_BIND); return gs->gimple_bind.block; } /* Set BLOCK to be the TREE_BLOCK node associated with GIMPLE_BIND statement GS. */ static inline void gimple_bind_set_block (gimple gs, tree block) { GIMPLE_CHECK (gs, GIMPLE_BIND); gcc_gimple_checking_assert (block == NULL_TREE || TREE_CODE (block) == BLOCK); gs->gimple_bind.block = block; } /* Return the number of input operands for GIMPLE_ASM GS. */ static inline unsigned gimple_asm_ninputs (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_ASM); return gs->gimple_asm.ni; } /* Return the number of output operands for GIMPLE_ASM GS. */ static inline unsigned gimple_asm_noutputs (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_ASM); return gs->gimple_asm.no; } /* Return the number of clobber operands for GIMPLE_ASM GS. */ static inline unsigned gimple_asm_nclobbers (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_ASM); return gs->gimple_asm.nc; } /* Return the number of label operands for GIMPLE_ASM GS. */ static inline unsigned gimple_asm_nlabels (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_ASM); return gs->gimple_asm.nl; } /* Return input operand INDEX of GIMPLE_ASM GS. */ static inline tree gimple_asm_input_op (const_gimple gs, unsigned index) { GIMPLE_CHECK (gs, GIMPLE_ASM); gcc_gimple_checking_assert (index <= gs->gimple_asm.ni); return gimple_op (gs, index); } /* Return a pointer to input operand INDEX of GIMPLE_ASM GS. */ static inline tree * gimple_asm_input_op_ptr (const_gimple gs, unsigned index) { GIMPLE_CHECK (gs, GIMPLE_ASM); gcc_gimple_checking_assert (index <= gs->gimple_asm.ni); return gimple_op_ptr (gs, index); } /* Set IN_OP to be input operand INDEX in GIMPLE_ASM GS. */ static inline void gimple_asm_set_input_op (gimple gs, unsigned index, tree in_op) { GIMPLE_CHECK (gs, GIMPLE_ASM); gcc_gimple_checking_assert (index <= gs->gimple_asm.ni && TREE_CODE (in_op) == TREE_LIST); gimple_set_op (gs, index, in_op); } /* Return output operand INDEX of GIMPLE_ASM GS. */ static inline tree gimple_asm_output_op (const_gimple gs, unsigned index) { GIMPLE_CHECK (gs, GIMPLE_ASM); gcc_gimple_checking_assert (index <= gs->gimple_asm.no); return gimple_op (gs, index + gs->gimple_asm.ni); } /* Return a pointer to output operand INDEX of GIMPLE_ASM GS. */ static inline tree * gimple_asm_output_op_ptr (const_gimple gs, unsigned index) { GIMPLE_CHECK (gs, GIMPLE_ASM); gcc_gimple_checking_assert (index <= gs->gimple_asm.no); return gimple_op_ptr (gs, index + gs->gimple_asm.ni); } /* Set OUT_OP to be output operand INDEX in GIMPLE_ASM GS. 
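   (The definition follows the illustrative sketch below.)  */

/* Illustrative usage sketch, not part of the original header: the operand
   vector of a GIMPLE_ASM stores inputs, outputs, clobbers and labels back to
   back, and the four counters above describe its partitioning.  The helper
   name is hypothetical.  */

static inline unsigned
example_asm_total_operands (const_gimple gs)
{
  return gimple_asm_ninputs (gs) + gimple_asm_noutputs (gs)
         + gimple_asm_nclobbers (gs) + gimple_asm_nlabels (gs);
}

/* Set OUT_OP to be output operand INDEX in GIMPLE_ASM GS.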
*/ static inline void gimple_asm_set_output_op (gimple gs, unsigned index, tree out_op) { GIMPLE_CHECK (gs, GIMPLE_ASM); gcc_gimple_checking_assert (index <= gs->gimple_asm.no && TREE_CODE (out_op) == TREE_LIST); gimple_set_op (gs, index + gs->gimple_asm.ni, out_op); } /* Return clobber operand INDEX of GIMPLE_ASM GS. */ static inline tree gimple_asm_clobber_op (const_gimple gs, unsigned index) { GIMPLE_CHECK (gs, GIMPLE_ASM); gcc_gimple_checking_assert (index <= gs->gimple_asm.nc); return gimple_op (gs, index + gs->gimple_asm.ni + gs->gimple_asm.no); } /* Set CLOBBER_OP to be clobber operand INDEX in GIMPLE_ASM GS. */ static inline void gimple_asm_set_clobber_op (gimple gs, unsigned index, tree clobber_op) { GIMPLE_CHECK (gs, GIMPLE_ASM); gcc_gimple_checking_assert (index <= gs->gimple_asm.nc && TREE_CODE (clobber_op) == TREE_LIST); gimple_set_op (gs, index + gs->gimple_asm.ni + gs->gimple_asm.no, clobber_op); } /* Return label operand INDEX of GIMPLE_ASM GS. */ static inline tree gimple_asm_label_op (const_gimple gs, unsigned index) { GIMPLE_CHECK (gs, GIMPLE_ASM); gcc_gimple_checking_assert (index <= gs->gimple_asm.nl); return gimple_op (gs, index + gs->gimple_asm.ni + gs->gimple_asm.nc); } /* Set LABEL_OP to be label operand INDEX in GIMPLE_ASM GS. */ static inline void gimple_asm_set_label_op (gimple gs, unsigned index, tree label_op) { GIMPLE_CHECK (gs, GIMPLE_ASM); gcc_gimple_checking_assert (index <= gs->gimple_asm.nl && TREE_CODE (label_op) == TREE_LIST); gimple_set_op (gs, index + gs->gimple_asm.ni + gs->gimple_asm.nc, label_op); } /* Return the string representing the assembly instruction in GIMPLE_ASM GS. */ static inline const char * gimple_asm_string (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_ASM); return gs->gimple_asm.string; } /* Return true if GS is an asm statement marked volatile. */ static inline bool gimple_asm_volatile_p (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_ASM); return (gs->gsbase.subcode & GF_ASM_VOLATILE) != 0; } /* If VOLATLE_P is true, mark asm statement GS as volatile. */ static inline void gimple_asm_set_volatile (gimple gs, bool volatile_p) { GIMPLE_CHECK (gs, GIMPLE_ASM); if (volatile_p) gs->gsbase.subcode |= GF_ASM_VOLATILE; else gs->gsbase.subcode &= ~GF_ASM_VOLATILE; } /* If INPUT_P is true, mark asm GS as an ASM_INPUT. */ static inline void gimple_asm_set_input (gimple gs, bool input_p) { GIMPLE_CHECK (gs, GIMPLE_ASM); if (input_p) gs->gsbase.subcode |= GF_ASM_INPUT; else gs->gsbase.subcode &= ~GF_ASM_INPUT; } /* Return true if asm GS is an ASM_INPUT. */ static inline bool gimple_asm_input_p (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_ASM); return (gs->gsbase.subcode & GF_ASM_INPUT) != 0; } /* Return the types handled by GIMPLE_CATCH statement GS. */ static inline tree gimple_catch_types (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_CATCH); return gs->gimple_catch.types; } /* Return a pointer to the types handled by GIMPLE_CATCH statement GS. */ static inline tree * gimple_catch_types_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_CATCH); return &gs->gimple_catch.types; } /* Return the GIMPLE sequence representing the body of the handler of GIMPLE_CATCH statement GS. */ static inline gimple_seq gimple_catch_handler (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_CATCH); return gs->gimple_catch.handler; } /* Return a pointer to the GIMPLE sequence representing the body of the handler of GIMPLE_CATCH statement GS. 
*/ static inline gimple_seq * gimple_catch_handler_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_CATCH); return &gs->gimple_catch.handler; } /* Set T to be the set of types handled by GIMPLE_CATCH GS. */ static inline void gimple_catch_set_types (gimple gs, tree t) { GIMPLE_CHECK (gs, GIMPLE_CATCH); gs->gimple_catch.types = t; } /* Set HANDLER to be the body of GIMPLE_CATCH GS. */ static inline void gimple_catch_set_handler (gimple gs, gimple_seq handler) { GIMPLE_CHECK (gs, GIMPLE_CATCH); gs->gimple_catch.handler = handler; } /* Return the types handled by GIMPLE_EH_FILTER statement GS. */ static inline tree gimple_eh_filter_types (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_EH_FILTER); return gs->gimple_eh_filter.types; } /* Return a pointer to the types handled by GIMPLE_EH_FILTER statement GS. */ static inline tree * gimple_eh_filter_types_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_EH_FILTER); return &gs->gimple_eh_filter.types; } /* Return the sequence of statement to execute when GIMPLE_EH_FILTER statement fails. */ static inline gimple_seq gimple_eh_filter_failure (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_EH_FILTER); return gs->gimple_eh_filter.failure; } /* Set TYPES to be the set of types handled by GIMPLE_EH_FILTER GS. */ static inline void gimple_eh_filter_set_types (gimple gs, tree types) { GIMPLE_CHECK (gs, GIMPLE_EH_FILTER); gs->gimple_eh_filter.types = types; } /* Set FAILURE to be the sequence of statements to execute on failure for GIMPLE_EH_FILTER GS. */ static inline void gimple_eh_filter_set_failure (gimple gs, gimple_seq failure) { GIMPLE_CHECK (gs, GIMPLE_EH_FILTER); gs->gimple_eh_filter.failure = failure; } /* Get the function decl to be called by the MUST_NOT_THROW region. */ static inline tree gimple_eh_must_not_throw_fndecl (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_EH_MUST_NOT_THROW); return gs->gimple_eh_mnt.fndecl; } /* Set the function decl to be called by GS to DECL. */ static inline void gimple_eh_must_not_throw_set_fndecl (gimple gs, tree decl) { GIMPLE_CHECK (gs, GIMPLE_EH_MUST_NOT_THROW); gs->gimple_eh_mnt.fndecl = decl; } /* GIMPLE_TRY accessors. */ /* Return the kind of try block represented by GIMPLE_TRY GS. This is either GIMPLE_TRY_CATCH or GIMPLE_TRY_FINALLY. */ static inline enum gimple_try_flags gimple_try_kind (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_TRY); return (enum gimple_try_flags) (gs->gsbase.subcode & GIMPLE_TRY_KIND); } /* Set the kind of try block represented by GIMPLE_TRY GS. */ static inline void gimple_try_set_kind (gimple gs, enum gimple_try_flags kind) { GIMPLE_CHECK (gs, GIMPLE_TRY); gcc_gimple_checking_assert (kind == GIMPLE_TRY_CATCH || kind == GIMPLE_TRY_FINALLY); if (gimple_try_kind (gs) != kind) gs->gsbase.subcode = (unsigned int) kind; } /* Return the GIMPLE_TRY_CATCH_IS_CLEANUP flag. */ static inline bool gimple_try_catch_is_cleanup (const_gimple gs) { gcc_gimple_checking_assert (gimple_try_kind (gs) == GIMPLE_TRY_CATCH); return (gs->gsbase.subcode & GIMPLE_TRY_CATCH_IS_CLEANUP) != 0; } /* Return the sequence of statements used as the body for GIMPLE_TRY GS. */ static inline gimple_seq gimple_try_eval (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_TRY); return gs->gimple_try.eval; } /* Return the sequence of statements used as the cleanup body for GIMPLE_TRY GS. */ static inline gimple_seq gimple_try_cleanup (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_TRY); return gs->gimple_try.cleanup; } /* Set the GIMPLE_TRY_CATCH_IS_CLEANUP flag. 
*/ static inline void gimple_try_set_catch_is_cleanup (gimple g, bool catch_is_cleanup) { gcc_gimple_checking_assert (gimple_try_kind (g) == GIMPLE_TRY_CATCH); if (catch_is_cleanup) g->gsbase.subcode |= GIMPLE_TRY_CATCH_IS_CLEANUP; else g->gsbase.subcode &= ~GIMPLE_TRY_CATCH_IS_CLEANUP; } /* Set EVAL to be the sequence of statements to use as the body for GIMPLE_TRY GS. */ static inline void gimple_try_set_eval (gimple gs, gimple_seq eval) { GIMPLE_CHECK (gs, GIMPLE_TRY); gs->gimple_try.eval = eval; } /* Set CLEANUP to be the sequence of statements to use as the cleanup body for GIMPLE_TRY GS. */ static inline void gimple_try_set_cleanup (gimple gs, gimple_seq cleanup) { GIMPLE_CHECK (gs, GIMPLE_TRY); gs->gimple_try.cleanup = cleanup; } /* Return the cleanup sequence for cleanup statement GS. */ static inline gimple_seq gimple_wce_cleanup (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_WITH_CLEANUP_EXPR); return gs->gimple_wce.cleanup; } /* Set CLEANUP to be the cleanup sequence for GS. */ static inline void gimple_wce_set_cleanup (gimple gs, gimple_seq cleanup) { GIMPLE_CHECK (gs, GIMPLE_WITH_CLEANUP_EXPR); gs->gimple_wce.cleanup = cleanup; } /* Return the CLEANUP_EH_ONLY flag for a WCE tuple. */ static inline bool gimple_wce_cleanup_eh_only (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_WITH_CLEANUP_EXPR); return gs->gsbase.subcode != 0; } /* Set the CLEANUP_EH_ONLY flag for a WCE tuple. */ static inline void gimple_wce_set_cleanup_eh_only (gimple gs, bool eh_only_p) { GIMPLE_CHECK (gs, GIMPLE_WITH_CLEANUP_EXPR); gs->gsbase.subcode = (unsigned int) eh_only_p; } /* Return the maximum number of arguments supported by GIMPLE_PHI GS. */ static inline unsigned gimple_phi_capacity (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_PHI); return gs->gimple_phi.capacity; } /* Return the number of arguments in GIMPLE_PHI GS. This must always be exactly the number of incoming edges for the basic block holding GS. */ static inline unsigned gimple_phi_num_args (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_PHI); return gs->gimple_phi.nargs; } /* Return the SSA name created by GIMPLE_PHI GS. */ static inline tree gimple_phi_result (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_PHI); return gs->gimple_phi.result; } /* Return a pointer to the SSA name created by GIMPLE_PHI GS. */ static inline tree * gimple_phi_result_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_PHI); return &gs->gimple_phi.result; } /* Set RESULT to be the SSA name created by GIMPLE_PHI GS. */ static inline void gimple_phi_set_result (gimple gs, tree result) { GIMPLE_CHECK (gs, GIMPLE_PHI); gs->gimple_phi.result = result; } /* Return the PHI argument corresponding to incoming edge INDEX for GIMPLE_PHI GS. */ static inline struct phi_arg_d * gimple_phi_arg (gimple gs, unsigned index) { GIMPLE_CHECK (gs, GIMPLE_PHI); gcc_gimple_checking_assert (index <= gs->gimple_phi.capacity); return &(gs->gimple_phi.args[index]); } /* Set PHIARG to be the argument corresponding to incoming edge INDEX for GIMPLE_PHI GS. */ static inline void gimple_phi_set_arg (gimple gs, unsigned index, struct phi_arg_d * phiarg) { GIMPLE_CHECK (gs, GIMPLE_PHI); gcc_gimple_checking_assert (index <= gs->gimple_phi.nargs); gs->gimple_phi.args[index] = *phiarg; } /* Return the region number for GIMPLE_RESX GS. */ static inline int gimple_resx_region (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_RESX); return gs->gimple_eh_ctrl.region; } /* Set REGION to be the region number for GIMPLE_RESX GS. 
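   (The definition follows the illustrative sketch below.)  */

/* Illustrative usage sketch, not part of the original header: a hypothetical
   predicate for degenerate PHI nodes, using gimple_phi_num_args.  */

static inline bool
example_phi_degenerate_p (const_gimple phi)
{
  /* A PHI with a single incoming argument merges nothing; it can be
     replaced by a copy of that argument into gimple_phi_result (phi).  */
  return gimple_phi_num_args (phi) == 1;
}

/* Set REGION to be the region number for GIMPLE_RESX GS.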
*/ static inline void gimple_resx_set_region (gimple gs, int region) { GIMPLE_CHECK (gs, GIMPLE_RESX); gs->gimple_eh_ctrl.region = region; } /* Return the region number for GIMPLE_EH_DISPATCH GS. */ static inline int gimple_eh_dispatch_region (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_EH_DISPATCH); return gs->gimple_eh_ctrl.region; } /* Set REGION to be the region number for GIMPLE_EH_DISPATCH GS. */ static inline void gimple_eh_dispatch_set_region (gimple gs, int region) { GIMPLE_CHECK (gs, GIMPLE_EH_DISPATCH); gs->gimple_eh_ctrl.region = region; } /* Return the number of labels associated with the switch statement GS. */ static inline unsigned gimple_switch_num_labels (const_gimple gs) { unsigned num_ops; GIMPLE_CHECK (gs, GIMPLE_SWITCH); num_ops = gimple_num_ops (gs); gcc_gimple_checking_assert (num_ops > 1); return num_ops - 1; } /* Set NLABELS to be the number of labels for the switch statement GS. */ static inline void gimple_switch_set_num_labels (gimple g, unsigned nlabels) { GIMPLE_CHECK (g, GIMPLE_SWITCH); gimple_set_num_ops (g, nlabels + 1); } /* Return the index variable used by the switch statement GS. */ static inline tree gimple_switch_index (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_SWITCH); return gimple_op (gs, 0); } /* Return a pointer to the index variable for the switch statement GS. */ static inline tree * gimple_switch_index_ptr (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_SWITCH); return gimple_op_ptr (gs, 0); } /* Set INDEX to be the index variable for switch statement GS. */ static inline void gimple_switch_set_index (gimple gs, tree index) { GIMPLE_CHECK (gs, GIMPLE_SWITCH); gcc_gimple_checking_assert (SSA_VAR_P (index) || CONSTANT_CLASS_P (index)); gimple_set_op (gs, 0, index); } /* Return the label numbered INDEX. The default label is 0, followed by any labels in a switch statement. */ static inline tree gimple_switch_label (const_gimple gs, unsigned index) { GIMPLE_CHECK (gs, GIMPLE_SWITCH); gcc_gimple_checking_assert (gimple_num_ops (gs) > index + 1); return gimple_op (gs, index + 1); } /* Set the label number INDEX to LABEL. 0 is always the default label. */ static inline void gimple_switch_set_label (gimple gs, unsigned index, tree label) { GIMPLE_CHECK (gs, GIMPLE_SWITCH); gcc_gimple_checking_assert (gimple_num_ops (gs) > index + 1 && (label == NULL_TREE || TREE_CODE (label) == CASE_LABEL_EXPR)); gimple_set_op (gs, index + 1, label); } /* Return the default label for a switch statement. */ static inline tree gimple_switch_default_label (const_gimple gs) { return gimple_switch_label (gs, 0); } /* Set the default label for a switch statement. */ static inline void gimple_switch_set_default_label (gimple gs, tree label) { gimple_switch_set_label (gs, 0, label); } /* Return true if GS is a GIMPLE_DEBUG statement. */ static inline bool is_gimple_debug (const_gimple gs) { return gimple_code (gs) == GIMPLE_DEBUG; } /* Return true if S is a GIMPLE_DEBUG BIND statement. */ static inline bool gimple_debug_bind_p (const_gimple s) { if (is_gimple_debug (s)) return s->gsbase.subcode == GIMPLE_DEBUG_BIND; return false; } /* Return the variable bound in a GIMPLE_DEBUG bind statement. */ static inline tree gimple_debug_bind_get_var (gimple dbg) { GIMPLE_CHECK (dbg, GIMPLE_DEBUG); gcc_gimple_checking_assert (gimple_debug_bind_p (dbg)); return gimple_op (dbg, 0); } /* Return the value bound to the variable in a GIMPLE_DEBUG bind statement. 
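   (The definition follows the illustrative sketch below.)  */

/* Illustrative usage sketch, not part of the original header: a hypothetical
   predicate built on gimple_switch_num_labels.  */

static inline bool
example_switch_default_only_p (const_gimple gs)
{
  /* Operand 0 of a switch is the index and operand 1 the default label;
     with no further case labels the switch degenerates into a goto.  */
  return gimple_switch_num_labels (gs) == 1;
}

/* Return the value bound to the variable in a GIMPLE_DEBUG bind statement.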
*/ static inline tree gimple_debug_bind_get_value (gimple dbg) { GIMPLE_CHECK (dbg, GIMPLE_DEBUG); gcc_gimple_checking_assert (gimple_debug_bind_p (dbg)); return gimple_op (dbg, 1); } /* Return a pointer to the value bound to the variable in a GIMPLE_DEBUG bind statement. */ static inline tree * gimple_debug_bind_get_value_ptr (gimple dbg) { GIMPLE_CHECK (dbg, GIMPLE_DEBUG); gcc_gimple_checking_assert (gimple_debug_bind_p (dbg)); return gimple_op_ptr (dbg, 1); } /* Set the variable bound in a GIMPLE_DEBUG bind statement. */ static inline void gimple_debug_bind_set_var (gimple dbg, tree var) { GIMPLE_CHECK (dbg, GIMPLE_DEBUG); gcc_gimple_checking_assert (gimple_debug_bind_p (dbg)); gimple_set_op (dbg, 0, var); } /* Set the value bound to the variable in a GIMPLE_DEBUG bind statement. */ static inline void gimple_debug_bind_set_value (gimple dbg, tree value) { GIMPLE_CHECK (dbg, GIMPLE_DEBUG); gcc_gimple_checking_assert (gimple_debug_bind_p (dbg)); gimple_set_op (dbg, 1, value); } /* The second operand of a GIMPLE_DEBUG_BIND, when the value was optimized away. */ #define GIMPLE_DEBUG_BIND_NOVALUE NULL_TREE /* error_mark_node */ /* Remove the value bound to the variable in a GIMPLE_DEBUG bind statement. */ static inline void gimple_debug_bind_reset_value (gimple dbg) { GIMPLE_CHECK (dbg, GIMPLE_DEBUG); gcc_gimple_checking_assert (gimple_debug_bind_p (dbg)); gimple_set_op (dbg, 1, GIMPLE_DEBUG_BIND_NOVALUE); } /* Return true if the GIMPLE_DEBUG bind statement is bound to a value. */ static inline bool gimple_debug_bind_has_value_p (gimple dbg) { GIMPLE_CHECK (dbg, GIMPLE_DEBUG); gcc_gimple_checking_assert (gimple_debug_bind_p (dbg)); return gimple_op (dbg, 1) != GIMPLE_DEBUG_BIND_NOVALUE; } #undef GIMPLE_DEBUG_BIND_NOVALUE /* Return the body for the OMP statement GS. */ static inline gimple_seq gimple_omp_body (gimple gs) { return gs->omp.body; } /* Set BODY to be the body for the OMP statement GS. */ static inline void gimple_omp_set_body (gimple gs, gimple_seq body) { gs->omp.body = body; } /* Return the name associated with OMP_CRITICAL statement GS. */ static inline tree gimple_omp_critical_name (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_CRITICAL); return gs->gimple_omp_critical.name; } /* Return a pointer to the name associated with OMP critical statement GS. */ static inline tree * gimple_omp_critical_name_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_CRITICAL); return &gs->gimple_omp_critical.name; } /* Set NAME to be the name associated with OMP critical statement GS. */ static inline void gimple_omp_critical_set_name (gimple gs, tree name) { GIMPLE_CHECK (gs, GIMPLE_OMP_CRITICAL); gs->gimple_omp_critical.name = name; } /* Return the clauses associated with OMP_FOR GS. */ static inline tree gimple_omp_for_clauses (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_FOR); return gs->gimple_omp_for.clauses; } /* Return a pointer to the OMP_FOR GS. */ static inline tree * gimple_omp_for_clauses_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_FOR); return &gs->gimple_omp_for.clauses; } /* Set CLAUSES to be the list of clauses associated with OMP_FOR GS. */ static inline void gimple_omp_for_set_clauses (gimple gs, tree clauses) { GIMPLE_CHECK (gs, GIMPLE_OMP_FOR); gs->gimple_omp_for.clauses = clauses; } /* Get the collapse count of OMP_FOR GS. */ static inline size_t gimple_omp_for_collapse (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_FOR); return gs->gimple_omp_for.collapse; } /* Return the index variable for OMP_FOR GS. 
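   (The definition follows the illustrative sketch below.)  */

/* Illustrative usage sketch, not part of the original header: dropping the
   value of a GIMPLE_DEBUG bind when it refers to some tree VALUE that is
   about to disappear.  The helper name is hypothetical.  */

static inline void
example_debug_bind_invalidate (gimple dbg, tree value)
{
  /* Resetting the value marks the bound variable as optimized away from
     this point on.  */
  if (gimple_debug_bind_has_value_p (dbg)
      && gimple_debug_bind_get_value (dbg) == value)
    gimple_debug_bind_reset_value (dbg);
}

/* Return the index variable for OMP_FOR GS.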
*/ static inline tree gimple_omp_for_index (const_gimple gs, size_t i) { GIMPLE_CHECK (gs, GIMPLE_OMP_FOR); gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse); return gs->gimple_omp_for.iter[i].index; } /* Return a pointer to the index variable for OMP_FOR GS. */ static inline tree * gimple_omp_for_index_ptr (gimple gs, size_t i) { GIMPLE_CHECK (gs, GIMPLE_OMP_FOR); gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse); return &gs->gimple_omp_for.iter[i].index; } /* Set INDEX to be the index variable for OMP_FOR GS. */ static inline void gimple_omp_for_set_index (gimple gs, size_t i, tree index) { GIMPLE_CHECK (gs, GIMPLE_OMP_FOR); gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse); gs->gimple_omp_for.iter[i].index = index; } /* Return the initial value for OMP_FOR GS. */ static inline tree gimple_omp_for_initial (const_gimple gs, size_t i) { GIMPLE_CHECK (gs, GIMPLE_OMP_FOR); gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse); return gs->gimple_omp_for.iter[i].initial; } /* Return a pointer to the initial value for OMP_FOR GS. */ static inline tree * gimple_omp_for_initial_ptr (gimple gs, size_t i) { GIMPLE_CHECK (gs, GIMPLE_OMP_FOR); gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse); return &gs->gimple_omp_for.iter[i].initial; } /* Set INITIAL to be the initial value for OMP_FOR GS. */ static inline void gimple_omp_for_set_initial (gimple gs, size_t i, tree initial) { GIMPLE_CHECK (gs, GIMPLE_OMP_FOR); gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse); gs->gimple_omp_for.iter[i].initial = initial; } /* Return the final value for OMP_FOR GS. */ static inline tree gimple_omp_for_final (const_gimple gs, size_t i) { GIMPLE_CHECK (gs, GIMPLE_OMP_FOR); gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse); return gs->gimple_omp_for.iter[i].final; } /* Return a pointer to the final value for OMP_FOR GS. */ static inline tree * gimple_omp_for_final_ptr (gimple gs, size_t i) { GIMPLE_CHECK (gs, GIMPLE_OMP_FOR); gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse); return &gs->gimple_omp_for.iter[i].final; } /* Set FINAL to be the final value for OMP_FOR GS. */ static inline void gimple_omp_for_set_final (gimple gs, size_t i, tree final) { GIMPLE_CHECK (gs, GIMPLE_OMP_FOR); gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse); gs->gimple_omp_for.iter[i].final = final; } /* Return the increment value for OMP_FOR GS. */ static inline tree gimple_omp_for_incr (const_gimple gs, size_t i) { GIMPLE_CHECK (gs, GIMPLE_OMP_FOR); gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse); return gs->gimple_omp_for.iter[i].incr; } /* Return a pointer to the increment value for OMP_FOR GS. */ static inline tree * gimple_omp_for_incr_ptr (gimple gs, size_t i) { GIMPLE_CHECK (gs, GIMPLE_OMP_FOR); gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse); return &gs->gimple_omp_for.iter[i].incr; } /* Set INCR to be the increment value for OMP_FOR GS. */ static inline void gimple_omp_for_set_incr (gimple gs, size_t i, tree incr) { GIMPLE_CHECK (gs, GIMPLE_OMP_FOR); gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse); gs->gimple_omp_for.iter[i].incr = incr; } /* Return the sequence of statements to execute before the OMP_FOR statement GS starts. */ static inline gimple_seq gimple_omp_for_pre_body (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_FOR); return gs->gimple_omp_for.pre_body; } /* Set PRE_BODY to be the sequence of statements to execute before the OMP_FOR statement GS starts. 
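   (The definition follows the illustrative sketch below.)  */

/* Illustrative usage sketch, not part of the original header: a collapsed
   GIMPLE_OMP_FOR carries one index/initial/final/increment tuple per
   dimension; this hypothetical helper checks whether DECL is the index of
   any of them.  */

static inline bool
example_omp_for_index_p (gimple gs, tree decl)
{
  size_t i;

  for (i = 0; i < gimple_omp_for_collapse (gs); i++)
    if (gimple_omp_for_index (gs, i) == decl)
      return true;
  return false;
}

/* Set PRE_BODY to be the sequence of statements to execute before the
   OMP_FOR statement GS starts.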
*/ static inline void gimple_omp_for_set_pre_body (gimple gs, gimple_seq pre_body) { GIMPLE_CHECK (gs, GIMPLE_OMP_FOR); gs->gimple_omp_for.pre_body = pre_body; } /* Return the clauses associated with OMP_PARALLEL GS. */ static inline tree gimple_omp_parallel_clauses (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL); return gs->gimple_omp_parallel.clauses; } /* Return a pointer to the clauses associated with OMP_PARALLEL GS. */ static inline tree * gimple_omp_parallel_clauses_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL); return &gs->gimple_omp_parallel.clauses; } /* Set CLAUSES to be the list of clauses associated with OMP_PARALLEL GS. */ static inline void gimple_omp_parallel_set_clauses (gimple gs, tree clauses) { GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL); gs->gimple_omp_parallel.clauses = clauses; } /* Return the child function used to hold the body of OMP_PARALLEL GS. */ static inline tree gimple_omp_parallel_child_fn (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL); return gs->gimple_omp_parallel.child_fn; } /* Return a pointer to the child function used to hold the body of OMP_PARALLEL GS. */ static inline tree * gimple_omp_parallel_child_fn_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL); return &gs->gimple_omp_parallel.child_fn; } /* Set CHILD_FN to be the child function for OMP_PARALLEL GS. */ static inline void gimple_omp_parallel_set_child_fn (gimple gs, tree child_fn) { GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL); gs->gimple_omp_parallel.child_fn = child_fn; } /* Return the artificial argument used to send variables and values from the parent to the children threads in OMP_PARALLEL GS. */ static inline tree gimple_omp_parallel_data_arg (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL); return gs->gimple_omp_parallel.data_arg; } /* Return a pointer to the data argument for OMP_PARALLEL GS. */ static inline tree * gimple_omp_parallel_data_arg_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL); return &gs->gimple_omp_parallel.data_arg; } /* Set DATA_ARG to be the data argument for OMP_PARALLEL GS. */ static inline void gimple_omp_parallel_set_data_arg (gimple gs, tree data_arg) { GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL); gs->gimple_omp_parallel.data_arg = data_arg; } /* Return the clauses associated with OMP_TASK GS. */ static inline tree gimple_omp_task_clauses (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return gs->gimple_omp_parallel.clauses; } /* Return a pointer to the clauses associated with OMP_TASK GS. */ static inline tree * gimple_omp_task_clauses_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return &gs->gimple_omp_parallel.clauses; } /* Set CLAUSES to be the list of clauses associated with OMP_TASK GS. */ static inline void gimple_omp_task_set_clauses (gimple gs, tree clauses) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); gs->gimple_omp_parallel.clauses = clauses; } /* Return the child function used to hold the body of OMP_TASK GS. */ static inline tree gimple_omp_task_child_fn (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return gs->gimple_omp_parallel.child_fn; } /* Return a pointer to the child function used to hold the body of OMP_TASK GS. */ static inline tree * gimple_omp_task_child_fn_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return &gs->gimple_omp_parallel.child_fn; } /* Set CHILD_FN to be the child function for OMP_TASK GS. 
*/ static inline void gimple_omp_task_set_child_fn (gimple gs, tree child_fn) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); gs->gimple_omp_parallel.child_fn = child_fn; } /* Return the artificial argument used to send variables and values from the parent to the children threads in OMP_TASK GS. */ static inline tree gimple_omp_task_data_arg (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return gs->gimple_omp_parallel.data_arg; } /* Return a pointer to the data argument for OMP_TASK GS. */ static inline tree * gimple_omp_task_data_arg_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return &gs->gimple_omp_parallel.data_arg; } /* Set DATA_ARG to be the data argument for OMP_TASK GS. */ static inline void gimple_omp_task_set_data_arg (gimple gs, tree data_arg) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); gs->gimple_omp_parallel.data_arg = data_arg; } /* Return the clauses associated with OMP_TASK GS. */ static inline tree gimple_omp_taskreg_clauses (const_gimple gs) { if (gimple_code (gs) != GIMPLE_OMP_PARALLEL) GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return gs->gimple_omp_parallel.clauses; } /* Return a pointer to the clauses associated with OMP_TASK GS. */ static inline tree * gimple_omp_taskreg_clauses_ptr (gimple gs) { if (gimple_code (gs) != GIMPLE_OMP_PARALLEL) GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return &gs->gimple_omp_parallel.clauses; } /* Set CLAUSES to be the list of clauses associated with OMP_TASK GS. */ static inline void gimple_omp_taskreg_set_clauses (gimple gs, tree clauses) { if (gimple_code (gs) != GIMPLE_OMP_PARALLEL) GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); gs->gimple_omp_parallel.clauses = clauses; } /* Return the child function used to hold the body of OMP_TASK GS. */ static inline tree gimple_omp_taskreg_child_fn (const_gimple gs) { if (gimple_code (gs) != GIMPLE_OMP_PARALLEL) GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return gs->gimple_omp_parallel.child_fn; } /* Return a pointer to the child function used to hold the body of OMP_TASK GS. */ static inline tree * gimple_omp_taskreg_child_fn_ptr (gimple gs) { if (gimple_code (gs) != GIMPLE_OMP_PARALLEL) GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return &gs->gimple_omp_parallel.child_fn; } /* Set CHILD_FN to be the child function for OMP_TASK GS. */ static inline void gimple_omp_taskreg_set_child_fn (gimple gs, tree child_fn) { if (gimple_code (gs) != GIMPLE_OMP_PARALLEL) GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); gs->gimple_omp_parallel.child_fn = child_fn; } /* Return the artificial argument used to send variables and values from the parent to the children threads in OMP_TASK GS. */ static inline tree gimple_omp_taskreg_data_arg (const_gimple gs) { if (gimple_code (gs) != GIMPLE_OMP_PARALLEL) GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return gs->gimple_omp_parallel.data_arg; } /* Return a pointer to the data argument for OMP_TASK GS. */ static inline tree * gimple_omp_taskreg_data_arg_ptr (gimple gs) { if (gimple_code (gs) != GIMPLE_OMP_PARALLEL) GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return &gs->gimple_omp_parallel.data_arg; } /* Set DATA_ARG to be the data argument for OMP_TASK GS. */ static inline void gimple_omp_taskreg_set_data_arg (gimple gs, tree data_arg) { if (gimple_code (gs) != GIMPLE_OMP_PARALLEL) GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); gs->gimple_omp_parallel.data_arg = data_arg; } /* Return the copy function used to hold the body of OMP_TASK GS. 
*/ static inline tree gimple_omp_task_copy_fn (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return gs->gimple_omp_task.copy_fn; } /* Return a pointer to the copy function used to hold the body of OMP_TASK GS. */ static inline tree * gimple_omp_task_copy_fn_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return &gs->gimple_omp_task.copy_fn; } /* Set CHILD_FN to be the copy function for OMP_TASK GS. */ static inline void gimple_omp_task_set_copy_fn (gimple gs, tree copy_fn) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); gs->gimple_omp_task.copy_fn = copy_fn; } /* Return size of the data block in bytes in OMP_TASK GS. */ static inline tree gimple_omp_task_arg_size (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return gs->gimple_omp_task.arg_size; } /* Return a pointer to the data block size for OMP_TASK GS. */ static inline tree * gimple_omp_task_arg_size_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return &gs->gimple_omp_task.arg_size; } /* Set ARG_SIZE to be the data block size for OMP_TASK GS. */ static inline void gimple_omp_task_set_arg_size (gimple gs, tree arg_size) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); gs->gimple_omp_task.arg_size = arg_size; } /* Return align of the data block in bytes in OMP_TASK GS. */ static inline tree gimple_omp_task_arg_align (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return gs->gimple_omp_task.arg_align; } /* Return a pointer to the data block align for OMP_TASK GS. */ static inline tree * gimple_omp_task_arg_align_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return &gs->gimple_omp_task.arg_align; } /* Set ARG_SIZE to be the data block align for OMP_TASK GS. */ static inline void gimple_omp_task_set_arg_align (gimple gs, tree arg_align) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); gs->gimple_omp_task.arg_align = arg_align; } /* Return the clauses associated with OMP_SINGLE GS. */ static inline tree gimple_omp_single_clauses (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_SINGLE); return gs->gimple_omp_single.clauses; } /* Return a pointer to the clauses associated with OMP_SINGLE GS. */ static inline tree * gimple_omp_single_clauses_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_SINGLE); return &gs->gimple_omp_single.clauses; } /* Set CLAUSES to be the clauses associated with OMP_SINGLE GS. */ static inline void gimple_omp_single_set_clauses (gimple gs, tree clauses) { GIMPLE_CHECK (gs, GIMPLE_OMP_SINGLE); gs->gimple_omp_single.clauses = clauses; } /* Return the clauses associated with OMP_SECTIONS GS. */ static inline tree gimple_omp_sections_clauses (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS); return gs->gimple_omp_sections.clauses; } /* Return a pointer to the clauses associated with OMP_SECTIONS GS. */ static inline tree * gimple_omp_sections_clauses_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS); return &gs->gimple_omp_sections.clauses; } /* Set CLAUSES to be the set of clauses associated with OMP_SECTIONS GS. */ static inline void gimple_omp_sections_set_clauses (gimple gs, tree clauses) { GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS); gs->gimple_omp_sections.clauses = clauses; } /* Return the control variable associated with the GIMPLE_OMP_SECTIONS in GS. */ static inline tree gimple_omp_sections_control (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS); return gs->gimple_omp_sections.control; } /* Return a pointer to the clauses associated with the GIMPLE_OMP_SECTIONS GS. 
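   (The definition follows the illustrative sketch below.)  */

/* Illustrative usage sketch, not part of the original header: clause lists
   are ordinary OMP_CLAUSE chains, so a NULL_TREE list simply means the
   directive was written without clauses.  The helper name is
   hypothetical.  */

static inline bool
example_omp_single_has_clauses_p (const_gimple gs)
{
  return gimple_omp_single_clauses (gs) != NULL_TREE;
}

/* Return a pointer to the control variable associated with the
   GIMPLE_OMP_SECTIONS in GS.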
*/ static inline tree * gimple_omp_sections_control_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS); return &gs->gimple_omp_sections.control; } /* Set CONTROL to be the set of clauses associated with the GIMPLE_OMP_SECTIONS in GS. */ static inline void gimple_omp_sections_set_control (gimple gs, tree control) { GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS); gs->gimple_omp_sections.control = control; } /* Set COND to be the condition code for OMP_FOR GS. */ static inline void gimple_omp_for_set_cond (gimple gs, size_t i, enum tree_code cond) { GIMPLE_CHECK (gs, GIMPLE_OMP_FOR); gcc_gimple_checking_assert (TREE_CODE_CLASS (cond) == tcc_comparison && i < gs->gimple_omp_for.collapse); gs->gimple_omp_for.iter[i].cond = cond; } /* Return the condition code associated with OMP_FOR GS. */ static inline enum tree_code gimple_omp_for_cond (const_gimple gs, size_t i) { GIMPLE_CHECK (gs, GIMPLE_OMP_FOR); gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse); return gs->gimple_omp_for.iter[i].cond; } /* Set the value being stored in an atomic store. */ static inline void gimple_omp_atomic_store_set_val (gimple g, tree val) { GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE); g->gimple_omp_atomic_store.val = val; } /* Return the value being stored in an atomic store. */ static inline tree gimple_omp_atomic_store_val (const_gimple g) { GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE); return g->gimple_omp_atomic_store.val; } /* Return a pointer to the value being stored in an atomic store. */ static inline tree * gimple_omp_atomic_store_val_ptr (gimple g) { GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE); return &g->gimple_omp_atomic_store.val; } /* Set the LHS of an atomic load. */ static inline void gimple_omp_atomic_load_set_lhs (gimple g, tree lhs) { GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD); g->gimple_omp_atomic_load.lhs = lhs; } /* Get the LHS of an atomic load. */ static inline tree gimple_omp_atomic_load_lhs (const_gimple g) { GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD); return g->gimple_omp_atomic_load.lhs; } /* Return a pointer to the LHS of an atomic load. */ static inline tree * gimple_omp_atomic_load_lhs_ptr (gimple g) { GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD); return &g->gimple_omp_atomic_load.lhs; } /* Set the RHS of an atomic load. */ static inline void gimple_omp_atomic_load_set_rhs (gimple g, tree rhs) { GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD); g->gimple_omp_atomic_load.rhs = rhs; } /* Get the RHS of an atomic load. */ static inline tree gimple_omp_atomic_load_rhs (const_gimple g) { GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD); return g->gimple_omp_atomic_load.rhs; } /* Return a pointer to the RHS of an atomic load. */ static inline tree * gimple_omp_atomic_load_rhs_ptr (gimple g) { GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD); return &g->gimple_omp_atomic_load.rhs; } /* Get the definition of the control variable in a GIMPLE_OMP_CONTINUE. */ static inline tree gimple_omp_continue_control_def (const_gimple g) { GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE); return g->gimple_omp_continue.control_def; } /* The same as above, but return the address. */ static inline tree * gimple_omp_continue_control_def_ptr (gimple g) { GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE); return &g->gimple_omp_continue.control_def; } /* Set the definition of the control variable in a GIMPLE_OMP_CONTINUE. */ static inline void gimple_omp_continue_set_control_def (gimple g, tree def) { GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE); g->gimple_omp_continue.control_def = def; } /* Get the use of the control variable in a GIMPLE_OMP_CONTINUE. 
*/ static inline tree gimple_omp_continue_control_use (const_gimple g) { GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE); return g->gimple_omp_continue.control_use; } /* The same as above, but return the address. */ static inline tree * gimple_omp_continue_control_use_ptr (gimple g) { GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE); return &g->gimple_omp_continue.control_use; } /* Set the use of the control variable in a GIMPLE_OMP_CONTINUE. */ static inline void gimple_omp_continue_set_control_use (gimple g, tree use) { GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE); g->gimple_omp_continue.control_use = use; } /* Return a pointer to the return value for GIMPLE_RETURN GS. */ static inline tree * gimple_return_retval_ptr (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_RETURN); return gimple_op_ptr (gs, 0); } /* Return the return value for GIMPLE_RETURN GS. */ static inline tree gimple_return_retval (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_RETURN); return gimple_op (gs, 0); } /* Set RETVAL to be the return value for GIMPLE_RETURN GS. */ static inline void gimple_return_set_retval (gimple gs, tree retval) { GIMPLE_CHECK (gs, GIMPLE_RETURN); gimple_set_op (gs, 0, retval); } /* Returns true when the gimple statment STMT is any of the OpenMP types. */ #define CASE_GIMPLE_OMP \ case GIMPLE_OMP_PARALLEL: \ case GIMPLE_OMP_TASK: \ case GIMPLE_OMP_FOR: \ case GIMPLE_OMP_SECTIONS: \ case GIMPLE_OMP_SECTIONS_SWITCH: \ case GIMPLE_OMP_SINGLE: \ case GIMPLE_OMP_SECTION: \ case GIMPLE_OMP_MASTER: \ case GIMPLE_OMP_ORDERED: \ case GIMPLE_OMP_CRITICAL: \ case GIMPLE_OMP_RETURN: \ case GIMPLE_OMP_ATOMIC_LOAD: \ case GIMPLE_OMP_ATOMIC_STORE: \ case GIMPLE_OMP_CONTINUE static inline bool is_gimple_omp (const_gimple stmt) { switch (gimple_code (stmt)) { CASE_GIMPLE_OMP: return true; default: return false; } } /* Returns TRUE if statement G is a GIMPLE_NOP. */ static inline bool gimple_nop_p (const_gimple g) { return gimple_code (g) == GIMPLE_NOP; } /* Return true if GS is a GIMPLE_RESX. */ static inline bool is_gimple_resx (const_gimple gs) { return gimple_code (gs) == GIMPLE_RESX; } /* Return the predictor of GIMPLE_PREDICT statement GS. */ static inline enum br_predictor gimple_predict_predictor (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_PREDICT); return (enum br_predictor) (gs->gsbase.subcode & ~GF_PREDICT_TAKEN); } /* Set the predictor of GIMPLE_PREDICT statement GS to PREDICT. */ static inline void gimple_predict_set_predictor (gimple gs, enum br_predictor predictor) { GIMPLE_CHECK (gs, GIMPLE_PREDICT); gs->gsbase.subcode = (gs->gsbase.subcode & GF_PREDICT_TAKEN) | (unsigned) predictor; } /* Return the outcome of GIMPLE_PREDICT statement GS. */ static inline enum prediction gimple_predict_outcome (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_PREDICT); return (gs->gsbase.subcode & GF_PREDICT_TAKEN) ? TAKEN : NOT_TAKEN; } /* Set the outcome of GIMPLE_PREDICT statement GS to OUTCOME. */ static inline void gimple_predict_set_outcome (gimple gs, enum prediction outcome) { GIMPLE_CHECK (gs, GIMPLE_PREDICT); if (outcome == TAKEN) gs->gsbase.subcode |= GF_PREDICT_TAKEN; else gs->gsbase.subcode &= ~GF_PREDICT_TAKEN; } /* Return the type of the main expression computed by STMT. Return void_type_node if the statement computes nothing. */ static inline tree gimple_expr_type (const_gimple stmt) { enum gimple_code code = gimple_code (stmt); if (code == GIMPLE_ASSIGN || code == GIMPLE_CALL) { tree type; /* In general we want to pass out a type that can be substituted for both the RHS and the LHS types if there is a possibly useless conversion involved. 
That means returning the original RHS type as far as we can reconstruct it. */ if (code == GIMPLE_CALL) type = gimple_call_return_type (stmt); else switch (gimple_assign_rhs_code (stmt)) { case POINTER_PLUS_EXPR: type = TREE_TYPE (gimple_assign_rhs1 (stmt)); break; default: /* As fallback use the type of the LHS. */ type = TREE_TYPE (gimple_get_lhs (stmt)); break; } return type; } else if (code == GIMPLE_COND) return boolean_type_node; else return void_type_node; } /* Return a new iterator pointing to GIMPLE_SEQ's first statement. */ static inline gimple_stmt_iterator gsi_start (gimple_seq seq) { gimple_stmt_iterator i; i.ptr = gimple_seq_first (seq); i.seq = seq; i.bb = (i.ptr && i.ptr->stmt) ? gimple_bb (i.ptr->stmt) : NULL; return i; } /* Return a new iterator pointing to the first statement in basic block BB. */ static inline gimple_stmt_iterator gsi_start_bb (basic_block bb) { gimple_stmt_iterator i; gimple_seq seq; seq = bb_seq (bb); i.ptr = gimple_seq_first (seq); i.seq = seq; i.bb = bb; return i; } /* Return a new iterator initially pointing to GIMPLE_SEQ's last statement. */ static inline gimple_stmt_iterator gsi_last (gimple_seq seq) { gimple_stmt_iterator i; i.ptr = gimple_seq_last (seq); i.seq = seq; i.bb = (i.ptr && i.ptr->stmt) ? gimple_bb (i.ptr->stmt) : NULL; return i; } /* Return a new iterator pointing to the last statement in basic block BB. */ static inline gimple_stmt_iterator gsi_last_bb (basic_block bb) { gimple_stmt_iterator i; gimple_seq seq; seq = bb_seq (bb); i.ptr = gimple_seq_last (seq); i.seq = seq; i.bb = bb; return i; } /* Return true if I is at the end of its sequence. */ static inline bool gsi_end_p (gimple_stmt_iterator i) { return i.ptr == NULL; } /* Return true if I is one statement before the end of its sequence. */ static inline bool gsi_one_before_end_p (gimple_stmt_iterator i) { return i.ptr != NULL && i.ptr->next == NULL; } /* Advance the iterator to the next gimple statement. */ static inline void gsi_next (gimple_stmt_iterator *i) { i->ptr = i->ptr->next; } /* Advance the iterator to the previous gimple statement. */ static inline void gsi_prev (gimple_stmt_iterator *i) { i->ptr = i->ptr->prev; } /* Return the current stmt. */ static inline gimple gsi_stmt (gimple_stmt_iterator i) { return i.ptr->stmt; } /* Return a block statement iterator that points to the first non-label statement in block BB. */ static inline gimple_stmt_iterator gsi_after_labels (basic_block bb) { gimple_stmt_iterator gsi = gsi_start_bb (bb); while (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) == GIMPLE_LABEL) gsi_next (&gsi); return gsi; } /* Advance the iterator to the next non-debug gimple statement. */ static inline void gsi_next_nondebug (gimple_stmt_iterator *i) { do { gsi_next (i); } while (!gsi_end_p (*i) && is_gimple_debug (gsi_stmt (*i))); } /* Advance the iterator to the next non-debug gimple statement. */ static inline void gsi_prev_nondebug (gimple_stmt_iterator *i) { do { gsi_prev (i); } while (!gsi_end_p (*i) && is_gimple_debug (gsi_stmt (*i))); } /* Return a new iterator pointing to the first non-debug statement in basic block BB. */ static inline gimple_stmt_iterator gsi_start_nondebug_bb (basic_block bb) { gimple_stmt_iterator i = gsi_start_bb (bb); if (!gsi_end_p (i) && is_gimple_debug (gsi_stmt (i))) gsi_next_nondebug (&i); return i; } /* Return a new iterator pointing to the last non-debug statement in basic block BB. 
*/ static inline gimple_stmt_iterator gsi_last_nondebug_bb (basic_block bb) { gimple_stmt_iterator i = gsi_last_bb (bb); if (!gsi_end_p (i) && is_gimple_debug (gsi_stmt (i))) gsi_prev_nondebug (&i); return i; } /* Return a pointer to the current stmt. NOTE: You may want to use gsi_replace on the iterator itself, as this performs additional bookkeeping that will not be done if you simply assign through a pointer returned by gsi_stmt_ptr. */ static inline gimple * gsi_stmt_ptr (gimple_stmt_iterator *i) { return &i->ptr->stmt; } /* Return the basic block associated with this iterator. */ static inline basic_block gsi_bb (gimple_stmt_iterator i) { return i.bb; } /* Return the sequence associated with this iterator. */ static inline gimple_seq gsi_seq (gimple_stmt_iterator i) { return i.seq; } enum gsi_iterator_update { GSI_NEW_STMT, /* Only valid when single statement is added, move iterator to it. */ GSI_SAME_STMT, /* Leave the iterator at the same statement. */ GSI_CONTINUE_LINKING /* Move iterator to whatever position is suitable for linking other statements in the same direction. */ }; /* In gimple-iterator.c */ gimple_stmt_iterator gsi_start_phis (basic_block); gimple_seq gsi_split_seq_after (gimple_stmt_iterator); gimple_seq gsi_split_seq_before (gimple_stmt_iterator *); void gsi_replace (gimple_stmt_iterator *, gimple, bool); void gsi_insert_before (gimple_stmt_iterator *, gimple, enum gsi_iterator_update); void gsi_insert_before_without_update (gimple_stmt_iterator *, gimple, enum gsi_iterator_update); void gsi_insert_seq_before (gimple_stmt_iterator *, gimple_seq, enum gsi_iterator_update); void gsi_insert_seq_before_without_update (gimple_stmt_iterator *, gimple_seq, enum gsi_iterator_update); void gsi_insert_after (gimple_stmt_iterator *, gimple, enum gsi_iterator_update); void gsi_insert_after_without_update (gimple_stmt_iterator *, gimple, enum gsi_iterator_update); void gsi_insert_seq_after (gimple_stmt_iterator *, gimple_seq, enum gsi_iterator_update); void gsi_insert_seq_after_without_update (gimple_stmt_iterator *, gimple_seq, enum gsi_iterator_update); void gsi_remove (gimple_stmt_iterator *, bool); gimple_stmt_iterator gsi_for_stmt (gimple); void gsi_move_after (gimple_stmt_iterator *, gimple_stmt_iterator *); void gsi_move_before (gimple_stmt_iterator *, gimple_stmt_iterator *); void gsi_move_to_bb_end (gimple_stmt_iterator *, struct basic_block_def *); void gsi_insert_on_edge (edge, gimple); void gsi_insert_seq_on_edge (edge, gimple_seq); basic_block gsi_insert_on_edge_immediate (edge, gimple); basic_block gsi_insert_seq_on_edge_immediate (edge, gimple_seq); void gsi_commit_one_edge_insert (edge, basic_block *); void gsi_commit_edge_inserts (void); gimple gimple_call_copy_skip_args (gimple, bitmap); /* Convenience routines to walk all statements of a gimple function. Note that this is useful exclusively before the code is converted into SSA form. Once the program is in SSA form, the standard operand interface should be used to analyze/modify statements. */ struct walk_stmt_info { /* Points to the current statement being walked. */ gimple_stmt_iterator gsi; /* Additional data that the callback functions may want to carry through the recursion. */ void *info; /* Pointer map used to mark visited tree nodes when calling walk_tree on each operand. If set to NULL, duplicate tree nodes will be visited more than once. 
*/ struct pointer_set_t *pset; /* Indicates whether the operand being examined may be replaced with something that matches is_gimple_val (if true) or something slightly more complicated (if false). "Something" technically means the common subset of is_gimple_lvalue and is_gimple_rhs, but we never try to form anything more complicated than that, so we don't bother checking. Also note that CALLBACK should update this flag while walking the sub-expressions of a statement. For instance, when walking the statement 'foo (&var)', the flag VAL_ONLY will initially be set to true, however, when walking &var, the operand of that ADDR_EXPR does not need to be a GIMPLE value. */ bool val_only; /* True if we are currently walking the LHS of an assignment. */ bool is_lhs; /* Optional. Set to true by the callback functions if they made any changes. */ bool changed; /* True if we're interested in location information. */ bool want_locations; /* Operand returned by the callbacks. This is set when calling walk_gimple_seq. If the walk_stmt_fn or walk_tree_fn callback returns non-NULL, this field will contain the tree returned by the last callback. */ tree callback_result; }; /* Callback for walk_gimple_stmt. Called for every statement found during traversal. The first argument points to the statement to walk. The second argument is a flag that the callback sets to 'true' if it the callback handled all the operands and sub-statements of the statement (the default value of this flag is 'false'). The third argument is an anonymous pointer to data to be used by the callback. */ typedef tree (*walk_stmt_fn) (gimple_stmt_iterator *, bool *, struct walk_stmt_info *); gimple walk_gimple_seq (gimple_seq, walk_stmt_fn, walk_tree_fn, struct walk_stmt_info *); tree walk_gimple_stmt (gimple_stmt_iterator *, walk_stmt_fn, walk_tree_fn, struct walk_stmt_info *); tree walk_gimple_op (gimple, walk_tree_fn, struct walk_stmt_info *); #ifdef GATHER_STATISTICS /* Enum and arrays used for allocation stats. Keep in sync with gimple.c:gimple_alloc_kind_names. */ enum gimple_alloc_kind { gimple_alloc_kind_assign, /* Assignments. */ gimple_alloc_kind_phi, /* PHI nodes. */ gimple_alloc_kind_cond, /* Conditionals. */ gimple_alloc_kind_seq, /* Sequences. */ gimple_alloc_kind_rest, /* Everything else. */ gimple_alloc_kind_all }; extern int gimple_alloc_counts[]; extern int gimple_alloc_sizes[]; /* Return the allocation kind for a given stmt CODE. */ static inline enum gimple_alloc_kind gimple_alloc_kind (enum gimple_code code) { switch (code) { case GIMPLE_ASSIGN: return gimple_alloc_kind_assign; case GIMPLE_PHI: return gimple_alloc_kind_phi; case GIMPLE_COND: return gimple_alloc_kind_cond; default: return gimple_alloc_kind_rest; } } #endif /* GATHER_STATISTICS */ extern void dump_gimple_statistics (void); /* In gimple-fold.c. 
*/ void gimplify_and_update_call_from_tree (gimple_stmt_iterator *, tree); tree gimple_fold_builtin (gimple); bool fold_stmt (gimple_stmt_iterator *); bool fold_stmt_inplace (gimple); tree maybe_fold_offset_to_address (location_t, tree, tree, tree); tree maybe_fold_offset_to_reference (location_t, tree, tree, tree); tree maybe_fold_stmt_addition (location_t, tree, tree, tree); tree get_symbol_constant_value (tree); tree canonicalize_constructor_val (tree); bool may_propagate_address_into_dereference (tree, tree); extern tree maybe_fold_and_comparisons (enum tree_code, tree, tree, enum tree_code, tree, tree); extern tree maybe_fold_or_comparisons (enum tree_code, tree, tree, enum tree_code, tree, tree); EXTERN_C_END #endif /* GCC_GIMPLE_H */
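/* Usage sketch (not part of the header above): a minimal illustration of how
   the statement-iterator accessors it declares are typically combined.  The
   helper name count_omp_statements and its counting logic are hypothetical;
   gsi_start_nondebug_bb, gsi_end_p, gsi_next_nondebug, gsi_stmt and
   is_gimple_omp are the inline functions defined above, while basic_block and
   gimple come from GCC's other internal headers.  */

static int
count_omp_statements (basic_block bb)
{
  gimple_stmt_iterator gsi;
  int count = 0;

  /* Walk every non-debug statement in BB.  */
  for (gsi = gsi_start_nondebug_bb (bb); !gsi_end_p (gsi);
       gsi_next_nondebug (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);

      /* is_gimple_omp matches every GIMPLE_OMP_* code via CASE_GIMPLE_OMP.  */
      if (is_gimple_omp (stmt))
        count++;
    }

  return count;
}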
plot.h
#ifndef OPENMC_PLOT_H #define OPENMC_PLOT_H #include <sstream> #include <unordered_map> #include "pugixml.hpp" #include "xtensor/xarray.hpp" #include "hdf5.h" #include "openmc/cell.h" #include "openmc/constants.h" #include "openmc/error.h" #include "openmc/geometry.h" #include "openmc/particle.h" #include "openmc/position.h" #include "openmc/random_lcg.h" #include "openmc/xml_interface.h" namespace openmc { //=============================================================================== // Global variables //=============================================================================== class Plot; namespace model { extern std::unordered_map<int, int> plot_map; //!< map of plot ids to index extern vector<Plot> plots; //!< Plot instance container extern uint64_t plotter_prn_seeds[N_STREAMS]; // Random number seeds used for plotter extern int plotter_stream; // Stream index used by the plotter } // namespace model //=============================================================================== // RGBColor holds color information for plotted objects //=============================================================================== struct RGBColor { // Constructors RGBColor() : red(0), green(0), blue(0) {}; RGBColor(const int v[3]) : red(v[0]), green(v[1]), blue(v[2]) {}; RGBColor(int r, int g, int b) : red(r), green(g), blue(b) {}; RGBColor(const vector<int>& v) { if (v.size() != 3) { throw std::out_of_range("Incorrect vector size for RGBColor."); } red = v[0]; green = v[1]; blue = v[2]; } bool operator==(const RGBColor& other) { return red == other.red && green == other.green && blue == other.blue; } // Members uint8_t red, green, blue; }; // some default colors const RGBColor WHITE {255, 255, 255}; const RGBColor RED {255, 0, 0}; typedef xt::xtensor<RGBColor, 2> ImageData; struct IdData { // Constructor IdData(size_t h_res, size_t v_res); // Methods void set_value(size_t y, size_t x, const Particle& p, int level); void set_overlap(size_t y, size_t x); // Members xt::xtensor<int32_t, 3> data_; //!< 2D array of cell & material ids }; struct PropertyData { // Constructor PropertyData(size_t h_res, size_t v_res); // Methods void set_value(size_t y, size_t x, const Particle& p, int level); void set_overlap(size_t y, size_t x); // Members xt::xtensor<double, 3> data_; //!< 2D array of temperature & density data }; enum class PlotType { slice = 1, voxel = 2 }; enum class PlotBasis { xy = 1, xz = 2, yz = 3 }; enum class PlotColorBy { cells = 0, mats = 1 }; //=============================================================================== // Plot class //=============================================================================== class PlotBase { public: template<class T> T get_map() const; // Members public: Position origin_; //!< Plot origin in geometry Position width_; //!< Plot width in geometry PlotBasis basis_; //!< Plot basis (XY/XZ/YZ) array<size_t, 3> pixels_; //!< Plot size in pixels bool color_overlaps_; //!< Show overlapping cells? 
int level_; //!< Plot universe level }; template<class T> T PlotBase::get_map() const { size_t width = pixels_[0]; size_t height = pixels_[1]; // get pixel size double in_pixel = (width_[0]) / static_cast<double>(width); double out_pixel = (width_[1]) / static_cast<double>(height); // size data array T data(width, height); // setup basis indices and initial position centered on pixel int in_i, out_i; Position xyz = origin_; switch (basis_) { case PlotBasis::xy: in_i = 0; out_i = 1; break; case PlotBasis::xz: in_i = 0; out_i = 2; break; case PlotBasis::yz: in_i = 1; out_i = 2; break; default: UNREACHABLE(); } // set initial position xyz[in_i] = origin_[in_i] - width_[0] / 2. + in_pixel / 2.; xyz[out_i] = origin_[out_i] + width_[1] / 2. - out_pixel / 2.; // arbitrary direction Direction dir = {0.7071, 0.7071, 0.0}; #pragma omp parallel { Particle p; p.r() = xyz; p.u() = dir; p.coord(0).universe = model::root_universe; int level = level_; int j {}; #pragma omp for for (int y = 0; y < height; y++) { p.r()[out_i] = xyz[out_i] - out_pixel * y; for (int x = 0; x < width; x++) { p.r()[in_i] = xyz[in_i] + in_pixel * x; p.n_coord() = 1; // local variables bool found_cell = exhaustive_find_cell(p); j = p.n_coord() - 1; if (level >= 0) { j = level; } if (found_cell) { data.set_value(y, x, p, j); } if (color_overlaps_ && check_cell_overlap(p, false)) { data.set_overlap(y, x); } } // inner for } // outer for } // omp parallel return data; } class Plot : public PlotBase { public: // Constructor Plot(pugi::xml_node plot); // Methods private: void set_id(pugi::xml_node plot_node); void set_type(pugi::xml_node plot_node); void set_output_path(pugi::xml_node plot_node); void set_bg_color(pugi::xml_node plot_node); void set_basis(pugi::xml_node plot_node); void set_origin(pugi::xml_node plot_node); void set_width(pugi::xml_node plot_node); void set_universe(pugi::xml_node plot_node); void set_default_colors(pugi::xml_node plot_node); void set_user_colors(pugi::xml_node plot_node); void set_meshlines(pugi::xml_node plot_node); void set_mask(pugi::xml_node plot_node); void set_overlap_color(pugi::xml_node plot_node); // Members public: int id_; //!< Plot ID PlotType type_; //!< Plot type (Slice/Voxel) PlotColorBy color_by_; //!< Plot coloring (cell/material) int meshlines_width_; //!< Width of lines added to the plot int index_meshlines_mesh_ {-1}; //!< Index of the mesh to draw on the plot RGBColor meshlines_color_; //!< Color of meshlines on the plot RGBColor not_found_ {WHITE}; //!< Plot background color RGBColor overlap_color_ {RED}; //!< Plot overlap color vector<RGBColor> colors_; //!< Plot colors std::string path_plot_; //!< Plot output filename }; //=============================================================================== // Non-member functions //=============================================================================== //! Add mesh lines to image data of a plot object //! \param[in] plot object //! \param[out] image data associated with the plot object void draw_mesh_lines(Plot const& pl, ImageData& data); //! Write a ppm image to file using a plot object's image data //! \param[in] plot object //! \param[out] image data associated with the plot object void output_ppm(Plot const& pl, const ImageData& data); //! Initialize a voxel file //! \param[in] id of an open hdf5 file //! \param[in] dimensions of the voxel file (dx, dy, dz) //! \param[out] dataspace pointer to voxel data //! \param[out] dataset pointer to voxesl data //! 
//! \param[out] pointer to memory space of voxel data
void voxel_init(hid_t file_id, const hsize_t* dims, hid_t* dspace, hid_t* dset,
  hid_t* memspace);

//! Write a section of the voxel data to hdf5
//! \param[in] voxel slice
//! \param[out] dataspace pointer to voxel data
//! \param[out] dataset pointer to voxel data
//! \param[out] pointer to data to write
void voxel_write_slice(
  int x, hid_t dspace, hid_t dset, hid_t memspace, void* buf);

//! Close voxel file entities
//! \param[in] data space to close
//! \param[in] dataset to close
//! \param[in] memory space to close
void voxel_finalize(hid_t dspace, hid_t dset, hid_t memspace);

//===============================================================================
// External functions
//===============================================================================

//! Read plot specifications from a plots.xml file
void read_plots_xml();

//! Create a ppm image for a plot object
//! \param[in] plot object
void create_ppm(Plot const& pl);

//! Create an hdf5 voxel file for a plot object
//! \param[in] plot object
void create_voxel(Plot const& pl);

//! Create a randomly generated RGB color
//! \return RGBColor with random value
RGBColor random_color();

} // namespace openmc

#endif // OPENMC_PLOT_H
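// Usage sketch (not part of the header above): one plausible way a plotting
// driver could consume these declarations -- parse plots.xml, then render each
// plot according to its type.  The function name render_all_plots and the
// include path are assumptions; read_plots_xml(), model::plots, PlotType,
// create_ppm() and create_voxel() are declared in the header above.

#include "openmc/plot.h"

namespace openmc {

void render_all_plots()
{
  // Populate model::plots and model::plot_map from plots.xml.
  read_plots_xml();

  // Write one output file per plot, dispatching on the plot type.
  for (const Plot& pl : model::plots) {
    if (pl.type_ == PlotType::slice) {
      create_ppm(pl);    // 2D slice rendered to a PPM image
    } else {
      create_voxel(pl);  // 3D voxel data written to an HDF5 file
    }
  }
}

} // namespace openmc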
cache.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC AAA CCCC H H EEEEE % % C A A C H H E % % C AAAAA C HHHHH EEE % % C A A C H H E % % CCCC A A CCCC H H EEEEE % % % % % % MagickCore Pixel Cache Methods % % % % Software Design % % Cristy % % July 1999 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite-private.h" #include "MagickCore/distribute-cache-private.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/geometry.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/nt-base-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/policy.h" #include "MagickCore/quantum.h" #include "MagickCore/random_.h" #include "MagickCore/registry.h" #include "MagickCore/resource_.h" #include "MagickCore/semaphore.h" #include "MagickCore/splay-tree.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/timer-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/utility.h" #include "MagickCore/utility-private.h" #if defined(MAGICKCORE_ZLIB_DELEGATE) #include "zlib.h" #endif /* Define declarations. */ #define CacheTick(offset,extent) QuantumTick((MagickOffsetType) offset,extent) #define IsFileDescriptorLimitExceeded() (GetMagickResource(FileResource) > \ GetMagickResourceLimit(FileResource) ? MagickTrue : MagickFalse) /* Typedef declarations. */ typedef struct _MagickModulo { ssize_t quotient, remainder; } MagickModulo; /* Forward declarations. 
*/ #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static Cache GetImagePixelCache(Image *,const MagickBooleanType,ExceptionInfo *) magick_hot_spot; static const Quantum *GetVirtualPixelCache(const Image *,const VirtualPixelMethod,const ssize_t, const ssize_t,const size_t,const size_t,ExceptionInfo *), *GetVirtualPixelsCache(const Image *); static const void *GetVirtualMetacontentFromCache(const Image *); static MagickBooleanType GetOneAuthenticPixelFromCache(Image *,const ssize_t,const ssize_t,Quantum *, ExceptionInfo *), GetOneVirtualPixelFromCache(const Image *,const VirtualPixelMethod, const ssize_t,const ssize_t,Quantum *,ExceptionInfo *), OpenPixelCache(Image *,const MapMode,ExceptionInfo *), OpenPixelCacheOnDisk(CacheInfo *,const MapMode), ReadPixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict, ExceptionInfo *), ReadPixelCacheMetacontent(CacheInfo *magick_restrict, NexusInfo *magick_restrict,ExceptionInfo *), SyncAuthenticPixelsCache(Image *,ExceptionInfo *), WritePixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict, ExceptionInfo *), WritePixelCacheMetacontent(CacheInfo *,NexusInfo *magick_restrict, ExceptionInfo *); static Quantum *GetAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t, const size_t,ExceptionInfo *), *QueueAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t, const size_t,ExceptionInfo *), *SetPixelCacheNexusPixels(const CacheInfo *magick_restrict,const MapMode, const ssize_t,const ssize_t,const size_t,const size_t, const MagickBooleanType,NexusInfo *magick_restrict,ExceptionInfo *) magick_hot_spot; #if defined(MAGICKCORE_OPENCL_SUPPORT) static void CopyOpenCLBuffer(CacheInfo *magick_restrict); #endif #if defined(__cplusplus) || defined(c_plusplus) } #endif /* Global declarations. */ static SemaphoreInfo *cache_semaphore = (SemaphoreInfo *) NULL; static ssize_t cache_anonymous_memory = (-1); static time_t cache_epoch = 0; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + A c q u i r e P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquirePixelCache() acquires a pixel cache. % % The format of the AcquirePixelCache() method is: % % Cache AcquirePixelCache(const size_t number_threads) % % A description of each parameter follows: % % o number_threads: the number of nexus threads. 
% */ MagickPrivate Cache AcquirePixelCache(const size_t number_threads) { CacheInfo *magick_restrict cache_info; char *value; cache_info=(CacheInfo *) AcquireAlignedMemory(1,sizeof(*cache_info)); if (cache_info == (CacheInfo *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); (void) memset(cache_info,0,sizeof(*cache_info)); cache_info->type=UndefinedCache; cache_info->mode=IOMode; cache_info->disk_mode=IOMode; cache_info->colorspace=sRGBColorspace; cache_info->file=(-1); cache_info->id=GetMagickThreadId(); cache_info->number_threads=number_threads; if (GetOpenMPMaximumThreads() > cache_info->number_threads) cache_info->number_threads=GetOpenMPMaximumThreads(); if (GetMagickResourceLimit(ThreadResource) > cache_info->number_threads) cache_info->number_threads=(size_t) GetMagickResourceLimit(ThreadResource); if (cache_info->number_threads == 0) cache_info->number_threads=1; cache_info->nexus_info=AcquirePixelCacheNexus(cache_info->number_threads); if (cache_info->nexus_info == (NexusInfo **) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); value=GetEnvironmentValue("MAGICK_SYNCHRONIZE"); if (value != (const char *) NULL) { cache_info->synchronize=IsStringTrue(value); value=DestroyString(value); } value=GetPolicyValue("cache:synchronize"); if (value != (const char *) NULL) { cache_info->synchronize=IsStringTrue(value); value=DestroyString(value); } cache_info->width_limit=GetMagickResourceLimit(WidthResource); cache_info->height_limit=GetMagickResourceLimit(HeightResource); cache_info->semaphore=AcquireSemaphoreInfo(); cache_info->reference_count=1; cache_info->file_semaphore=AcquireSemaphoreInfo(); cache_info->debug=IsEventLogging(); cache_info->signature=MagickCoreSignature; return((Cache ) cache_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquirePixelCacheNexus() allocates the NexusInfo structure. % % The format of the AcquirePixelCacheNexus method is: % % NexusInfo **AcquirePixelCacheNexus(const size_t number_threads) % % A description of each parameter follows: % % o number_threads: the number of nexus threads. % */ MagickPrivate NexusInfo **AcquirePixelCacheNexus(const size_t number_threads) { NexusInfo **magick_restrict nexus_info; register ssize_t i; nexus_info=(NexusInfo **) MagickAssumeAligned(AcquireAlignedMemory(2* number_threads,sizeof(*nexus_info))); if (nexus_info == (NexusInfo **) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); *nexus_info=(NexusInfo *) AcquireQuantumMemory(2*number_threads, sizeof(**nexus_info)); if (*nexus_info == (NexusInfo *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); (void) memset(*nexus_info,0,2*number_threads*sizeof(**nexus_info)); for (i=0; i < (ssize_t) (2*number_threads); i++) { nexus_info[i]=(*nexus_info+i); if (i < (ssize_t) number_threads) nexus_info[i]->virtual_nexus=(*nexus_info+number_threads+i); nexus_info[i]->signature=MagickCoreSignature; } return(nexus_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e P i x e l C a c h e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquirePixelCachePixels() returns the pixels associated with the specified % image. 
% % The format of the AcquirePixelCachePixels() method is: % % void *AcquirePixelCachePixels(const Image *image,size_t *length, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o length: the pixel cache length. % % o exception: return any errors or warnings in this structure. % */ MagickExport void *AcquirePixelCachePixels(const Image *image,size_t *length, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); (void) exception; cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); *length=0; if ((cache_info->type != MemoryCache) && (cache_info->type != MapCache)) return((void *) NULL); *length=(size_t) cache_info->length; return(cache_info->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C a c h e C o m p o n e n t G e n e s i s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CacheComponentGenesis() instantiates the cache component. % % The format of the CacheComponentGenesis method is: % % MagickBooleanType CacheComponentGenesis(void) % */ MagickPrivate MagickBooleanType CacheComponentGenesis(void) { if (cache_semaphore == (SemaphoreInfo *) NULL) cache_semaphore=AcquireSemaphoreInfo(); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C a c h e C o m p o n e n t T e r m i n u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CacheComponentTerminus() destroys the cache component. % % The format of the CacheComponentTerminus() method is: % % CacheComponentTerminus(void) % */ MagickPrivate void CacheComponentTerminus(void) { if (cache_semaphore == (SemaphoreInfo *) NULL) ActivateSemaphoreInfo(&cache_semaphore); /* no op-- nothing to destroy */ RelinquishSemaphoreInfo(&cache_semaphore); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l i p P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClipPixelCacheNexus() clips the cache nexus as defined by the image clip % mask. The method returns MagickTrue if the pixel region is clipped, % otherwise MagickFalse. % % The format of the ClipPixelCacheNexus() method is: % % MagickBooleanType ClipPixelCacheNexus(Image *image,NexusInfo *nexus_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o nexus_info: the cache nexus to clip. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType ClipPixelCacheNexus(Image *image, NexusInfo *nexus_info,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; register Quantum *magick_restrict p, *magick_restrict q; ssize_t y; /* Apply clip mask. 
*/ if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if ((image->channels & WriteMaskChannel) == 0) return(MagickTrue); if ((nexus_info->region.width == 0) || (nexus_info->region.height == 0)) return(MagickTrue); cache_info=(CacheInfo *) image->cache; if (cache_info == (Cache) NULL) return(MagickFalse); p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,nexus_info->region.y, nexus_info->region.width,nexus_info->region.height, nexus_info->virtual_nexus,exception); q=nexus_info->pixels; if ((p == (Quantum *) NULL) || (q == (Quantum *) NULL)) return(MagickFalse); for (y=0; y < (ssize_t) nexus_info->region.height; y++) { register ssize_t x; for (x=0; x < (ssize_t) nexus_info->region.width; x++) { double mask_alpha; register ssize_t i; mask_alpha=QuantumScale*GetPixelWriteMask(image,p); if (fabs(mask_alpha) >= MagickEpsilon) { for (i=0; i < (ssize_t) image->number_channels; i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[i]=ClampToQuantum(MagickOver_((double) p[i],mask_alpha* GetPixelAlpha(image,p),(double) q[i],(double) GetPixelAlpha(image,q))); } SetPixelAlpha(image,GetPixelAlpha(image,p),q); } p+=GetPixelChannels(image); q+=GetPixelChannels(image); } } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l o n e P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClonePixelCache() clones a pixel cache. % % The format of the ClonePixelCache() method is: % % Cache ClonePixelCache(const Cache cache) % % A description of each parameter follows: % % o cache: the pixel cache. % */ MagickPrivate Cache ClonePixelCache(const Cache cache) { CacheInfo *magick_restrict clone_info; const CacheInfo *magick_restrict cache_info; assert(cache != NULL); cache_info=(const CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", cache_info->filename); clone_info=(CacheInfo *) AcquirePixelCache(cache_info->number_threads); clone_info->virtual_pixel_method=cache_info->virtual_pixel_method; return((Cache ) clone_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l o n e P i x e l C a c h e M e t h o d s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClonePixelCacheMethods() clones the pixel cache methods from one cache to % another. % % The format of the ClonePixelCacheMethods() method is: % % void ClonePixelCacheMethods(Cache clone,const Cache cache) % % A description of each parameter follows: % % o clone: Specifies a pointer to a Cache structure. % % o cache: the pixel cache. 
% */ MagickPrivate void ClonePixelCacheMethods(Cache clone,const Cache cache) { CacheInfo *magick_restrict cache_info, *magick_restrict source_info; assert(clone != (Cache) NULL); source_info=(CacheInfo *) clone; assert(source_info->signature == MagickCoreSignature); if (source_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", source_info->filename); assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); source_info->methods=cache_info->methods; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l o n e P i x e l C a c h e R e p o s i t o r y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClonePixelCacheRepository() clones the source pixel cache to the destination % cache. % % The format of the ClonePixelCacheRepository() method is: % % MagickBooleanType ClonePixelCacheRepository(CacheInfo *cache_info, % CacheInfo *source_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o cache_info: the pixel cache. % % o source_info: the source pixel cache. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType ClonePixelCacheOnDisk( CacheInfo *magick_restrict cache_info,CacheInfo *magick_restrict clone_info) { MagickSizeType extent; size_t quantum; ssize_t count; struct stat file_stats; unsigned char *buffer; /* Clone pixel cache on disk with identical morphology. */ if ((OpenPixelCacheOnDisk(cache_info,ReadMode) == MagickFalse) || (OpenPixelCacheOnDisk(clone_info,IOMode) == MagickFalse)) return(MagickFalse); if ((lseek(cache_info->file,0,SEEK_SET) < 0) || (lseek(clone_info->file,0,SEEK_SET) < 0)) return(MagickFalse); quantum=(size_t) MagickMaxBufferExtent; if ((fstat(cache_info->file,&file_stats) == 0) && (file_stats.st_size > 0)) { #if defined(MAGICKCORE_HAVE_LINUX_SENDFILE) if (cache_info->length < 0x7ffff000) { count=sendfile(clone_info->file,cache_info->file,(off_t *) NULL, (ssize_t) cache_info->length); if (count == (ssize_t) cache_info->length) return(MagickTrue); if ((lseek(cache_info->file,0,SEEK_SET) < 0) || (lseek(clone_info->file,0,SEEK_SET) < 0)) return(MagickFalse); } #endif quantum=(size_t) MagickMin(file_stats.st_size,MagickMaxBufferExtent); } buffer=(unsigned char *) AcquireQuantumMemory(quantum,sizeof(*buffer)); if (buffer == (unsigned char *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); extent=0; while ((count=read(cache_info->file,buffer,quantum)) > 0) { ssize_t number_bytes; number_bytes=write(clone_info->file,buffer,(size_t) count); if (number_bytes != count) break; extent+=number_bytes; } buffer=(unsigned char *) RelinquishMagickMemory(buffer); if (extent != cache_info->length) return(MagickFalse); return(MagickTrue); } static MagickBooleanType ClonePixelCacheRepository( CacheInfo *magick_restrict clone_info,CacheInfo *magick_restrict cache_info, ExceptionInfo *exception) { #define MaxCacheThreads ((size_t) GetMagickResourceLimit(ThreadResource)) #define cache_number_threads(source,destination,chunk,multithreaded) \ num_threads((multithreaded) == 0 ? 1 : \ (((source)->type != MemoryCache) && ((source)->type != MapCache)) || \ (((destination)->type != MemoryCache) && ((destination)->type != MapCache)) ? 
\ MagickMax(MagickMin(GetMagickResourceLimit(ThreadResource),2),1) : \ MagickMax(MagickMin((ssize_t) GetMagickResourceLimit(ThreadResource),(ssize_t) (chunk)/256),1)) MagickBooleanType optimize, status; NexusInfo **magick_restrict cache_nexus, **magick_restrict clone_nexus; size_t length; ssize_t y; assert(cache_info != (CacheInfo *) NULL); assert(clone_info != (CacheInfo *) NULL); assert(exception != (ExceptionInfo *) NULL); if (cache_info->type == PingCache) return(MagickTrue); length=cache_info->number_channels*sizeof(*cache_info->channel_map); if ((cache_info->storage_class == clone_info->storage_class) && (cache_info->colorspace == clone_info->colorspace) && (cache_info->alpha_trait == clone_info->alpha_trait) && (cache_info->channels == clone_info->channels) && (cache_info->columns == clone_info->columns) && (cache_info->rows == clone_info->rows) && (cache_info->number_channels == clone_info->number_channels) && (memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) && (cache_info->metacontent_extent == clone_info->metacontent_extent)) { /* Identical pixel cache morphology. */ if (((cache_info->type == MemoryCache) || (cache_info->type == MapCache)) && ((clone_info->type == MemoryCache) || (clone_info->type == MapCache))) { (void) memcpy(clone_info->pixels,cache_info->pixels, cache_info->number_channels*cache_info->columns*cache_info->rows* sizeof(*cache_info->pixels)); if ((cache_info->metacontent_extent != 0) && (clone_info->metacontent_extent != 0)) (void) memcpy(clone_info->metacontent,cache_info->metacontent, cache_info->columns*cache_info->rows* clone_info->metacontent_extent*sizeof(unsigned char)); return(MagickTrue); } if ((cache_info->type == DiskCache) && (clone_info->type == DiskCache)) return(ClonePixelCacheOnDisk(cache_info,clone_info)); } /* Mismatched pixel cache morphology. */ cache_nexus=AcquirePixelCacheNexus(cache_info->number_threads); clone_nexus=AcquirePixelCacheNexus(clone_info->number_threads); length=cache_info->number_channels*sizeof(*cache_info->channel_map); optimize=(cache_info->number_channels == clone_info->number_channels) && (memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) ? MagickTrue : MagickFalse; length=(size_t) MagickMin(cache_info->number_channels*cache_info->columns, clone_info->number_channels*clone_info->columns); status=MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ cache_number_threads(cache_info,clone_info,cache_info->rows,1) #endif for (y=0; y < (ssize_t) cache_info->rows; y++) { const int id = GetOpenMPThreadId(); Quantum *pixels; register ssize_t x; if (status == MagickFalse) continue; if (y >= (ssize_t) clone_info->rows) continue; pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,0,y, cache_info->columns,1,MagickFalse,cache_nexus[id],exception); if (pixels == (Quantum *) NULL) continue; status=ReadPixelCachePixels(cache_info,cache_nexus[id],exception); if (status == MagickFalse) continue; pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,0,y, clone_info->columns,1,MagickFalse,clone_nexus[id],exception); if (pixels == (Quantum *) NULL) continue; (void) memset(clone_nexus[id]->pixels,0,(size_t) clone_nexus[id]->length); if (optimize != MagickFalse) (void) memcpy(clone_nexus[id]->pixels,cache_nexus[id]->pixels,length* sizeof(Quantum)); else { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; /* Mismatched pixel channel map. 
*/ p=cache_nexus[id]->pixels; q=clone_nexus[id]->pixels; for (x=0; x < (ssize_t) cache_info->columns; x++) { register ssize_t i; if (x == (ssize_t) clone_info->columns) break; for (i=0; i < (ssize_t) clone_info->number_channels; i++) { PixelChannel channel; PixelTrait traits; channel=clone_info->channel_map[i].channel; traits=cache_info->channel_map[channel].traits; if (traits != UndefinedPixelTrait) *q=*(p+cache_info->channel_map[channel].offset); q++; } p+=cache_info->number_channels; } } status=WritePixelCachePixels(clone_info,clone_nexus[id],exception); } if ((cache_info->metacontent_extent != 0) && (clone_info->metacontent_extent != 0)) { /* Clone metacontent. */ length=(size_t) MagickMin(cache_info->metacontent_extent, clone_info->metacontent_extent); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ cache_number_threads(cache_info,clone_info,cache_info->rows,1) #endif for (y=0; y < (ssize_t) cache_info->rows; y++) { const int id = GetOpenMPThreadId(); Quantum *pixels; if (status == MagickFalse) continue; if (y >= (ssize_t) clone_info->rows) continue; pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,0,y, cache_info->columns,1,MagickFalse,cache_nexus[id],exception); if (pixels == (Quantum *) NULL) continue; status=ReadPixelCacheMetacontent(cache_info,cache_nexus[id],exception); if (status == MagickFalse) continue; pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,0,y, clone_info->columns,1,MagickFalse,clone_nexus[id],exception); if (pixels == (Quantum *) NULL) continue; if ((clone_nexus[id]->metacontent != (void *) NULL) && (cache_nexus[id]->metacontent != (void *) NULL)) (void) memcpy(clone_nexus[id]->metacontent, cache_nexus[id]->metacontent,length*sizeof(unsigned char)); status=WritePixelCacheMetacontent(clone_info,clone_nexus[id],exception); } } clone_nexus=DestroyPixelCacheNexus(clone_nexus,clone_info->number_threads); cache_nexus=DestroyPixelCacheNexus(cache_nexus,cache_info->number_threads); if (cache_info->debug != MagickFalse) { char message[MagickPathExtent]; (void) FormatLocaleString(message,MagickPathExtent,"%s => %s", CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type), CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) clone_info->type)); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message); } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y I m a g e P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyImagePixelCache() deallocates memory associated with the pixel cache. % % The format of the DestroyImagePixelCache() method is: % % void DestroyImagePixelCache(Image *image) % % A description of each parameter follows: % % o image: the image. % */ static void DestroyImagePixelCache(Image *image) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->cache != (void *) NULL) image->cache=DestroyPixelCache(image->cache); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y I m a g e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyImagePixels() deallocates memory associated with the pixel cache. 
% % The format of the DestroyImagePixels() method is: % % void DestroyImagePixels(Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport void DestroyImagePixels(Image *image) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.destroy_pixel_handler != (DestroyPixelHandler) NULL) { cache_info->methods.destroy_pixel_handler(image); return; } image->cache=DestroyPixelCache(image->cache); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyPixelCache() deallocates memory associated with the pixel cache. % % The format of the DestroyPixelCache() method is: % % Cache DestroyPixelCache(Cache cache) % % A description of each parameter follows: % % o cache: the pixel cache. % */ static MagickBooleanType ClosePixelCacheOnDisk(CacheInfo *cache_info) { int status; status=(-1); if (cache_info->file != -1) { status=close(cache_info->file); cache_info->file=(-1); RelinquishMagickResource(FileResource,1); } return(status == -1 ? MagickFalse : MagickTrue); } static inline void RelinquishPixelCachePixels(CacheInfo *cache_info) { switch (cache_info->type) { case MemoryCache: { #if defined(MAGICKCORE_OPENCL_SUPPORT) if (cache_info->opencl != (MagickCLCacheInfo) NULL) { cache_info->opencl=RelinquishMagickCLCacheInfo(cache_info->opencl, MagickTrue); cache_info->pixels=(Quantum *) NULL; break; } #endif if (cache_info->mapped == MagickFalse) cache_info->pixels=(Quantum *) RelinquishAlignedMemory( cache_info->pixels); else (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length); RelinquishMagickResource(MemoryResource,cache_info->length); break; } case MapCache: { (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length); cache_info->pixels=(Quantum *) NULL; if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode)) (void) RelinquishUniqueFileResource(cache_info->cache_filename); *cache_info->cache_filename='\0'; RelinquishMagickResource(MapResource,cache_info->length); } case DiskCache: { if (cache_info->file != -1) (void) ClosePixelCacheOnDisk(cache_info); if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode)) (void) RelinquishUniqueFileResource(cache_info->cache_filename); *cache_info->cache_filename='\0'; RelinquishMagickResource(DiskResource,cache_info->length); break; } case DistributedCache: { *cache_info->cache_filename='\0'; (void) RelinquishDistributePixelCache((DistributeCacheInfo *) cache_info->server_info); break; } default: break; } cache_info->type=UndefinedCache; cache_info->mapped=MagickFalse; cache_info->metacontent=(void *) NULL; } MagickPrivate Cache DestroyPixelCache(Cache cache) { CacheInfo *magick_restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", cache_info->filename); LockSemaphoreInfo(cache_info->semaphore); cache_info->reference_count--; if (cache_info->reference_count != 0) { 
UnlockSemaphoreInfo(cache_info->semaphore); return((Cache) NULL); } UnlockSemaphoreInfo(cache_info->semaphore); if (cache_info->debug != MagickFalse) { char message[MagickPathExtent]; (void) FormatLocaleString(message,MagickPathExtent,"destroy %s", cache_info->filename); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message); } RelinquishPixelCachePixels(cache_info); if (cache_info->server_info != (DistributeCacheInfo *) NULL) cache_info->server_info=DestroyDistributeCacheInfo((DistributeCacheInfo *) cache_info->server_info); if (cache_info->nexus_info != (NexusInfo **) NULL) cache_info->nexus_info=DestroyPixelCacheNexus(cache_info->nexus_info, cache_info->number_threads); if (cache_info->random_info != (RandomInfo *) NULL) cache_info->random_info=DestroyRandomInfo(cache_info->random_info); if (cache_info->file_semaphore != (SemaphoreInfo *) NULL) RelinquishSemaphoreInfo(&cache_info->file_semaphore); if (cache_info->semaphore != (SemaphoreInfo *) NULL) RelinquishSemaphoreInfo(&cache_info->semaphore); cache_info->signature=(~MagickCoreSignature); cache_info=(CacheInfo *) RelinquishAlignedMemory(cache_info); cache=(Cache) NULL; return(cache); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyPixelCacheNexus() destroys a pixel cache nexus. % % The format of the DestroyPixelCacheNexus() method is: % % NexusInfo **DestroyPixelCacheNexus(NexusInfo *nexus_info, % const size_t number_threads) % % A description of each parameter follows: % % o nexus_info: the nexus to destroy. % % o number_threads: the number of nexus threads. % */ static inline void RelinquishCacheNexusPixels(NexusInfo *nexus_info) { if (nexus_info->mapped == MagickFalse) (void) RelinquishAlignedMemory(nexus_info->cache); else (void) UnmapBlob(nexus_info->cache,(size_t) nexus_info->length); nexus_info->cache=(Quantum *) NULL; nexus_info->pixels=(Quantum *) NULL; nexus_info->metacontent=(void *) NULL; nexus_info->length=0; nexus_info->mapped=MagickFalse; } MagickPrivate NexusInfo **DestroyPixelCacheNexus(NexusInfo **nexus_info, const size_t number_threads) { register ssize_t i; assert(nexus_info != (NexusInfo **) NULL); for (i=0; i < (ssize_t) (2*number_threads); i++) { if (nexus_info[i]->cache != (Quantum *) NULL) RelinquishCacheNexusPixels(nexus_info[i]); nexus_info[i]->signature=(~MagickCoreSignature); } *nexus_info=(NexusInfo *) RelinquishMagickMemory(*nexus_info); nexus_info=(NexusInfo **) RelinquishAlignedMemory(nexus_info); return(nexus_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t A u t h e n t i c M e t a c o n t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticMetacontent() returns the authentic metacontent corresponding % with the last call to QueueAuthenticPixels() or GetVirtualPixels(). NULL is % returned if the associated pixels are not available. % % The format of the GetAuthenticMetacontent() method is: % % void *GetAuthenticMetacontent(const Image *image) % % A description of each parameter follows: % % o image: the image. 
% */ MagickExport void *GetAuthenticMetacontent(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.get_authentic_metacontent_from_handler != (GetAuthenticMetacontentFromHandler) NULL) { void *metacontent; metacontent=cache_info->methods. get_authentic_metacontent_from_handler(image); return(metacontent); } assert(id < (int) cache_info->number_threads); return(cache_info->nexus_info[id]->metacontent); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t A u t h e n t i c M e t a c o n t e n t F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticMetacontentFromCache() returns the meta-content corresponding % with the last call to QueueAuthenticPixelsCache() or % GetAuthenticPixelsCache(). % % The format of the GetAuthenticMetacontentFromCache() method is: % % void *GetAuthenticMetacontentFromCache(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ static void *GetAuthenticMetacontentFromCache(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); return(cache_info->nexus_info[id]->metacontent); } #if defined(MAGICKCORE_OPENCL_SUPPORT) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t A u t h e n t i c O p e n C L B u f f e r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticOpenCLBuffer() returns an OpenCL buffer used to execute OpenCL % operations. % % The format of the GetAuthenticOpenCLBuffer() method is: % % cl_mem GetAuthenticOpenCLBuffer(const Image *image, % MagickCLDevice device,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o device: the device to use. % % o exception: return any errors or warnings in this structure. 
% */ MagickPrivate cl_mem GetAuthenticOpenCLBuffer(const Image *image, MagickCLDevice device,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(device != (const MagickCLDevice) NULL); cache_info=(CacheInfo *) image->cache; if ((cache_info->type == UndefinedCache) || (cache_info->reference_count > 1)) { SyncImagePixelCache((Image *) image,exception); cache_info=(CacheInfo *) image->cache; } if ((cache_info->type != MemoryCache) || (cache_info->mapped != MagickFalse)) return((cl_mem) NULL); LockSemaphoreInfo(cache_info->semaphore); if ((cache_info->opencl != (MagickCLCacheInfo) NULL) && (cache_info->opencl->device->context != device->context)) cache_info->opencl=CopyMagickCLCacheInfo(cache_info->opencl); if (cache_info->opencl == (MagickCLCacheInfo) NULL) { assert(cache_info->pixels != (Quantum *) NULL); cache_info->opencl=AcquireMagickCLCacheInfo(device,cache_info->pixels, cache_info->length); } if (cache_info->opencl != (MagickCLCacheInfo) NULL) RetainOpenCLMemObject(cache_info->opencl->buffer); UnlockSemaphoreInfo(cache_info->semaphore); if (cache_info->opencl == (MagickCLCacheInfo) NULL) return((cl_mem) NULL); assert(cache_info->opencl->pixels == cache_info->pixels); return(cache_info->opencl->buffer); } #endif /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t A u t h e n t i c P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticPixelCacheNexus() gets authentic pixels from the in-memory or % disk pixel cache as defined by the geometry parameters. A pointer to the % pixels is returned if the pixels are transferred, otherwise a NULL is % returned. % % The format of the GetAuthenticPixelCacheNexus() method is: % % Quantum *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o nexus_info: the cache nexus to return. % % o exception: return any errors or warnings in this structure. % */ MagickPrivate Quantum *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x, const ssize_t y,const size_t columns,const size_t rows,NexusInfo *nexus_info, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; Quantum *magick_restrict pixels; /* Transfer pixels from the cache. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickTrue, nexus_info,exception); if (pixels == (Quantum *) NULL) return((Quantum *) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (nexus_info->authentic_pixel_cache != MagickFalse) return(pixels); if (ReadPixelCachePixels(cache_info,nexus_info,exception) == MagickFalse) return((Quantum *) NULL); if (cache_info->metacontent_extent != 0) if (ReadPixelCacheMetacontent(cache_info,nexus_info,exception) == MagickFalse) return((Quantum *) NULL); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t A u t h e n t i c P i x e l s F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticPixelsFromCache() returns the pixels associated with the last % call to the QueueAuthenticPixelsCache() or GetAuthenticPixelsCache() methods. % % The format of the GetAuthenticPixelsFromCache() method is: % % Quantum *GetAuthenticPixelsFromCache(const Image image) % % A description of each parameter follows: % % o image: the image. % */ static Quantum *GetAuthenticPixelsFromCache(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); return(cache_info->nexus_info[id]->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t A u t h e n t i c P i x e l Q u e u e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticPixelQueue() returns the authentic pixels associated % corresponding with the last call to QueueAuthenticPixels() or % GetAuthenticPixels(). % % The format of the GetAuthenticPixelQueue() method is: % % Quantum *GetAuthenticPixelQueue(const Image image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport Quantum *GetAuthenticPixelQueue(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.get_authentic_pixels_from_handler != (GetAuthenticPixelsFromHandler) NULL) return(cache_info->methods.get_authentic_pixels_from_handler(image)); assert(id < (int) cache_info->number_threads); return(cache_info->nexus_info[id]->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t A u t h e n t i c P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticPixels() obtains a pixel region for read/write access. If the % region is successfully accessed, a pointer to a Quantum array % representing the region is returned, otherwise NULL is returned. % % The returned pointer may point to a temporary working copy of the pixels % or it may point to the original pixels in memory. 
Performance is maximized % if the selected region is part of one row, or one or more full rows, since % then there is opportunity to access the pixels in-place (without a copy) % if the image is in memory, or in a memory-mapped file. The returned pointer % must *never* be deallocated by the user. % % Pixels accessed via the returned pointer represent a simple array of type % Quantum. If the image has corresponding metacontent,call % GetAuthenticMetacontent() after invoking GetAuthenticPixels() to obtain the % meta-content corresponding to the region. Once the Quantum array has % been updated, the changes must be saved back to the underlying image using % SyncAuthenticPixels() or they may be lost. % % The format of the GetAuthenticPixels() method is: % % Quantum *GetAuthenticPixels(Image *image,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o exception: return any errors or warnings in this structure. % */ MagickExport Quantum *GetAuthenticPixels(Image *image,const ssize_t x, const ssize_t y,const size_t columns,const size_t rows, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); Quantum *pixels; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.get_authentic_pixels_handler != (GetAuthenticPixelsHandler) NULL) { pixels=cache_info->methods.get_authentic_pixels_handler(image,x,y,columns, rows,exception); return(pixels); } assert(id < (int) cache_info->number_threads); pixels=GetAuthenticPixelCacheNexus(image,x,y,columns,rows, cache_info->nexus_info[id],exception); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t A u t h e n t i c P i x e l s C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticPixelsCache() gets pixels from the in-memory or disk pixel cache % as defined by the geometry parameters. A pointer to the pixels is returned % if the pixels are transferred, otherwise a NULL is returned. % % The format of the GetAuthenticPixelsCache() method is: % % Quantum *GetAuthenticPixelsCache(Image *image,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o exception: return any errors or warnings in this structure. 
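%
%  A hypothetical read-modify-sync sketch using the public GetAuthenticPixels()
%  entry point described above (added for illustration; image and exception are
%  assumed to be valid):
%
%      ssize_t
%        y;
%
%      for (y=0; y < (ssize_t) image->rows; y++)
%      {
%        Quantum
%          *q;
%
%        ssize_t
%          x;
%
%        q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
%        if (q == (Quantum *) NULL)
%          break;
%        for (x=0; x < (ssize_t) image->columns; x++)
%        {
%          /* e.g. negate the red channel of each pixel in the row */
%          SetPixelRed(image,QuantumRange-GetPixelRed(image,q),q);
%          q+=GetPixelChannels(image);
%        }
%        if (SyncAuthenticPixels(image,exception) == MagickFalse)
%          break;
%      }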
% */ static Quantum *GetAuthenticPixelsCache(Image *image,const ssize_t x, const ssize_t y,const size_t columns,const size_t rows, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); Quantum *magick_restrict pixels; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; if (cache_info == (Cache) NULL) return((Quantum *) NULL); assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); pixels=GetAuthenticPixelCacheNexus(image,x,y,columns,rows, cache_info->nexus_info[id],exception); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e E x t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageExtent() returns the extent of the pixels associated corresponding % with the last call to QueueAuthenticPixels() or GetAuthenticPixels(). % % The format of the GetImageExtent() method is: % % MagickSizeType GetImageExtent(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport MagickSizeType GetImageExtent(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); return(GetPixelCacheNexusExtent(cache_info,cache_info->nexus_info[id])); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImagePixelCache() ensures that there is only a single reference to the % pixel cache to be modified, updating the provided cache pointer to point to % a clone of the original pixel cache if necessary. % % The format of the GetImagePixelCache method is: % % Cache GetImagePixelCache(Image *image,const MagickBooleanType clone, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o clone: any value other than MagickFalse clones the cache pixels. % % o exception: return any errors or warnings in this structure. % */ static inline MagickBooleanType ValidatePixelCacheMorphology( const Image *magick_restrict image) { const CacheInfo *magick_restrict cache_info; const PixelChannelMap *magick_restrict p, *magick_restrict q; /* Does the image match the pixel cache morphology? 
*/ cache_info=(CacheInfo *) image->cache; p=image->channel_map; q=cache_info->channel_map; if ((image->storage_class != cache_info->storage_class) || (image->colorspace != cache_info->colorspace) || (image->alpha_trait != cache_info->alpha_trait) || (image->channels != cache_info->channels) || (image->columns != cache_info->columns) || (image->rows != cache_info->rows) || (image->number_channels != cache_info->number_channels) || (memcmp(p,q,image->number_channels*sizeof(*p)) != 0) || (image->metacontent_extent != cache_info->metacontent_extent) || (cache_info->nexus_info == (NexusInfo **) NULL)) return(MagickFalse); return(MagickTrue); } static Cache GetImagePixelCache(Image *image,const MagickBooleanType clone, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; MagickBooleanType destroy, status; static MagickSizeType cache_timelimit = MagickResourceInfinity, cpu_throttle = MagickResourceInfinity, cycles = 0; status=MagickTrue; if (cpu_throttle == MagickResourceInfinity) cpu_throttle=GetMagickResourceLimit(ThrottleResource); if ((cpu_throttle != 0) && ((cycles++ % 32) == 0)) MagickDelay(cpu_throttle); if (cache_epoch == 0) { /* Set the expire time in seconds. */ cache_timelimit=GetMagickResourceLimit(TimeResource); cache_epoch=GetMagickTime(); } if ((cache_timelimit != MagickResourceInfinity) && ((MagickSizeType) (GetMagickTime()-cache_epoch) >= cache_timelimit)) { #if defined(ECANCELED) errno=ECANCELED; #endif cache_info=(CacheInfo *) image->cache; if (cache_info->file != -1) (void) ClosePixelCacheOnDisk(cache_info); ThrowFatalException(ResourceLimitFatalError,"TimeLimitExceeded"); } LockSemaphoreInfo(image->semaphore); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; #if defined(MAGICKCORE_OPENCL_SUPPORT) CopyOpenCLBuffer(cache_info); #endif destroy=MagickFalse; if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode)) { LockSemaphoreInfo(cache_info->semaphore); if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode)) { CacheInfo *clone_info; Image clone_image; /* Clone pixel cache. */ clone_image=(*image); clone_image.semaphore=AcquireSemaphoreInfo(); clone_image.reference_count=1; clone_image.cache=ClonePixelCache(cache_info); clone_info=(CacheInfo *) clone_image.cache; status=OpenPixelCache(&clone_image,IOMode,exception); if (status == MagickFalse) clone_info=(CacheInfo *) DestroyPixelCache(clone_info); else { if (clone != MagickFalse) status=ClonePixelCacheRepository(clone_info,cache_info, exception); if (status == MagickFalse) clone_info=(CacheInfo *) DestroyPixelCache(clone_info); else { destroy=MagickTrue; image->cache=clone_info; } } RelinquishSemaphoreInfo(&clone_image.semaphore); } UnlockSemaphoreInfo(cache_info->semaphore); } if (destroy != MagickFalse) cache_info=(CacheInfo *) DestroyPixelCache(cache_info); if (status != MagickFalse) { /* Ensure the image matches the pixel cache morphology. 
*/ if (image->type != UndefinedType) image->type=UndefinedType; if (ValidatePixelCacheMorphology(image) == MagickFalse) { status=OpenPixelCache(image,IOMode,exception); cache_info=(CacheInfo *) image->cache; if (cache_info->file != -1) (void) ClosePixelCacheOnDisk(cache_info); } } UnlockSemaphoreInfo(image->semaphore); if (status == MagickFalse) return((Cache) NULL); return(image->cache); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e P i x e l C a c h e T y p e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImagePixelCacheType() returns the pixel cache type: UndefinedCache, % DiskCache, MemoryCache, MapCache, or PingCache. % % The format of the GetImagePixelCacheType() method is: % % CacheType GetImagePixelCacheType(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport CacheType GetImagePixelCacheType(const Image *image) { CacheInfo *magick_restrict cache_info; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); return(cache_info->type); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t O n e A u t h e n t i c P i x e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetOneAuthenticPixel() returns a single pixel at the specified (x,y) % location. The image background color is returned if an error occurs. % % The format of the GetOneAuthenticPixel() method is: % % MagickBooleanType GetOneAuthenticPixel(const Image image,const ssize_t x, % const ssize_t y,Quantum *pixel,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y: These values define the location of the pixel to return. % % o pixel: return a pixel at the specified (x,y) location. % % o exception: return any errors or warnings in this structure. 
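%
%  A minimal sketch of fetching and inspecting a single authentic pixel (added
%  illustration; x, y, image, and exception are assumed to be supplied by the
%  caller):
%
%      Quantum
%        pixel[MaxPixelChannels];
%
%      if (GetOneAuthenticPixel(image,x,y,pixel,exception) != MagickFalse)
%        {
%          /* pixel[RedPixelChannel], pixel[AlphaPixelChannel], etc. are set */
%        }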
% */ static inline MagickBooleanType CopyPixel(const Image *image, const Quantum *source,Quantum *destination) { register ssize_t i; if (source == (const Quantum *) NULL) { destination[RedPixelChannel]=ClampToQuantum(image->background_color.red); destination[GreenPixelChannel]=ClampToQuantum( image->background_color.green); destination[BluePixelChannel]=ClampToQuantum( image->background_color.blue); destination[BlackPixelChannel]=ClampToQuantum( image->background_color.black); destination[AlphaPixelChannel]=ClampToQuantum( image->background_color.alpha); return(MagickFalse); } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); destination[channel]=source[i]; } return(MagickTrue); } MagickExport MagickBooleanType GetOneAuthenticPixel(Image *image, const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; register Quantum *magick_restrict q; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel)); if (cache_info->methods.get_one_authentic_pixel_from_handler != (GetOneAuthenticPixelFromHandler) NULL) return(cache_info->methods.get_one_authentic_pixel_from_handler(image,x,y,pixel,exception)); q=GetAuthenticPixelsCache(image,x,y,1UL,1UL,exception); return(CopyPixel(image,q,pixel)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t O n e A u t h e n t i c P i x e l F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetOneAuthenticPixelFromCache() returns a single pixel at the specified (x,y) % location. The image background color is returned if an error occurs. % % The format of the GetOneAuthenticPixelFromCache() method is: % % MagickBooleanType GetOneAuthenticPixelFromCache(const Image image, % const ssize_t x,const ssize_t y,Quantum *pixel, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y: These values define the location of the pixel to return. % % o pixel: return a pixel at the specified (x,y) location. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType GetOneAuthenticPixelFromCache(Image *image, const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); register Quantum *magick_restrict q; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel)); q=GetAuthenticPixelCacheNexus(image,x,y,1UL,1UL,cache_info->nexus_info[id], exception); return(CopyPixel(image,q,pixel)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t O n e V i r t u a l P i x e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetOneVirtualPixel() returns a single virtual pixel at the specified % (x,y) location. The image background color is returned if an error occurs. 
% If you plan to modify the pixel, use GetOneAuthenticPixel() instead. % % The format of the GetOneVirtualPixel() method is: % % MagickBooleanType GetOneVirtualPixel(const Image image,const ssize_t x, % const ssize_t y,Quantum *pixel,ExceptionInfo exception) % % A description of each parameter follows: % % o image: the image. % % o x,y: These values define the location of the pixel to return. % % o pixel: return a pixel at the specified (x,y) location. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType GetOneVirtualPixel(const Image *image, const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); const Quantum *p; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel)); if (cache_info->methods.get_one_virtual_pixel_from_handler != (GetOneVirtualPixelFromHandler) NULL) return(cache_info->methods.get_one_virtual_pixel_from_handler(image, GetPixelCacheVirtualMethod(image),x,y,pixel,exception)); assert(id < (int) cache_info->number_threads); p=GetVirtualPixelCacheNexus(image,GetPixelCacheVirtualMethod(image),x,y, 1UL,1UL,cache_info->nexus_info[id],exception); return(CopyPixel(image,p,pixel)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t O n e V i r t u a l P i x e l F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetOneVirtualPixelFromCache() returns a single virtual pixel at the % specified (x,y) location. The image background color is returned if an % error occurs. % % The format of the GetOneVirtualPixelFromCache() method is: % % MagickBooleanType GetOneVirtualPixelFromCache(const Image image, % const VirtualPixelMethod method,const ssize_t x,const ssize_t y, % Quantum *pixel,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o virtual_pixel_method: the virtual pixel method. % % o x,y: These values define the location of the pixel to return. % % o pixel: return a pixel at the specified (x,y) location. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType GetOneVirtualPixelFromCache(const Image *image, const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y, Quantum *pixel,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); const Quantum *p; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel)); p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,1UL,1UL, cache_info->nexus_info[id],exception); return(CopyPixel(image,p,pixel)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t O n e V i r t u a l P i x e l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetOneVirtualPixelInfo() returns a single pixel at the specified (x,y) % location. 
The image background color is returned if an error occurs. If % you plan to modify the pixel, use GetOneAuthenticPixel() instead. % % The format of the GetOneVirtualPixelInfo() method is: % % MagickBooleanType GetOneVirtualPixelInfo(const Image image, % const VirtualPixelMethod virtual_pixel_method,const ssize_t x, % const ssize_t y,PixelInfo *pixel,ExceptionInfo exception) % % A description of each parameter follows: % % o image: the image. % % o virtual_pixel_method: the virtual pixel method. % % o x,y: these values define the location of the pixel to return. % % o pixel: return a pixel at the specified (x,y) location. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType GetOneVirtualPixelInfo(const Image *image, const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y, PixelInfo *pixel,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); register const Quantum *magick_restrict p; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); GetPixelInfo(image,pixel); p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,1UL,1UL, cache_info->nexus_info[id],exception); if (p == (const Quantum *) NULL) return(MagickFalse); GetPixelInfoPixel(image,p,pixel); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e C o l o r s p a c e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheColorspace() returns the colorspace of the pixel cache. % % The format of the GetPixelCacheColorspace() method is: % % Colorspace GetPixelCacheColorspace(const Cache cache) % % A description of each parameter follows: % % o cache: the pixel cache. % */ MagickPrivate ColorspaceType GetPixelCacheColorspace(const Cache cache) { CacheInfo *magick_restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", cache_info->filename); return(cache_info->colorspace); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e F i l e n a m e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheFilename() returns the filename associated with the pixel % cache. % % The format of the GetPixelCacheFilename() method is: % % const char *GetPixelCacheFilename(const Image *image) % % A description of each parameter follows: % % o image: the image. 
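%
%  An added illustrative sketch (not from the original notes) combining this
%  method with GetImagePixelCacheType() to report where the pixels currently
%  live:
%
%      if (GetImagePixelCacheType(image) == DiskCache)
%        (void) fprintf(stderr,"pixel cache backed by file %s\n",
%          GetPixelCacheFilename(image));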
% */ MagickExport const char *GetPixelCacheFilename(const Image *image) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); return(cache_info->cache_filename); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e M e t h o d s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheMethods() initializes the CacheMethods structure. % % The format of the GetPixelCacheMethods() method is: % % void GetPixelCacheMethods(CacheMethods *cache_methods) % % A description of each parameter follows: % % o cache_methods: Specifies a pointer to a CacheMethods structure. % */ MagickPrivate void GetPixelCacheMethods(CacheMethods *cache_methods) { assert(cache_methods != (CacheMethods *) NULL); (void) memset(cache_methods,0,sizeof(*cache_methods)); cache_methods->get_virtual_pixel_handler=GetVirtualPixelCache; cache_methods->get_virtual_pixels_handler=GetVirtualPixelsCache; cache_methods->get_virtual_metacontent_from_handler= GetVirtualMetacontentFromCache; cache_methods->get_one_virtual_pixel_from_handler=GetOneVirtualPixelFromCache; cache_methods->get_authentic_pixels_handler=GetAuthenticPixelsCache; cache_methods->get_authentic_metacontent_from_handler= GetAuthenticMetacontentFromCache; cache_methods->get_authentic_pixels_from_handler=GetAuthenticPixelsFromCache; cache_methods->get_one_authentic_pixel_from_handler= GetOneAuthenticPixelFromCache; cache_methods->queue_authentic_pixels_handler=QueueAuthenticPixelsCache; cache_methods->sync_authentic_pixels_handler=SyncAuthenticPixelsCache; cache_methods->destroy_pixel_handler=DestroyImagePixelCache; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e N e x u s E x t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheNexusExtent() returns the extent of the pixels associated % corresponding with the last call to SetPixelCacheNexusPixels() or % GetPixelCacheNexusPixels(). % % The format of the GetPixelCacheNexusExtent() method is: % % MagickSizeType GetPixelCacheNexusExtent(const Cache cache, % NexusInfo *nexus_info) % % A description of each parameter follows: % % o nexus_info: the nexus info. % */ MagickPrivate MagickSizeType GetPixelCacheNexusExtent(const Cache cache, NexusInfo *magick_restrict nexus_info) { CacheInfo *magick_restrict cache_info; MagickSizeType extent; assert(cache != NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); extent=(MagickSizeType) nexus_info->region.width*nexus_info->region.height; if (extent == 0) return((MagickSizeType) cache_info->columns*cache_info->rows); return(extent); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCachePixels() returns the pixels associated with the specified image. % % The format of the GetPixelCachePixels() method is: % % void *GetPixelCachePixels(Image *image,MagickSizeType *length, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. 
% % o length: the pixel cache length. % % o exception: return any errors or warnings in this structure. % */ MagickExport void *GetPixelCachePixels(Image *image,MagickSizeType *length, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); assert(length != (MagickSizeType *) NULL); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); *length=cache_info->length; if ((cache_info->type != MemoryCache) && (cache_info->type != MapCache)) return((void *) NULL); return((void *) cache_info->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e S t o r a g e C l a s s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheStorageClass() returns the class type of the pixel cache. % % The format of the GetPixelCacheStorageClass() method is: % % ClassType GetPixelCacheStorageClass(Cache cache) % % A description of each parameter follows: % % o type: GetPixelCacheStorageClass returns DirectClass or PseudoClass. % % o cache: the pixel cache. % */ MagickPrivate ClassType GetPixelCacheStorageClass(const Cache cache) { CacheInfo *magick_restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", cache_info->filename); return(cache_info->storage_class); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e T i l e S i z e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheTileSize() returns the pixel cache tile size. % % The format of the GetPixelCacheTileSize() method is: % % void GetPixelCacheTileSize(const Image *image,size_t *width, % size_t *height) % % A description of each parameter follows: % % o image: the image. % % o width: the optimized cache tile width in pixels. % % o height: the optimized cache tile height in pixels. % */ MagickPrivate void GetPixelCacheTileSize(const Image *image,size_t *width, size_t *height) { CacheInfo *magick_restrict cache_info; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); *width=2048UL/(MagickMax(cache_info->number_channels,1)*sizeof(Quantum)); if (GetImagePixelCacheType(image) == DiskCache) *width=8192UL/(MagickMax(cache_info->number_channels,1)*sizeof(Quantum)); *height=(*width); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e V i r t u a l M e t h o d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheVirtualMethod() gets the "virtual pixels" method for the % pixel cache. A virtual pixel is any pixel access that is outside the % boundaries of the image cache. 
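%
%  For example (added sketch, assuming the MagickCore 7 signatures of
%  SetImageVirtualPixelMethod() and GetOneVirtualPixel()), selecting the mirror
%  method makes out-of-bounds reads reflect back into the image:
%
%      Quantum
%        pixel[MaxPixelChannels];
%
%      (void) SetImageVirtualPixelMethod(image,MirrorVirtualPixelMethod,
%        exception);
%      (void) GetOneVirtualPixel(image,-1,-1,pixel,exception);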
% % The format of the GetPixelCacheVirtualMethod() method is: % % VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickPrivate VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image) { CacheInfo *magick_restrict cache_info; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); return(cache_info->virtual_pixel_method); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l M e t a c o n t e n t F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualMetacontentFromCache() returns the meta-content corresponding with % the last call to QueueAuthenticPixelsCache() or GetVirtualPixelCache(). % % The format of the GetVirtualMetacontentFromCache() method is: % % void *GetVirtualMetacontentFromCache(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ static const void *GetVirtualMetacontentFromCache(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); const void *magick_restrict metacontent; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); metacontent=GetVirtualMetacontentFromNexus(cache_info, cache_info->nexus_info[id]); return(metacontent); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l M e t a c o n t e n t F r o m N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualMetacontentFromNexus() returns the meta-content for the specified % cache nexus. % % The format of the GetVirtualMetacontentFromNexus() method is: % % const void *GetVirtualMetacontentFromNexus(const Cache cache, % NexusInfo *nexus_info) % % A description of each parameter follows: % % o cache: the pixel cache. % % o nexus_info: the cache nexus to return the meta-content. % */ MagickPrivate const void *GetVirtualMetacontentFromNexus(const Cache cache, NexusInfo *magick_restrict nexus_info) { CacheInfo *magick_restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->storage_class == UndefinedClass) return((void *) NULL); return(nexus_info->metacontent); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t V i r t u a l M e t a c o n t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualMetacontent() returns the virtual metacontent corresponding with % the last call to QueueAuthenticPixels() or GetVirtualPixels(). NULL is % returned if the meta-content are not available. % % The format of the GetVirtualMetacontent() method is: % % const void *GetVirtualMetacontent(const Image *image) % % A description of each parameter follows: % % o image: the image. 
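%
%  Read-only counterpart to the authentic meta-content example above (added
%  sketch; image, y, and exception are assumed valid):
%
%      const Quantum
%        *p;
%
%      const void
%        *metacontent;
%
%      p=GetVirtualPixels(image,0,y,image->columns,1,exception);
%      metacontent=GetVirtualMetacontent(image);
%      if ((p != (const Quantum *) NULL) && (metacontent != (const void *) NULL))
%        {
%          /* inspect, but do not modify, the pixels and meta-content */
%        }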
% */ MagickExport const void *GetVirtualMetacontent(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); const void *magick_restrict metacontent; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); metacontent=cache_info->methods.get_virtual_metacontent_from_handler(image); if (metacontent != (void *) NULL) return(metacontent); assert(id < (int) cache_info->number_threads); metacontent=GetVirtualMetacontentFromNexus(cache_info, cache_info->nexus_info[id]); return(metacontent); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualPixelCacheNexus() gets virtual pixels from the in-memory or disk % pixel cache as defined by the geometry parameters. A pointer to the pixels % is returned if the pixels are transferred, otherwise a NULL is returned. % % The format of the GetVirtualPixelCacheNexus() method is: % % Quantum *GetVirtualPixelCacheNexus(const Image *image, % const VirtualPixelMethod method,const ssize_t x,const ssize_t y, % const size_t columns,const size_t rows,NexusInfo *nexus_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o virtual_pixel_method: the virtual pixel method. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o nexus_info: the cache nexus to acquire. % % o exception: return any errors or warnings in this structure. % */ static ssize_t DitherMatrix[64] = { 0, 48, 12, 60, 3, 51, 15, 63, 32, 16, 44, 28, 35, 19, 47, 31, 8, 56, 4, 52, 11, 59, 7, 55, 40, 24, 36, 20, 43, 27, 39, 23, 2, 50, 14, 62, 1, 49, 13, 61, 34, 18, 46, 30, 33, 17, 45, 29, 10, 58, 6, 54, 9, 57, 5, 53, 42, 26, 38, 22, 41, 25, 37, 21 }; static inline ssize_t DitherX(const ssize_t x,const size_t columns) { ssize_t index; index=x+DitherMatrix[x & 0x07]-32L; if (index < 0L) return(0L); if (index >= (ssize_t) columns) return((ssize_t) columns-1L); return(index); } static inline ssize_t DitherY(const ssize_t y,const size_t rows) { ssize_t index; index=y+DitherMatrix[y & 0x07]-32L; if (index < 0L) return(0L); if (index >= (ssize_t) rows) return((ssize_t) rows-1L); return(index); } static inline ssize_t EdgeX(const ssize_t x,const size_t columns) { if (x < 0L) return(0L); if (x >= (ssize_t) columns) return((ssize_t) (columns-1)); return(x); } static inline ssize_t EdgeY(const ssize_t y,const size_t rows) { if (y < 0L) return(0L); if (y >= (ssize_t) rows) return((ssize_t) (rows-1)); return(y); } static inline ssize_t RandomX(RandomInfo *random_info,const size_t columns) { return((ssize_t) (columns*GetPseudoRandomValue(random_info))); } static inline ssize_t RandomY(RandomInfo *random_info,const size_t rows) { return((ssize_t) (rows*GetPseudoRandomValue(random_info))); } static inline MagickModulo VirtualPixelModulo(const ssize_t offset, const size_t extent) { MagickModulo modulo; modulo.quotient=offset/((ssize_t) extent); modulo.remainder=offset % ((ssize_t) extent); if ((modulo.remainder != 0) && ((offset ^ ((ssize_t) extent)) < 0)) { modulo.quotient-=1; modulo.remainder+=((ssize_t) extent); } return(modulo); } MagickPrivate const Quantum *GetVirtualPixelCacheNexus(const Image *image, const VirtualPixelMethod 
virtual_pixel_method,const ssize_t x,const ssize_t y, const size_t columns,const size_t rows,NexusInfo *nexus_info, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; MagickOffsetType offset; MagickSizeType length, number_pixels; NexusInfo *magick_restrict virtual_nexus; Quantum *magick_restrict pixels, virtual_pixel[MaxPixelChannels]; register const Quantum *magick_restrict p; register const void *magick_restrict r; register Quantum *magick_restrict q; register ssize_t i, u; register unsigned char *magick_restrict s; ssize_t v; void *magick_restrict virtual_metacontent; /* Acquire pixels. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->type == UndefinedCache) return((const Quantum *) NULL); #if defined(MAGICKCORE_OPENCL_SUPPORT) CopyOpenCLBuffer(cache_info); #endif pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,x,y,columns,rows, ((image->channels & WriteMaskChannel) != 0) || ((image->channels & CompositeMaskChannel) != 0) ? MagickTrue : MagickFalse, nexus_info,exception); if (pixels == (Quantum *) NULL) return((const Quantum *) NULL); q=pixels; offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+ nexus_info->region.x; length=(MagickSizeType) (nexus_info->region.height-1L)*cache_info->columns+ nexus_info->region.width-1L; number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows; if ((offset >= 0) && (((MagickSizeType) offset+length) < number_pixels)) if ((x >= 0) && ((ssize_t) (x+columns-1) < (ssize_t) cache_info->columns) && (y >= 0) && ((ssize_t) (y+rows-1) < (ssize_t) cache_info->rows)) { MagickBooleanType status; /* Pixel request is inside cache extents. */ if (nexus_info->authentic_pixel_cache != MagickFalse) return(q); status=ReadPixelCachePixels(cache_info,nexus_info,exception); if (status == MagickFalse) return((const Quantum *) NULL); if (cache_info->metacontent_extent != 0) { status=ReadPixelCacheMetacontent(cache_info,nexus_info,exception); if (status == MagickFalse) return((const Quantum *) NULL); } return(q); } /* Pixel request is outside cache extents. */ virtual_nexus=nexus_info->virtual_nexus; s=(unsigned char *) nexus_info->metacontent; (void) memset(virtual_pixel,0,cache_info->number_channels* sizeof(*virtual_pixel)); virtual_metacontent=(void *) NULL; switch (virtual_pixel_method) { case BackgroundVirtualPixelMethod: case BlackVirtualPixelMethod: case GrayVirtualPixelMethod: case TransparentVirtualPixelMethod: case MaskVirtualPixelMethod: case WhiteVirtualPixelMethod: case EdgeVirtualPixelMethod: case CheckerTileVirtualPixelMethod: case HorizontalTileVirtualPixelMethod: case VerticalTileVirtualPixelMethod: { if (cache_info->metacontent_extent != 0) { /* Acquire a metacontent buffer. 
*/ virtual_metacontent=(void *) AcquireQuantumMemory(1, cache_info->metacontent_extent); if (virtual_metacontent == (void *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), CacheError,"UnableToGetCacheNexus","`%s'",image->filename); return((const Quantum *) NULL); } (void) memset(virtual_metacontent,0,cache_info->metacontent_extent); } switch (virtual_pixel_method) { case BlackVirtualPixelMethod: { for (i=0; i < (ssize_t) cache_info->number_channels; i++) SetPixelChannel(image,(PixelChannel) i,(Quantum) 0,virtual_pixel); SetPixelAlpha(image,OpaqueAlpha,virtual_pixel); break; } case GrayVirtualPixelMethod: { for (i=0; i < (ssize_t) cache_info->number_channels; i++) SetPixelChannel(image,(PixelChannel) i,QuantumRange/2, virtual_pixel); SetPixelAlpha(image,OpaqueAlpha,virtual_pixel); break; } case TransparentVirtualPixelMethod: { for (i=0; i < (ssize_t) cache_info->number_channels; i++) SetPixelChannel(image,(PixelChannel) i,(Quantum) 0,virtual_pixel); SetPixelAlpha(image,TransparentAlpha,virtual_pixel); break; } case MaskVirtualPixelMethod: case WhiteVirtualPixelMethod: { for (i=0; i < (ssize_t) cache_info->number_channels; i++) SetPixelChannel(image,(PixelChannel) i,QuantumRange,virtual_pixel); SetPixelAlpha(image,OpaqueAlpha,virtual_pixel); break; } default: { SetPixelRed(image,ClampToQuantum(image->background_color.red), virtual_pixel); SetPixelGreen(image,ClampToQuantum(image->background_color.green), virtual_pixel); SetPixelBlue(image,ClampToQuantum(image->background_color.blue), virtual_pixel); SetPixelBlack(image,ClampToQuantum(image->background_color.black), virtual_pixel); SetPixelAlpha(image,ClampToQuantum(image->background_color.alpha), virtual_pixel); break; } } break; } default: break; } for (v=0; v < (ssize_t) rows; v++) { ssize_t y_offset; y_offset=y+v; if ((virtual_pixel_method == EdgeVirtualPixelMethod) || (virtual_pixel_method == UndefinedVirtualPixelMethod)) y_offset=EdgeY(y_offset,cache_info->rows); for (u=0; u < (ssize_t) columns; u+=length) { ssize_t x_offset; x_offset=x+u; length=(MagickSizeType) MagickMin(cache_info->columns-x_offset,columns-u); if (((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns)) || ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows)) || (length == 0)) { MagickModulo x_modulo, y_modulo; /* Transfer a single pixel. 
*/ length=(MagickSizeType) 1; switch (virtual_pixel_method) { case EdgeVirtualPixelMethod: default: { p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, EdgeX(x_offset,cache_info->columns), EdgeY(y_offset,cache_info->rows),1UL,1UL,virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info, nexus_info->virtual_nexus); break; } case RandomVirtualPixelMethod: { if (cache_info->random_info == (RandomInfo *) NULL) cache_info->random_info=AcquireRandomInfo(); p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, RandomX(cache_info->random_info,cache_info->columns), RandomY(cache_info->random_info,cache_info->rows),1UL,1UL, virtual_nexus,exception); r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus); break; } case DitherVirtualPixelMethod: { p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, DitherX(x_offset,cache_info->columns), DitherY(y_offset,cache_info->rows),1UL,1UL,virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus); break; } case TileVirtualPixelMethod: { x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus); break; } case MirrorVirtualPixelMethod: { x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); if ((x_modulo.quotient & 0x01) == 1L) x_modulo.remainder=(ssize_t) cache_info->columns- x_modulo.remainder-1L; y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); if ((y_modulo.quotient & 0x01) == 1L) y_modulo.remainder=(ssize_t) cache_info->rows- y_modulo.remainder-1L; p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus); break; } case HorizontalTileEdgeVirtualPixelMethod: { x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, x_modulo.remainder,EdgeY(y_offset,cache_info->rows),1UL,1UL, virtual_nexus,exception); r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus); break; } case VerticalTileEdgeVirtualPixelMethod: { y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, EdgeX(x_offset,cache_info->columns),y_modulo.remainder,1UL,1UL, virtual_nexus,exception); r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus); break; } case BackgroundVirtualPixelMethod: case BlackVirtualPixelMethod: case GrayVirtualPixelMethod: case TransparentVirtualPixelMethod: case MaskVirtualPixelMethod: case WhiteVirtualPixelMethod: { p=virtual_pixel; r=virtual_metacontent; break; } case CheckerTileVirtualPixelMethod: { x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); if (((x_modulo.quotient ^ y_modulo.quotient) & 0x01) != 0L) { p=virtual_pixel; r=virtual_metacontent; break; } p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus); break; } case HorizontalTileVirtualPixelMethod: { if ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows)) { p=virtual_pixel; r=virtual_metacontent; break; } x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); 
p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus); break; } case VerticalTileVirtualPixelMethod: { if ((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns)) { p=virtual_pixel; r=virtual_metacontent; break; } x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus); break; } } if (p == (const Quantum *) NULL) break; (void) memcpy(q,p,(size_t) (cache_info->number_channels*length* sizeof(*p))); q+=cache_info->number_channels; if ((s != (void *) NULL) && (r != (const void *) NULL)) { (void) memcpy(s,r,(size_t) cache_info->metacontent_extent); s+=cache_info->metacontent_extent; } continue; } /* Transfer a run of pixels. */ p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x_offset,y_offset, (size_t) length,1UL,virtual_nexus,exception); if (p == (const Quantum *) NULL) break; r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus); (void) memcpy(q,p,(size_t) (cache_info->number_channels*length* sizeof(*p))); q+=cache_info->number_channels*length; if ((r != (void *) NULL) && (s != (const void *) NULL)) { (void) memcpy(s,r,(size_t) length); s+=length*cache_info->metacontent_extent; } } if (u < (ssize_t) columns) break; } /* Free resources. */ if (virtual_metacontent != (void *) NULL) virtual_metacontent=(void *) RelinquishMagickMemory(virtual_metacontent); if (v < (ssize_t) rows) return((const Quantum *) NULL); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualPixelCache() get virtual pixels from the in-memory or disk pixel % cache as defined by the geometry parameters. A pointer to the pixels % is returned if the pixels are transferred, otherwise a NULL is returned. % % The format of the GetVirtualPixelCache() method is: % % const Quantum *GetVirtualPixelCache(const Image *image, % const VirtualPixelMethod virtual_pixel_method,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o virtual_pixel_method: the virtual pixel method. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o exception: return any errors or warnings in this structure. 
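%
%  Added worked example of the out-of-bounds mapping implemented above: with
%  cache_info->columns=100 and a request at x_offset=-3, VirtualPixelModulo()
%  floors the division, yielding quotient=-1 and remainder=97.
%  TileVirtualPixelMethod therefore reads column 97 (wrap-around), while
%  MirrorVirtualPixelMethod, seeing an odd quotient, reflects the coordinate to
%  column 100-97-1=2.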
% */ static const Quantum *GetVirtualPixelCache(const Image *image, const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y, const size_t columns,const size_t rows,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); const Quantum *magick_restrict p; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,columns,rows, cache_info->nexus_info[id],exception); return(p); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t V i r t u a l P i x e l Q u e u e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualPixelQueue() returns the virtual pixels associated corresponding % with the last call to QueueAuthenticPixels() or GetVirtualPixels(). % % The format of the GetVirtualPixelQueue() method is: % % const Quantum *GetVirtualPixelQueue(const Image image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport const Quantum *GetVirtualPixelQueue(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.get_virtual_pixels_handler != (GetVirtualPixelsHandler) NULL) return(cache_info->methods.get_virtual_pixels_handler(image)); assert(id < (int) cache_info->number_threads); return(GetVirtualPixelsNexus(cache_info,cache_info->nexus_info[id])); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t V i r t u a l P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualPixels() returns an immutable pixel region. If the % region is successfully accessed, a pointer to it is returned, otherwise % NULL is returned. The returned pointer may point to a temporary working % copy of the pixels or it may point to the original pixels in memory. % Performance is maximized if the selected region is part of one row, or one % or more full rows, since there is opportunity to access the pixels in-place % (without a copy) if the image is in memory, or in a memory-mapped file. The % returned pointer must *never* be deallocated by the user. % % Pixels accessed via the returned pointer represent a simple array of type % Quantum. If the image type is CMYK or the storage class is PseudoClass, % call GetAuthenticMetacontent() after invoking GetAuthenticPixels() to % access the meta-content (of type void) corresponding to the % region. % % If you plan to modify the pixels, use GetAuthenticPixels() instead. % % Note, the GetVirtualPixels() and GetAuthenticPixels() methods are not thread- % safe. In a threaded environment, use GetCacheViewVirtualPixels() or % GetCacheViewAuthenticPixels() instead. 
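%
%  A thread-safe read-only sketch using a cache view, as recommended above
%  (added illustration; image, y, and exception are assumed valid):
%
%      CacheView
%        *image_view;
%
%      const Quantum
%        *p;
%
%      image_view=AcquireVirtualCacheView(image,exception);
%      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
%      if (p != (const Quantum *) NULL)
%        {
%          /* read the row here */
%        }
%      image_view=DestroyCacheView(image_view);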
% % The format of the GetVirtualPixels() method is: % % const Quantum *GetVirtualPixels(const Image *image,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o exception: return any errors or warnings in this structure. % */ MagickExport const Quantum *GetVirtualPixels(const Image *image, const ssize_t x,const ssize_t y,const size_t columns,const size_t rows, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); const Quantum *magick_restrict p; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.get_virtual_pixel_handler != (GetVirtualPixelHandler) NULL) return(cache_info->methods.get_virtual_pixel_handler(image, GetPixelCacheVirtualMethod(image),x,y,columns,rows,exception)); assert(id < (int) cache_info->number_threads); p=GetVirtualPixelCacheNexus(image,GetPixelCacheVirtualMethod(image),x,y, columns,rows,cache_info->nexus_info[id],exception); return(p); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l P i x e l s F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualPixelsCache() returns the pixels associated corresponding with the % last call to QueueAuthenticPixelsCache() or GetVirtualPixelCache(). % % The format of the GetVirtualPixelsCache() method is: % % Quantum *GetVirtualPixelsCache(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ static const Quantum *GetVirtualPixelsCache(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); return(GetVirtualPixelsNexus(image->cache,cache_info->nexus_info[id])); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l P i x e l s N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualPixelsNexus() returns the pixels associated with the specified % cache nexus. % % The format of the GetVirtualPixelsNexus() method is: % % const Quantum *GetVirtualPixelsNexus(const Cache cache, % NexusInfo *nexus_info) % % A description of each parameter follows: % % o cache: the pixel cache. % % o nexus_info: the cache nexus to return the colormap pixels. 
% */ MagickPrivate const Quantum *GetVirtualPixelsNexus(const Cache cache, NexusInfo *magick_restrict nexus_info) { CacheInfo *magick_restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->storage_class == UndefinedClass) return((Quantum *) NULL); return((const Quantum *) nexus_info->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + M a s k P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MaskPixelCacheNexus() masks the cache nexus as defined by the composite mask. % The method returns MagickTrue if the pixel region is masked, otherwise % MagickFalse. % % The format of the MaskPixelCacheNexus() method is: % % MagickBooleanType MaskPixelCacheNexus(Image *image, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o nexus_info: the cache nexus to clip. % % o exception: return any errors or warnings in this structure. % */ static inline Quantum ApplyPixelCompositeMask(const Quantum p, const MagickRealType alpha,const Quantum q,const MagickRealType beta) { double mask_alpha; Quantum pixel; if (fabs(alpha-OpaqueAlpha) < MagickEpsilon) return(p); mask_alpha=1.0-QuantumScale*QuantumScale*alpha*beta; mask_alpha=PerceptibleReciprocal(mask_alpha); pixel=ClampToQuantum(mask_alpha*MagickOver_((double) p,alpha,(double) q, beta)); return(pixel); } static MagickBooleanType MaskPixelCacheNexus(Image *image,NexusInfo *nexus_info, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; register Quantum *magick_restrict p, *magick_restrict q; ssize_t y; /* Apply composite mask. */ if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if ((image->channels & CompositeMaskChannel) == 0) return(MagickTrue); if ((nexus_info->region.width == 0) || (nexus_info->region.height == 0)) return(MagickTrue); cache_info=(CacheInfo *) image->cache; if (cache_info == (Cache) NULL) return(MagickFalse); p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,nexus_info->region.y, nexus_info->region.width,nexus_info->region.height, nexus_info->virtual_nexus,exception); q=nexus_info->pixels; if ((p == (Quantum *) NULL) || (q == (Quantum *) NULL)) return(MagickFalse); for (y=0; y < (ssize_t) nexus_info->region.height; y++) { register ssize_t x; for (x=0; x < (ssize_t) nexus_info->region.width; x++) { double mask_alpha; register ssize_t i; mask_alpha=(double) GetPixelCompositeMask(image,p); for (i=0; i < (ssize_t) image->number_channels; i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[i]=ApplyPixelCompositeMask(p[i],mask_alpha,q[i],(MagickRealType) GetPixelAlpha(image,q)); } p+=GetPixelChannels(image); q+=GetPixelChannels(image); } } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + O p e n P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % OpenPixelCache() allocates the pixel cache. This includes defining the cache % dimensions, allocating space for the image pixels and optionally the % metacontent, and memory mapping the cache if it is disk based. The cache % nexus array is initialized as well. 
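%
%  The cache footprint is, in effect, columns x rows x (number_channels x
%  sizeof(Quantum) + metacontent_extent) bytes.  As a rough illustration
%  (assuming 4 channels, no metacontent, and a 2-byte Quantum as in a Q16
%  build), a 4000x3000 image needs 4000*3000*4*2 = 96,000,000 bytes, about
%  92 MiB; if that cannot be reserved against the memory resource limit, the
%  cache falls back to a memory-mapped or plain disk cache.
%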
% % The format of the OpenPixelCache() method is: % % MagickBooleanType OpenPixelCache(Image *image,const MapMode mode, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o mode: ReadMode, WriteMode, or IOMode. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType OpenPixelCacheOnDisk(CacheInfo *cache_info, const MapMode mode) { int file; /* Open pixel cache on disk. */ if ((cache_info->file != -1) && (cache_info->disk_mode == mode)) return(MagickTrue); /* cache already open and in the proper mode */ if (*cache_info->cache_filename == '\0') file=AcquireUniqueFileResource(cache_info->cache_filename); else switch (mode) { case ReadMode: { file=open_utf8(cache_info->cache_filename,O_RDONLY | O_BINARY,0); break; } case WriteMode: { file=open_utf8(cache_info->cache_filename,O_WRONLY | O_CREAT | O_BINARY | O_EXCL,S_MODE); if (file == -1) file=open_utf8(cache_info->cache_filename,O_WRONLY | O_BINARY,S_MODE); break; } case IOMode: default: { file=open_utf8(cache_info->cache_filename,O_RDWR | O_CREAT | O_BINARY | O_EXCL,S_MODE); if (file == -1) file=open_utf8(cache_info->cache_filename,O_RDWR | O_BINARY,S_MODE); break; } } if (file == -1) return(MagickFalse); (void) AcquireMagickResource(FileResource,1); if (cache_info->file != -1) (void) ClosePixelCacheOnDisk(cache_info); cache_info->file=file; cache_info->disk_mode=mode; return(MagickTrue); } static inline MagickOffsetType WritePixelCacheRegion( const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset, const MagickSizeType length,const unsigned char *magick_restrict buffer) { register MagickOffsetType i; ssize_t count; #if !defined(MAGICKCORE_HAVE_PWRITE) if (lseek(cache_info->file,offset,SEEK_SET) < 0) return((MagickOffsetType) -1); #endif count=0; for (i=0; i < (MagickOffsetType) length; i+=count) { #if !defined(MAGICKCORE_HAVE_PWRITE) count=write(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t) SSIZE_MAX)); #else count=pwrite(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t) SSIZE_MAX),offset+i); #endif if (count <= 0) { count=0; if (errno != EINTR) break; } } return(i); } static MagickBooleanType SetPixelCacheExtent(Image *image,MagickSizeType length) { CacheInfo *magick_restrict cache_info; MagickOffsetType count, extent, offset; cache_info=(CacheInfo *) image->cache; if (image->debug != MagickFalse) { char format[MagickPathExtent], message[MagickPathExtent]; (void) FormatMagickSize(length,MagickFalse,"B",MagickPathExtent,format); (void) FormatLocaleString(message,MagickPathExtent, "extend %s (%s[%d], disk, %s)",cache_info->filename, cache_info->cache_filename,cache_info->file,format); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message); } if (length != (MagickSizeType) ((MagickOffsetType) length)) return(MagickFalse); offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_END); if (offset < 0) return(MagickFalse); if ((MagickSizeType) offset >= length) count=(MagickOffsetType) 1; else { extent=(MagickOffsetType) length-1; count=WritePixelCacheRegion(cache_info,extent,1,(const unsigned char *) ""); if (count != 1) return(MagickFalse); #if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE) if (cache_info->synchronize != MagickFalse) if (posix_fallocate(cache_info->file,offset+1,extent-offset) != 0) return(MagickFalse); #endif } offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_SET); if (offset < 0) return(MagickFalse); return(MagickTrue); } static MagickBooleanType OpenPixelCache(Image *image,const 
MapMode mode, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info, source_info; char format[MagickPathExtent], message[MagickPathExtent]; const char *hosts, *type; MagickBooleanType status; MagickSizeType length, number_pixels; size_t columns, packet_size; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (cache_anonymous_memory < 0) { char *value; /* Does the security policy require anonymous mapping for pixel cache? */ cache_anonymous_memory=0; value=GetPolicyValue("pixel-cache-memory"); if (value == (char *) NULL) value=GetPolicyValue("cache:memory-map"); if (LocaleCompare(value,"anonymous") == 0) { #if defined(MAGICKCORE_HAVE_MMAP) && defined(MAP_ANONYMOUS) cache_anonymous_memory=1; #else (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateError,"DelegateLibrarySupportNotBuiltIn", "'%s' (policy requires anonymous memory mapping)",image->filename); #endif } value=DestroyString(value); } if ((image->columns == 0) || (image->rows == 0)) ThrowBinaryException(CacheError,"NoPixelsDefinedInCache",image->filename); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (((MagickSizeType) image->columns > cache_info->width_limit) || ((MagickSizeType) image->rows > cache_info->height_limit)) ThrowBinaryException(ImageError,"WidthOrHeightExceedsLimit", image->filename); if (GetMagickResourceLimit(ListLengthResource) != MagickResourceInfinity) { length=GetImageListLength(image); if (AcquireMagickResource(ListLengthResource,length) == MagickFalse) ThrowBinaryException(ResourceLimitError,"ListLengthExceedsLimit", image->filename); } source_info=(*cache_info); source_info.file=(-1); (void) FormatLocaleString(cache_info->filename,MagickPathExtent,"%s[%.20g]", image->filename,(double) image->scene); cache_info->storage_class=image->storage_class; cache_info->colorspace=image->colorspace; cache_info->alpha_trait=image->alpha_trait; cache_info->channels=image->channels; cache_info->rows=image->rows; cache_info->columns=image->columns; InitializePixelChannelMap(image); cache_info->number_channels=GetPixelChannels(image); (void) memcpy(cache_info->channel_map,image->channel_map,MaxPixelChannels* sizeof(*image->channel_map)); cache_info->metacontent_extent=image->metacontent_extent; cache_info->mode=mode; number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows; packet_size=cache_info->number_channels*sizeof(Quantum); if (image->metacontent_extent != 0) packet_size+=cache_info->metacontent_extent; length=number_pixels*packet_size; columns=(size_t) (length/cache_info->rows/packet_size); if ((cache_info->columns != columns) || ((ssize_t) cache_info->columns < 0) || ((ssize_t) cache_info->rows < 0)) ThrowBinaryException(ResourceLimitError,"PixelCacheAllocationFailed", image->filename); cache_info->length=length; if (image->ping != MagickFalse) { cache_info->type=PingCache; return(MagickTrue); } status=AcquireMagickResource(AreaResource,(MagickSizeType) cache_info->columns*cache_info->rows); if (cache_info->mode == PersistMode) status=MagickFalse; length=number_pixels*(cache_info->number_channels*sizeof(Quantum)+ cache_info->metacontent_extent); if ((status != MagickFalse) && (length == (MagickSizeType) ((size_t) length)) && ((cache_info->type == UndefinedCache) || (cache_info->type == MemoryCache))) { 
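      /*
        Try a memory cache first: reserve the memory resource, then either
        heap-allocate or anonymously map the pixel store, depending on the
        pixel-cache-memory policy evaluated above.
      */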
status=AcquireMagickResource(MemoryResource,cache_info->length); if (status != MagickFalse) { status=MagickTrue; if (cache_anonymous_memory <= 0) { cache_info->mapped=MagickFalse; cache_info->pixels=(Quantum *) MagickAssumeAligned( AcquireAlignedMemory(1,(size_t) cache_info->length)); } else { cache_info->mapped=MagickTrue; cache_info->pixels=(Quantum *) MapBlob(-1,IOMode,0,(size_t) cache_info->length); } if (cache_info->pixels == (Quantum *) NULL) { cache_info->mapped=source_info.mapped; cache_info->pixels=source_info.pixels; } else { /* Create memory pixel cache. */ cache_info->type=MemoryCache; cache_info->metacontent=(void *) NULL; if (cache_info->metacontent_extent != 0) cache_info->metacontent=(void *) (cache_info->pixels+ cache_info->number_channels*number_pixels); if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode)) { status=ClonePixelCacheRepository(cache_info,&source_info, exception); RelinquishPixelCachePixels(&source_info); } if (image->debug != MagickFalse) { (void) FormatMagickSize(cache_info->length,MagickTrue,"B", MagickPathExtent,format); type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type); (void) FormatLocaleString(message,MagickPathExtent, "open %s (%s %s, %.20gx%.20gx%.20g %s)", cache_info->filename,cache_info->mapped != MagickFalse ? "Anonymous" : "Heap",type,(double) cache_info->columns, (double) cache_info->rows,(double) cache_info->number_channels,format); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s", message); } cache_info->storage_class=image->storage_class; if (status == 0) { cache_info->type=UndefinedCache; return(MagickFalse); } return(MagickTrue); } } } status=AcquireMagickResource(DiskResource,cache_info->length); hosts=(const char *) GetImageRegistry(StringRegistryType,"cache:hosts", exception); if ((status == MagickFalse) && (hosts != (const char *) NULL)) { DistributeCacheInfo *server_info; /* Distribute the pixel cache to a remote server. */ server_info=AcquireDistributeCacheInfo(exception); if (server_info != (DistributeCacheInfo *) NULL) { status=OpenDistributePixelCache(server_info,image); if (status == MagickFalse) { ThrowFileException(exception,CacheError,"UnableToOpenPixelCache", GetDistributeCacheHostname(server_info)); server_info=DestroyDistributeCacheInfo(server_info); } else { /* Create a distributed pixel cache. 
*/ status=MagickTrue; cache_info->type=DistributedCache; cache_info->server_info=server_info; (void) FormatLocaleString(cache_info->cache_filename, MagickPathExtent,"%s:%d",GetDistributeCacheHostname( (DistributeCacheInfo *) cache_info->server_info), GetDistributeCachePort((DistributeCacheInfo *) cache_info->server_info)); if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode)) { status=ClonePixelCacheRepository(cache_info,&source_info, exception); RelinquishPixelCachePixels(&source_info); } if (image->debug != MagickFalse) { (void) FormatMagickSize(cache_info->length,MagickFalse,"B", MagickPathExtent,format); type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type); (void) FormatLocaleString(message,MagickPathExtent, "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)", cache_info->filename,cache_info->cache_filename, GetDistributeCacheFile((DistributeCacheInfo *) cache_info->server_info),type,(double) cache_info->columns, (double) cache_info->rows,(double) cache_info->number_channels,format); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s", message); } if (status == 0) { cache_info->type=UndefinedCache; return(MagickFalse); } return(MagickTrue); } } cache_info->type=UndefinedCache; (void) ThrowMagickException(exception,GetMagickModule(),CacheError, "CacheResourcesExhausted","`%s'",image->filename); return(MagickFalse); } /* Create pixel cache on disk. */ if (status == MagickFalse) { cache_info->type=UndefinedCache; (void) ThrowMagickException(exception,GetMagickModule(),CacheError, "CacheResourcesExhausted","`%s'",image->filename); return(MagickFalse); } if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode) && (cache_info->mode != PersistMode)) { (void) ClosePixelCacheOnDisk(cache_info); *cache_info->cache_filename='\0'; } if (OpenPixelCacheOnDisk(cache_info,mode) == MagickFalse) { cache_info->type=UndefinedCache; ThrowFileException(exception,CacheError,"UnableToOpenPixelCache", image->filename); return(MagickFalse); } status=SetPixelCacheExtent(image,(MagickSizeType) cache_info->offset+ cache_info->length); if (status == MagickFalse) { cache_info->type=UndefinedCache; ThrowFileException(exception,CacheError,"UnableToExtendCache", image->filename); return(MagickFalse); } cache_info->type=DiskCache; length=number_pixels*(cache_info->number_channels*sizeof(Quantum)+ cache_info->metacontent_extent); if (length == (MagickSizeType) ((size_t) length)) { status=AcquireMagickResource(MapResource,cache_info->length); if (status != MagickFalse) { cache_info->pixels=(Quantum *) MapBlob(cache_info->file,mode, cache_info->offset,(size_t) cache_info->length); if (cache_info->pixels == (Quantum *) NULL) { cache_info->mapped=source_info.mapped; cache_info->pixels=source_info.pixels; RelinquishMagickResource(MapResource,cache_info->length); } else { /* Create file-backed memory-mapped pixel cache. 
*/ (void) ClosePixelCacheOnDisk(cache_info); cache_info->type=MapCache; cache_info->mapped=MagickTrue; cache_info->metacontent=(void *) NULL; if (cache_info->metacontent_extent != 0) cache_info->metacontent=(void *) (cache_info->pixels+ cache_info->number_channels*number_pixels); if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode)) { status=ClonePixelCacheRepository(cache_info,&source_info, exception); RelinquishPixelCachePixels(&source_info); } if (image->debug != MagickFalse) { (void) FormatMagickSize(cache_info->length,MagickTrue,"B", MagickPathExtent,format); type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type); (void) FormatLocaleString(message,MagickPathExtent, "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)", cache_info->filename,cache_info->cache_filename, cache_info->file,type,(double) cache_info->columns, (double) cache_info->rows,(double) cache_info->number_channels,format); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s", message); } if (status == 0) { cache_info->type=UndefinedCache; return(MagickFalse); } return(MagickTrue); } } } status=MagickTrue; if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode)) { status=ClonePixelCacheRepository(cache_info,&source_info,exception); RelinquishPixelCachePixels(&source_info); } if (image->debug != MagickFalse) { (void) FormatMagickSize(cache_info->length,MagickFalse,"B", MagickPathExtent,format); type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type); (void) FormatLocaleString(message,MagickPathExtent, "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",cache_info->filename, cache_info->cache_filename,cache_info->file,type,(double) cache_info->columns,(double) cache_info->rows,(double) cache_info->number_channels,format); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message); } if (status == 0) { cache_info->type=UndefinedCache; return(MagickFalse); } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + P e r s i s t P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PersistPixelCache() attaches to or initializes a persistent pixel cache. A % persistent pixel cache is one that resides on disk and is not destroyed % when the program exits. % % The format of the PersistPixelCache() method is: % % MagickBooleanType PersistPixelCache(Image *image,const char *filename, % const MagickBooleanType attach,MagickOffsetType *offset, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o filename: the persistent pixel cache filename. % % o attach: A value other than zero initializes the persistent pixel cache. % % o initialize: A value other than zero initializes the persistent pixel % cache. % % o offset: the offset in the persistent cache to store pixels. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType PersistPixelCache(Image *image, const char *filename,const MagickBooleanType attach,MagickOffsetType *offset, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info, *magick_restrict clone_info; MagickBooleanType status; ssize_t page_size; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(image->cache != (void *) NULL); assert(filename != (const char *) NULL); assert(offset != (MagickOffsetType *) NULL); page_size=GetMagickPageSize(); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) CopyOpenCLBuffer(cache_info); #endif if (attach != MagickFalse) { /* Attach existing persistent pixel cache. */ if (image->debug != MagickFalse) (void) LogMagickEvent(CacheEvent,GetMagickModule(), "attach persistent cache"); (void) CopyMagickString(cache_info->cache_filename,filename, MagickPathExtent); cache_info->type=MapCache; cache_info->offset=(*offset); if (OpenPixelCache(image,ReadMode,exception) == MagickFalse) return(MagickFalse); *offset+=cache_info->length+page_size-(cache_info->length % page_size); return(MagickTrue); } /* Clone persistent pixel cache. */ status=AcquireMagickResource(DiskResource,cache_info->length); if (status == MagickFalse) { (void) ThrowMagickException(exception,GetMagickModule(),CacheError, "CacheResourcesExhausted","`%s'",image->filename); return(MagickFalse); } clone_info=(CacheInfo *) ClonePixelCache(cache_info); clone_info->type=DiskCache; (void) CopyMagickString(clone_info->cache_filename,filename,MagickPathExtent); clone_info->file=(-1); clone_info->storage_class=cache_info->storage_class; clone_info->colorspace=cache_info->colorspace; clone_info->alpha_trait=cache_info->alpha_trait; clone_info->channels=cache_info->channels; clone_info->columns=cache_info->columns; clone_info->rows=cache_info->rows; clone_info->number_channels=cache_info->number_channels; clone_info->metacontent_extent=cache_info->metacontent_extent; clone_info->mode=PersistMode; clone_info->length=cache_info->length; (void) memcpy(clone_info->channel_map,cache_info->channel_map, MaxPixelChannels*sizeof(*cache_info->channel_map)); clone_info->offset=(*offset); status=ClonePixelCacheRepository(clone_info,cache_info,exception); *offset+=cache_info->length+page_size-(cache_info->length % page_size); clone_info=(CacheInfo *) DestroyPixelCache(clone_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + Q u e u e A u t h e n t i c P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % QueueAuthenticPixelCacheNexus() allocates an region to store image pixels as % defined by the region rectangle and returns a pointer to the region. This % region is subsequently transferred from the pixel cache with % SyncAuthenticPixelsCache(). A pointer to the pixels is returned if the % pixels are transferred, otherwise a NULL is returned. % % The format of the QueueAuthenticPixelCacheNexus() method is: % % Quantum *QueueAuthenticPixelCacheNexus(Image *image,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % const MagickBooleanType clone,NexusInfo *nexus_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. 
% % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o nexus_info: the cache nexus to set. % % o clone: clone the pixel cache. % % o exception: return any errors or warnings in this structure. % */ MagickPrivate Quantum *QueueAuthenticPixelCacheNexus(Image *image, const ssize_t x,const ssize_t y,const size_t columns,const size_t rows, const MagickBooleanType clone,NexusInfo *nexus_info,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; MagickOffsetType offset; MagickSizeType number_pixels; Quantum *magick_restrict pixels; /* Validate pixel cache geometry. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) GetImagePixelCache(image,clone,exception); if (cache_info == (Cache) NULL) return((Quantum *) NULL); assert(cache_info->signature == MagickCoreSignature); if ((cache_info->columns == 0) || (cache_info->rows == 0) || (x < 0) || (y < 0) || (x >= (ssize_t) cache_info->columns) || (y >= (ssize_t) cache_info->rows)) { (void) ThrowMagickException(exception,GetMagickModule(),CacheError, "PixelsAreNotAuthentic","`%s'",image->filename); return((Quantum *) NULL); } offset=(MagickOffsetType) y*cache_info->columns+x; if (offset < 0) return((Quantum *) NULL); number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows; offset+=(MagickOffsetType) (rows-1)*cache_info->columns+columns-1; if ((MagickSizeType) offset >= number_pixels) return((Quantum *) NULL); /* Return pixel cache. */ pixels=SetPixelCacheNexusPixels(cache_info,WriteMode,x,y,columns,rows, ((image->channels & WriteMaskChannel) != 0) || ((image->channels & CompositeMaskChannel) != 0) ? MagickTrue : MagickFalse, nexus_info,exception); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + Q u e u e A u t h e n t i c P i x e l s C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % QueueAuthenticPixelsCache() allocates an region to store image pixels as % defined by the region rectangle and returns a pointer to the region. This % region is subsequently transferred from the pixel cache with % SyncAuthenticPixelsCache(). A pointer to the pixels is returned if the % pixels are transferred, otherwise a NULL is returned. % % The format of the QueueAuthenticPixelsCache() method is: % % Quantum *QueueAuthenticPixelsCache(Image *image,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o exception: return any errors or warnings in this structure. 
% */ static Quantum *QueueAuthenticPixelsCache(Image *image,const ssize_t x, const ssize_t y,const size_t columns,const size_t rows, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); Quantum *magick_restrict pixels; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse, cache_info->nexus_info[id],exception); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % Q u e u e A u t h e n t i c P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % QueueAuthenticPixels() queues a mutable pixel region. If the region is % successfully initialized a pointer to a Quantum array representing the % region is returned, otherwise NULL is returned. The returned pointer may % point to a temporary working buffer for the pixels or it may point to the % final location of the pixels in memory. % % Write-only access means that any existing pixel values corresponding to % the region are ignored. This is useful if the initial image is being % created from scratch, or if the existing pixel values are to be % completely replaced without need to refer to their pre-existing values. % The application is free to read and write the pixel buffer returned by % QueueAuthenticPixels() any way it pleases. QueueAuthenticPixels() does not % initialize the pixel array values. Initializing pixel array values is the % application's responsibility. % % Performance is maximized if the selected region is part of one row, or % one or more full rows, since then there is opportunity to access the % pixels in-place (without a copy) if the image is in memory, or in a % memory-mapped file. The returned pointer must *never* be deallocated % by the user. % % Pixels accessed via the returned pointer represent a simple array of type % Quantum. If the image type is CMYK or the storage class is PseudoClass, % call GetAuthenticMetacontent() after invoking GetAuthenticPixels() to % obtain the meta-content (of type void) corresponding to the region. % Once the Quantum (and/or Quantum) array has been updated, the % changes must be saved back to the underlying image using % SyncAuthenticPixels() or they may be lost. % % The format of the QueueAuthenticPixels() method is: % % Quantum *QueueAuthenticPixels(Image *image,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o exception: return any errors or warnings in this structure. 
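%
%  A minimal sketch (assuming an Image *image, an ExceptionInfo *exception,
%  and ssize_t x, y are already in scope; only the alpha channel is assigned
%  here, a real caller must initialize every channel it needs, since the
%  queued buffer starts out undefined):
%
%      Quantum *q;
%
%      q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
%      if (q != (Quantum *) NULL)
%        {
%          for (x=0; x < (ssize_t) image->columns; x++)
%            {
%              SetPixelAlpha(image,OpaqueAlpha,q);
%              q+=GetPixelChannels(image);
%            }
%          (void) SyncAuthenticPixels(image,exception);
%        }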
% */ MagickExport Quantum *QueueAuthenticPixels(Image *image,const ssize_t x, const ssize_t y,const size_t columns,const size_t rows, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); Quantum *magick_restrict pixels; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.queue_authentic_pixels_handler != (QueueAuthenticPixelsHandler) NULL) { pixels=cache_info->methods.queue_authentic_pixels_handler(image,x,y, columns,rows,exception); return(pixels); } assert(id < (int) cache_info->number_threads); pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse, cache_info->nexus_info[id],exception); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e a d P i x e l C a c h e M e t a c o n t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadPixelCacheMetacontent() reads metacontent from the specified region of % the pixel cache. % % The format of the ReadPixelCacheMetacontent() method is: % % MagickBooleanType ReadPixelCacheMetacontent(CacheInfo *cache_info, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o cache_info: the pixel cache. % % o nexus_info: the cache nexus to read the metacontent. % % o exception: return any errors or warnings in this structure. % */ static inline MagickOffsetType ReadPixelCacheRegion( const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset, const MagickSizeType length,unsigned char *magick_restrict buffer) { register MagickOffsetType i; ssize_t count; #if !defined(MAGICKCORE_HAVE_PREAD) if (lseek(cache_info->file,offset,SEEK_SET) < 0) return((MagickOffsetType) -1); #endif count=0; for (i=0; i < (MagickOffsetType) length; i+=count) { #if !defined(MAGICKCORE_HAVE_PREAD) count=read(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t) SSIZE_MAX)); #else count=pread(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t) SSIZE_MAX),offset+i); #endif if (count <= 0) { count=0; if (errno != EINTR) break; } } return(i); } static MagickBooleanType ReadPixelCacheMetacontent( CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info, ExceptionInfo *exception) { MagickOffsetType count, offset; MagickSizeType extent, length; register ssize_t y; register unsigned char *magick_restrict q; size_t rows; if (cache_info->metacontent_extent == 0) return(MagickFalse); if (nexus_info->authentic_pixel_cache != MagickFalse) return(MagickTrue); offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+ nexus_info->region.x; length=(MagickSizeType) nexus_info->region.width* cache_info->metacontent_extent; extent=length*nexus_info->region.height; rows=nexus_info->region.height; y=0; q=(unsigned char *) nexus_info->metacontent; switch (cache_info->type) { case MemoryCache: case MapCache: { register unsigned char *magick_restrict p; /* Read meta-content from memory. 
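        When the nexus spans full cache rows the per-row loop below collapses
        to a single copy of the whole extent.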
*/ if ((cache_info->columns == nexus_info->region.width) && (extent == (MagickSizeType) ((size_t) extent))) { length=extent; rows=1UL; } p=(unsigned char *) cache_info->metacontent+offset* cache_info->metacontent_extent; for (y=0; y < (ssize_t) rows; y++) { (void) memcpy(q,p,(size_t) length); p+=cache_info->metacontent_extent*cache_info->columns; q+=cache_info->metacontent_extent*nexus_info->region.width; } break; } case DiskCache: { /* Read meta content from disk. */ LockSemaphoreInfo(cache_info->file_semaphore); if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse) { ThrowFileException(exception,FileOpenError,"UnableToOpenFile", cache_info->cache_filename); UnlockSemaphoreInfo(cache_info->file_semaphore); return(MagickFalse); } if ((cache_info->columns == nexus_info->region.width) && (extent <= MagickMaxBufferExtent)) { length=extent; rows=1UL; } extent=(MagickSizeType) cache_info->columns*cache_info->rows; for (y=0; y < (ssize_t) rows; y++) { count=ReadPixelCacheRegion(cache_info,cache_info->offset+extent* cache_info->number_channels*sizeof(Quantum)+offset* cache_info->metacontent_extent,length,(unsigned char *) q); if (count != (MagickOffsetType) length) break; offset+=cache_info->columns; q+=cache_info->metacontent_extent*nexus_info->region.width; } if (IsFileDescriptorLimitExceeded() != MagickFalse) (void) ClosePixelCacheOnDisk(cache_info); UnlockSemaphoreInfo(cache_info->file_semaphore); break; } case DistributedCache: { RectangleInfo region; /* Read metacontent from distributed cache. */ LockSemaphoreInfo(cache_info->file_semaphore); region=nexus_info->region; if ((cache_info->columns != nexus_info->region.width) || (extent > MagickMaxBufferExtent)) region.height=1UL; else { length=extent; rows=1UL; } for (y=0; y < (ssize_t) rows; y++) { count=ReadDistributePixelCacheMetacontent((DistributeCacheInfo *) cache_info->server_info,&region,length,(unsigned char *) q); if (count != (MagickOffsetType) length) break; q+=cache_info->metacontent_extent*nexus_info->region.width; region.y++; } UnlockSemaphoreInfo(cache_info->file_semaphore); break; } default: break; } if (y < (ssize_t) rows) { ThrowFileException(exception,CacheError,"UnableToReadPixelCache", cache_info->cache_filename); return(MagickFalse); } if ((cache_info->debug != MagickFalse) && (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse)) (void) LogMagickEvent(CacheEvent,GetMagickModule(), "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double) nexus_info->region.width,(double) nexus_info->region.height,(double) nexus_info->region.x,(double) nexus_info->region.y); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e a d P i x e l C a c h e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadPixelCachePixels() reads pixels from the specified region of the pixel % cache. % % The format of the ReadPixelCachePixels() method is: % % MagickBooleanType ReadPixelCachePixels(CacheInfo *cache_info, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o cache_info: the pixel cache. % % o nexus_info: the cache nexus to read the pixels. % % o exception: return any errors or warnings in this structure. 
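%
%  The offset arithmetic is linear in the nexus origin.  As an illustration
%  only (assuming 4 channels and a 2-byte Quantum), a nexus at x=10, y=5 in a
%  100-column cache starts at pixel offset 5*100+10 = 510, i.e. byte offset
%  510*4*2 = 4080 past cache_info->offset in the disk case, and each
%  subsequent scanline advances the pixel offset by the full 100 columns.
%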
% */ static MagickBooleanType ReadPixelCachePixels( CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info, ExceptionInfo *exception) { MagickOffsetType count, offset; MagickSizeType extent, length; register Quantum *magick_restrict q; register ssize_t y; size_t number_channels, rows; if (nexus_info->authentic_pixel_cache != MagickFalse) return(MagickTrue); offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns; if ((ssize_t) (offset/cache_info->columns) != nexus_info->region.y) return(MagickFalse); offset+=nexus_info->region.x; number_channels=cache_info->number_channels; length=(MagickSizeType) number_channels*nexus_info->region.width* sizeof(Quantum); if ((length/number_channels/sizeof(Quantum)) != nexus_info->region.width) return(MagickFalse); rows=nexus_info->region.height; extent=length*rows; if ((extent == 0) || ((extent/length) != rows)) return(MagickFalse); y=0; q=nexus_info->pixels; switch (cache_info->type) { case MemoryCache: case MapCache: { register Quantum *magick_restrict p; /* Read pixels from memory. */ if ((cache_info->columns == nexus_info->region.width) && (extent == (MagickSizeType) ((size_t) extent))) { length=extent; rows=1UL; } p=cache_info->pixels+cache_info->number_channels*offset; for (y=0; y < (ssize_t) rows; y++) { (void) memcpy(q,p,(size_t) length); p+=cache_info->number_channels*cache_info->columns; q+=cache_info->number_channels*nexus_info->region.width; } break; } case DiskCache: { /* Read pixels from disk. */ LockSemaphoreInfo(cache_info->file_semaphore); if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse) { ThrowFileException(exception,FileOpenError,"UnableToOpenFile", cache_info->cache_filename); UnlockSemaphoreInfo(cache_info->file_semaphore); return(MagickFalse); } if ((cache_info->columns == nexus_info->region.width) && (extent <= MagickMaxBufferExtent)) { length=extent; rows=1UL; } for (y=0; y < (ssize_t) rows; y++) { count=ReadPixelCacheRegion(cache_info,cache_info->offset+offset* cache_info->number_channels*sizeof(*q),length,(unsigned char *) q); if (count != (MagickOffsetType) length) break; offset+=cache_info->columns; q+=cache_info->number_channels*nexus_info->region.width; } if (IsFileDescriptorLimitExceeded() != MagickFalse) (void) ClosePixelCacheOnDisk(cache_info); UnlockSemaphoreInfo(cache_info->file_semaphore); break; } case DistributedCache: { RectangleInfo region; /* Read pixels from distributed cache. 
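        Pixels are requested from the remote cache server one scanline at a
        time unless the nexus spans full cache rows and the extent fits the
        transfer buffer, in which case everything is fetched in one request.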
*/ LockSemaphoreInfo(cache_info->file_semaphore); region=nexus_info->region; if ((cache_info->columns != nexus_info->region.width) || (extent > MagickMaxBufferExtent)) region.height=1UL; else { length=extent; rows=1UL; } for (y=0; y < (ssize_t) rows; y++) { count=ReadDistributePixelCachePixels((DistributeCacheInfo *) cache_info->server_info,&region,length,(unsigned char *) q); if (count != (MagickOffsetType) length) break; q+=cache_info->number_channels*nexus_info->region.width; region.y++; } UnlockSemaphoreInfo(cache_info->file_semaphore); break; } default: break; } if (y < (ssize_t) rows) { ThrowFileException(exception,CacheError,"UnableToReadPixelCache", cache_info->cache_filename); return(MagickFalse); } if ((cache_info->debug != MagickFalse) && (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse)) (void) LogMagickEvent(CacheEvent,GetMagickModule(), "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double) nexus_info->region.width,(double) nexus_info->region.height,(double) nexus_info->region.x,(double) nexus_info->region.y); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e f e r e n c e P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReferencePixelCache() increments the reference count associated with the % pixel cache returning a pointer to the cache. % % The format of the ReferencePixelCache method is: % % Cache ReferencePixelCache(Cache cache_info) % % A description of each parameter follows: % % o cache_info: the pixel cache. % */ MagickPrivate Cache ReferencePixelCache(Cache cache) { CacheInfo *magick_restrict cache_info; assert(cache != (Cache *) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); LockSemaphoreInfo(cache_info->semaphore); cache_info->reference_count++; UnlockSemaphoreInfo(cache_info->semaphore); return(cache_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e s e t P i x e l C a c h e C h a n n e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ResetPixelCacheChannels() resets the pixel cache channels. % % The format of the ResetPixelCacheChannels method is: % % void ResetPixelCacheChannels(Image *) % % A description of each parameter follows: % % o image: the image. % */ MagickPrivate void ResetPixelCacheChannels(Image *image) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); cache_info->number_channels=GetPixelChannels(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e s e t C a c h e A n o n y m o u s M e m o r y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ResetCacheAnonymousMemory() resets the anonymous_memory value. 
% % The format of the ResetCacheAnonymousMemory method is: % % void ResetCacheAnonymousMemory(void) % */ MagickPrivate void ResetCacheAnonymousMemory(void) { cache_anonymous_memory=0; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e s e t P i x e l C a c h e E p o c h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ResetPixelCacheEpoch() resets the pixel cache epoch. % % The format of the ResetPixelCacheEpoch method is: % % void ResetPixelCacheEpoch(void) % */ MagickPrivate void ResetPixelCacheEpoch(void) { cache_epoch=0; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S e t P i x e l C a c h e M e t h o d s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetPixelCacheMethods() sets the image pixel methods to the specified ones. % % The format of the SetPixelCacheMethods() method is: % % SetPixelCacheMethods(Cache *,CacheMethods *cache_methods) % % A description of each parameter follows: % % o cache: the pixel cache. % % o cache_methods: Specifies a pointer to a CacheMethods structure. % */ MagickPrivate void SetPixelCacheMethods(Cache cache,CacheMethods *cache_methods) { CacheInfo *magick_restrict cache_info; GetOneAuthenticPixelFromHandler get_one_authentic_pixel_from_handler; GetOneVirtualPixelFromHandler get_one_virtual_pixel_from_handler; /* Set cache pixel methods. */ assert(cache != (Cache) NULL); assert(cache_methods != (CacheMethods *) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", cache_info->filename); if (cache_methods->get_virtual_pixel_handler != (GetVirtualPixelHandler) NULL) cache_info->methods.get_virtual_pixel_handler= cache_methods->get_virtual_pixel_handler; if (cache_methods->destroy_pixel_handler != (DestroyPixelHandler) NULL) cache_info->methods.destroy_pixel_handler= cache_methods->destroy_pixel_handler; if (cache_methods->get_virtual_metacontent_from_handler != (GetVirtualMetacontentFromHandler) NULL) cache_info->methods.get_virtual_metacontent_from_handler= cache_methods->get_virtual_metacontent_from_handler; if (cache_methods->get_authentic_pixels_handler != (GetAuthenticPixelsHandler) NULL) cache_info->methods.get_authentic_pixels_handler= cache_methods->get_authentic_pixels_handler; if (cache_methods->queue_authentic_pixels_handler != (QueueAuthenticPixelsHandler) NULL) cache_info->methods.queue_authentic_pixels_handler= cache_methods->queue_authentic_pixels_handler; if (cache_methods->sync_authentic_pixels_handler != (SyncAuthenticPixelsHandler) NULL) cache_info->methods.sync_authentic_pixels_handler= cache_methods->sync_authentic_pixels_handler; if (cache_methods->get_authentic_pixels_from_handler != (GetAuthenticPixelsFromHandler) NULL) cache_info->methods.get_authentic_pixels_from_handler= cache_methods->get_authentic_pixels_from_handler; if (cache_methods->get_authentic_metacontent_from_handler != (GetAuthenticMetacontentFromHandler) NULL) cache_info->methods.get_authentic_metacontent_from_handler= cache_methods->get_authentic_metacontent_from_handler; get_one_virtual_pixel_from_handler= cache_info->methods.get_one_virtual_pixel_from_handler; if (get_one_virtual_pixel_from_handler != (GetOneVirtualPixelFromHandler) NULL) cache_info->methods.get_one_virtual_pixel_from_handler= 
cache_methods->get_one_virtual_pixel_from_handler; get_one_authentic_pixel_from_handler= cache_methods->get_one_authentic_pixel_from_handler; if (get_one_authentic_pixel_from_handler != (GetOneAuthenticPixelFromHandler) NULL) cache_info->methods.get_one_authentic_pixel_from_handler= cache_methods->get_one_authentic_pixel_from_handler; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S e t P i x e l C a c h e N e x u s P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetPixelCacheNexusPixels() defines the region of the cache for the % specified cache nexus. % % The format of the SetPixelCacheNexusPixels() method is: % % Quantum SetPixelCacheNexusPixels( % const CacheInfo *magick_restrict cache_info,const MapMode mode, % const ssize_t x,const ssize_t y,const size_t width,const size_t height, % const MagickBooleanType buffered,NexusInfo *magick_restrict nexus_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o cache_info: the pixel cache. % % o mode: ReadMode, WriteMode, or IOMode. % % o x,y,width,height: define the region of this particular cache nexus. % % o buffered: if true, nexus pixels are buffered. % % o nexus_info: the cache nexus to set. % % o exception: return any errors or warnings in this structure. % */ static inline MagickBooleanType AcquireCacheNexusPixels( const CacheInfo *magick_restrict cache_info,const MagickSizeType length, NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception) { if (length != (MagickSizeType) ((size_t) length)) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"PixelCacheAllocationFailed","`%s'", cache_info->filename); return(MagickFalse); } nexus_info->length=0; nexus_info->mapped=MagickFalse; if (cache_anonymous_memory <= 0) { nexus_info->cache=(Quantum *) MagickAssumeAligned(AcquireAlignedMemory(1, (size_t) length)); if (nexus_info->cache != (Quantum *) NULL) (void) memset(nexus_info->cache,0,(size_t) length); } else { nexus_info->cache=(Quantum *) MapBlob(-1,IOMode,0,(size_t) length); if (nexus_info->cache != (Quantum *) NULL) nexus_info->mapped=MagickTrue; } if (nexus_info->cache == (Quantum *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"PixelCacheAllocationFailed","`%s'", cache_info->filename); return(MagickFalse); } nexus_info->length=length; return(MagickTrue); } static inline void PrefetchPixelCacheNexusPixels(const NexusInfo *nexus_info, const MapMode mode) { if (nexus_info->length < CACHE_LINE_SIZE) return; if (mode == ReadMode) { MagickCachePrefetch((unsigned char *) nexus_info->pixels+CACHE_LINE_SIZE, 0,1); return; } MagickCachePrefetch((unsigned char *) nexus_info->pixels+CACHE_LINE_SIZE,1,1); } static Quantum *SetPixelCacheNexusPixels( const CacheInfo *magick_restrict cache_info,const MapMode mode, const ssize_t x,const ssize_t y,const size_t width,const size_t height, const MagickBooleanType buffered,NexusInfo *magick_restrict nexus_info, ExceptionInfo *exception) { MagickBooleanType status; MagickSizeType length, number_pixels; assert(cache_info != (const CacheInfo *) NULL); assert(cache_info->signature == MagickCoreSignature); if (cache_info->type == UndefinedCache) return((Quantum *) NULL); assert(nexus_info->signature == MagickCoreSignature); (void) memset(&nexus_info->region,0,sizeof(nexus_info->region)); if ((width == 0) || (height == 0)) { (void) ThrowMagickException(exception,GetMagickModule(),CacheError, 
"NoPixelsDefinedInCache","`%s'",cache_info->filename); return((Quantum *) NULL); } if (((cache_info->type == MemoryCache) || (cache_info->type == MapCache)) && (buffered == MagickFalse)) { if (((x >= 0) && (y >= 0) && (((ssize_t) height+y-1) < (ssize_t) cache_info->rows)) && (((x == 0) && (width == cache_info->columns)) || ((height == 1) && (((ssize_t) width+x-1) < (ssize_t) cache_info->columns)))) { MagickOffsetType offset; /* Pixels are accessed directly from memory. */ offset=(MagickOffsetType) y*cache_info->columns+x; nexus_info->pixels=cache_info->pixels+cache_info->number_channels* offset; nexus_info->metacontent=(void *) NULL; if (cache_info->metacontent_extent != 0) nexus_info->metacontent=(unsigned char *) cache_info->metacontent+ offset*cache_info->metacontent_extent; nexus_info->region.width=width; nexus_info->region.height=height; nexus_info->region.x=x; nexus_info->region.y=y; nexus_info->authentic_pixel_cache=MagickTrue; PrefetchPixelCacheNexusPixels(nexus_info,mode); return(nexus_info->pixels); } } /* Pixels are stored in a staging region until they are synced to the cache. */ if (((MagickSizeType) width > cache_info->width_limit) || ((MagickSizeType) height > cache_info->height_limit)) { (void) ThrowMagickException(exception,GetMagickModule(),ImageError, "WidthOrHeightExceedsLimit","`%s'",cache_info->filename); return((Quantum *) NULL); } number_pixels=(MagickSizeType) width*height; length=MagickMax(number_pixels,MagickMax(cache_info->columns, cache_info->rows))*cache_info->number_channels*sizeof(*nexus_info->pixels); if (cache_info->metacontent_extent != 0) length+=number_pixels*cache_info->metacontent_extent; status=MagickTrue; if (nexus_info->cache == (Quantum *) NULL) status=AcquireCacheNexusPixels(cache_info,length,nexus_info,exception); else if (nexus_info->length < length) { RelinquishCacheNexusPixels(nexus_info); status=AcquireCacheNexusPixels(cache_info,length,nexus_info,exception); } if (status == MagickFalse) return((Quantum *) NULL); nexus_info->pixels=nexus_info->cache; nexus_info->metacontent=(void *) NULL; if (cache_info->metacontent_extent != 0) nexus_info->metacontent=(void *) (nexus_info->pixels+ cache_info->number_channels*number_pixels); nexus_info->region.width=width; nexus_info->region.height=height; nexus_info->region.x=x; nexus_info->region.y=y; nexus_info->authentic_pixel_cache=cache_info->type == PingCache ? MagickTrue : MagickFalse; PrefetchPixelCacheNexusPixels(nexus_info,mode); return(nexus_info->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t P i x e l C a c h e V i r t u a l M e t h o d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetPixelCacheVirtualMethod() sets the "virtual pixels" method for the % pixel cache and returns the previous setting. A virtual pixel is any pixel % access that is outside the boundaries of the image cache. % % The format of the SetPixelCacheVirtualMethod() method is: % % VirtualPixelMethod SetPixelCacheVirtualMethod(Image *image, % const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o virtual_pixel_method: choose the type of virtual pixel. % % o exception: return any errors or warnings in this structure. 
% */ static MagickBooleanType SetCacheAlphaChannel(Image *image,const Quantum alpha, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; CacheView *magick_restrict image_view; MagickBooleanType status; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); image->alpha_trait=BlendPixelTrait; status=MagickTrue; image_view=AcquireVirtualCacheView(image,exception); /* must be virtual */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { SetPixelAlpha(image,alpha,q); q+=GetPixelChannels(image); } status=SyncCacheViewAuthenticPixels(image_view,exception); } image_view=DestroyCacheView(image_view); return(status); } MagickPrivate VirtualPixelMethod SetPixelCacheVirtualMethod(Image *image, const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; VirtualPixelMethod method; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); method=cache_info->virtual_pixel_method; cache_info->virtual_pixel_method=virtual_pixel_method; if ((image->columns != 0) && (image->rows != 0)) switch (virtual_pixel_method) { case BackgroundVirtualPixelMethod: { if ((image->background_color.alpha_trait != UndefinedPixelTrait) && (image->alpha_trait == UndefinedPixelTrait)) (void) SetCacheAlphaChannel(image,OpaqueAlpha,exception); if ((IsPixelInfoGray(&image->background_color) == MagickFalse) && (IsGrayColorspace(image->colorspace) != MagickFalse)) (void) SetImageColorspace(image,sRGBColorspace,exception); break; } case TransparentVirtualPixelMethod: { if (image->alpha_trait == UndefinedPixelTrait) (void) SetCacheAlphaChannel(image,OpaqueAlpha,exception); break; } default: break; } return(method); } #if defined(MAGICKCORE_OPENCL_SUPPORT) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S y n c A u t h e n t i c O p e n C L B u f f e r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncAuthenticOpenCLBuffer() makes sure that all the OpenCL operations have % been completed and updates the host memory. % % The format of the SyncAuthenticOpenCLBuffer() method is: % % void SyncAuthenticOpenCLBuffer(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ static void CopyOpenCLBuffer(CacheInfo *magick_restrict cache_info) { assert(cache_info != (CacheInfo *) NULL); assert(cache_info->signature == MagickCoreSignature); if ((cache_info->type != MemoryCache) || (cache_info->opencl == (MagickCLCacheInfo) NULL)) return; /* Ensure single threaded access to OpenCL environment. 
*/ LockSemaphoreInfo(cache_info->semaphore); cache_info->opencl=CopyMagickCLCacheInfo(cache_info->opencl); UnlockSemaphoreInfo(cache_info->semaphore); } MagickPrivate void SyncAuthenticOpenCLBuffer(const Image *image) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); cache_info=(CacheInfo *) image->cache; CopyOpenCLBuffer(cache_info); } #endif /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S y n c A u t h e n t i c P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncAuthenticPixelCacheNexus() saves the authentic image pixels to the % in-memory or disk cache. The method returns MagickTrue if the pixel region % is synced, otherwise MagickFalse. % % The format of the SyncAuthenticPixelCacheNexus() method is: % % MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o nexus_info: the cache nexus to sync. % % o exception: return any errors or warnings in this structure. % */ MagickPrivate MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image, NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; MagickBooleanType status; /* Transfer pixels to the cache. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->cache == (Cache) NULL) ThrowBinaryException(CacheError,"PixelCacheIsNotOpen",image->filename); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->type == UndefinedCache) return(MagickFalse); if (image->mask_trait != UpdatePixelTrait) { if (((image->channels & WriteMaskChannel) != 0) && (ClipPixelCacheNexus(image,nexus_info,exception) == MagickFalse)) return(MagickFalse); if (((image->channels & CompositeMaskChannel) != 0) && (MaskPixelCacheNexus(image,nexus_info,exception) == MagickFalse)) return(MagickFalse); } if (nexus_info->authentic_pixel_cache != MagickFalse) { if (image->taint == MagickFalse) image->taint=MagickTrue; return(MagickTrue); } assert(cache_info->signature == MagickCoreSignature); status=WritePixelCachePixels(cache_info,nexus_info,exception); if ((cache_info->metacontent_extent != 0) && (WritePixelCacheMetacontent(cache_info,nexus_info,exception) == MagickFalse)) return(MagickFalse); if ((status != MagickFalse) && (image->taint == MagickFalse)) image->taint=MagickTrue; return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S y n c A u t h e n t i c P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncAuthenticPixelsCache() saves the authentic image pixels to the in-memory % or disk cache. The method returns MagickTrue if the pixel region is synced, % otherwise MagickFalse. % % The format of the SyncAuthenticPixelsCache() method is: % % MagickBooleanType SyncAuthenticPixelsCache(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ static MagickBooleanType SyncAuthenticPixelsCache(Image *image, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); status=SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id], exception); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S y n c A u t h e n t i c P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncAuthenticPixels() saves the image pixels to the in-memory or disk cache. % The method returns MagickTrue if the pixel region is flushed, otherwise % MagickFalse. % % The format of the SyncAuthenticPixels() method is: % % MagickBooleanType SyncAuthenticPixels(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType SyncAuthenticPixels(Image *image, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.sync_authentic_pixels_handler != (SyncAuthenticPixelsHandler) NULL) { status=cache_info->methods.sync_authentic_pixels_handler(image, exception); return(status); } assert(id < (int) cache_info->number_threads); status=SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id], exception); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S y n c I m a g e P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncImagePixelCache() saves the image pixels to the in-memory or disk cache. % The method returns MagickTrue if the pixel region is flushed, otherwise % MagickFalse. % % The format of the SyncImagePixelCache() method is: % % MagickBooleanType SyncImagePixelCache(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickPrivate MagickBooleanType SyncImagePixelCache(Image *image, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; assert(image != (Image *) NULL); assert(exception != (ExceptionInfo *) NULL); cache_info=(CacheInfo *) GetImagePixelCache(image,MagickTrue,exception); return(cache_info == (CacheInfo *) NULL ? MagickFalse : MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + W r i t e P i x e l C a c h e M e t a c o n t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WritePixelCacheMetacontent() writes the meta-content to the specified region % of the pixel cache. 
% % The format of the WritePixelCacheMetacontent() method is: % % MagickBooleanType WritePixelCacheMetacontent(CacheInfo *cache_info, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o cache_info: the pixel cache. % % o nexus_info: the cache nexus to write the meta-content. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType WritePixelCacheMetacontent(CacheInfo *cache_info, NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception) { MagickOffsetType count, offset; MagickSizeType extent, length; register const unsigned char *magick_restrict p; register ssize_t y; size_t rows; if (cache_info->metacontent_extent == 0) return(MagickFalse); if (nexus_info->authentic_pixel_cache != MagickFalse) return(MagickTrue); offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+ nexus_info->region.x; length=(MagickSizeType) nexus_info->region.width* cache_info->metacontent_extent; extent=(MagickSizeType) length*nexus_info->region.height; rows=nexus_info->region.height; y=0; p=(unsigned char *) nexus_info->metacontent; switch (cache_info->type) { case MemoryCache: case MapCache: { register unsigned char *magick_restrict q; /* Write associated pixels to memory. */ if ((cache_info->columns == nexus_info->region.width) && (extent == (MagickSizeType) ((size_t) extent))) { length=extent; rows=1UL; } q=(unsigned char *) cache_info->metacontent+offset* cache_info->metacontent_extent; for (y=0; y < (ssize_t) rows; y++) { (void) memcpy(q,p,(size_t) length); p+=nexus_info->region.width*cache_info->metacontent_extent; q+=cache_info->columns*cache_info->metacontent_extent; } break; } case DiskCache: { /* Write associated pixels to disk. */ LockSemaphoreInfo(cache_info->file_semaphore); if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse) { ThrowFileException(exception,FileOpenError,"UnableToOpenFile", cache_info->cache_filename); UnlockSemaphoreInfo(cache_info->file_semaphore); return(MagickFalse); } if ((cache_info->columns == nexus_info->region.width) && (extent <= MagickMaxBufferExtent)) { length=extent; rows=1UL; } extent=(MagickSizeType) cache_info->columns*cache_info->rows; for (y=0; y < (ssize_t) rows; y++) { count=WritePixelCacheRegion(cache_info,cache_info->offset+extent* cache_info->number_channels*sizeof(Quantum)+offset* cache_info->metacontent_extent,length,(const unsigned char *) p); if (count != (MagickOffsetType) length) break; p+=cache_info->metacontent_extent*nexus_info->region.width; offset+=cache_info->columns; } if (IsFileDescriptorLimitExceeded() != MagickFalse) (void) ClosePixelCacheOnDisk(cache_info); UnlockSemaphoreInfo(cache_info->file_semaphore); break; } case DistributedCache: { RectangleInfo region; /* Write metacontent to distributed cache. 
*/ LockSemaphoreInfo(cache_info->file_semaphore); region=nexus_info->region; if ((cache_info->columns != nexus_info->region.width) || (extent > MagickMaxBufferExtent)) region.height=1UL; else { length=extent; rows=1UL; } for (y=0; y < (ssize_t) rows; y++) { count=WriteDistributePixelCacheMetacontent((DistributeCacheInfo *) cache_info->server_info,&region,length,(const unsigned char *) p); if (count != (MagickOffsetType) length) break; p+=cache_info->metacontent_extent*nexus_info->region.width; region.y++; } UnlockSemaphoreInfo(cache_info->file_semaphore); break; } default: break; } if (y < (ssize_t) rows) { ThrowFileException(exception,CacheError,"UnableToWritePixelCache", cache_info->cache_filename); return(MagickFalse); } if ((cache_info->debug != MagickFalse) && (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse)) (void) LogMagickEvent(CacheEvent,GetMagickModule(), "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double) nexus_info->region.width,(double) nexus_info->region.height,(double) nexus_info->region.x,(double) nexus_info->region.y); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + W r i t e C a c h e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WritePixelCachePixels() writes image pixels to the specified region of the % pixel cache. % % The format of the WritePixelCachePixels() method is: % % MagickBooleanType WritePixelCachePixels(CacheInfo *cache_info, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o cache_info: the pixel cache. % % o nexus_info: the cache nexus to write the pixels. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType WritePixelCachePixels( CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info, ExceptionInfo *exception) { MagickOffsetType count, offset; MagickSizeType extent, length; register const Quantum *magick_restrict p; register ssize_t y; size_t rows; if (nexus_info->authentic_pixel_cache != MagickFalse) return(MagickTrue); offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+ nexus_info->region.x; length=(MagickSizeType) cache_info->number_channels*nexus_info->region.width* sizeof(Quantum); extent=length*nexus_info->region.height; rows=nexus_info->region.height; y=0; p=nexus_info->pixels; switch (cache_info->type) { case MemoryCache: case MapCache: { register Quantum *magick_restrict q; /* Write pixels to memory. */ if ((cache_info->columns == nexus_info->region.width) && (extent == (MagickSizeType) ((size_t) extent))) { length=extent; rows=1UL; } q=cache_info->pixels+cache_info->number_channels*offset; for (y=0; y < (ssize_t) rows; y++) { (void) memcpy(q,p,(size_t) length); p+=cache_info->number_channels*nexus_info->region.width; q+=cache_info->number_channels*cache_info->columns; } break; } case DiskCache: { /* Write pixels to disk. 
*/ LockSemaphoreInfo(cache_info->file_semaphore); if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse) { ThrowFileException(exception,FileOpenError,"UnableToOpenFile", cache_info->cache_filename); UnlockSemaphoreInfo(cache_info->file_semaphore); return(MagickFalse); } if ((cache_info->columns == nexus_info->region.width) && (extent <= MagickMaxBufferExtent)) { length=extent; rows=1UL; } for (y=0; y < (ssize_t) rows; y++) { count=WritePixelCacheRegion(cache_info,cache_info->offset+offset* cache_info->number_channels*sizeof(*p),length,(const unsigned char *) p); if (count != (MagickOffsetType) length) break; p+=cache_info->number_channels*nexus_info->region.width; offset+=cache_info->columns; } if (IsFileDescriptorLimitExceeded() != MagickFalse) (void) ClosePixelCacheOnDisk(cache_info); UnlockSemaphoreInfo(cache_info->file_semaphore); break; } case DistributedCache: { RectangleInfo region; /* Write pixels to distributed cache. */ LockSemaphoreInfo(cache_info->file_semaphore); region=nexus_info->region; if ((cache_info->columns != nexus_info->region.width) || (extent > MagickMaxBufferExtent)) region.height=1UL; else { length=extent; rows=1UL; } for (y=0; y < (ssize_t) rows; y++) { count=WriteDistributePixelCachePixels((DistributeCacheInfo *) cache_info->server_info,&region,length,(const unsigned char *) p); if (count != (MagickOffsetType) length) break; p+=cache_info->number_channels*nexus_info->region.width; region.y++; } UnlockSemaphoreInfo(cache_info->file_semaphore); break; } default: break; } if (y < (ssize_t) rows) { ThrowFileException(exception,CacheError,"UnableToWritePixelCache", cache_info->cache_filename); return(MagickFalse); } if ((cache_info->debug != MagickFalse) && (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse)) (void) LogMagickEvent(CacheEvent,GetMagickModule(), "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double) nexus_info->region.width,(double) nexus_info->region.height,(double) nexus_info->region.x,(double) nexus_info->region.y); return(MagickTrue); }
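/*
  A minimal, illustrative sketch of the authentic-pixel workflow documented
  above: request a region with GetAuthenticPixels(), modify it, then let
  SyncAuthenticPixels() flush the region to the in-memory or disk cache.
  This program is not part of cache.c; the filename "input.png" is a
  placeholder and error handling is reduced to the bare minimum.
*/
#include <MagickCore/MagickCore.h>

int main(int argc,char **argv)
{
  ExceptionInfo
    *exception;

  Image
    *image;

  ImageInfo
    *image_info;

  register Quantum
    *q;

  register ssize_t
    x;

  (void) argc;
  MagickCoreGenesis(*argv,MagickFalse);
  exception=AcquireExceptionInfo();
  image_info=AcquireImageInfo();
  (void) CopyMagickString(image_info->filename,"input.png",MagickPathExtent);
  image=ReadImage(image_info,exception);
  if (image != (Image *) NULL)
    {
      /*
        Acquire write access to the first row of authentic pixels.
      */
      q=GetAuthenticPixels(image,0,0,image->columns,1,exception);
      if (q != (Quantum *) NULL)
        {
          for (x=0; x < (ssize_t) image->columns; x++)
          {
            SetPixelRed(image,(Quantum) QuantumRange,q);
            q+=GetPixelChannels(image);
          }
          /*
            Flush the modified region back to the pixel cache.
          */
          (void) SyncAuthenticPixels(image,exception);
        }
      image=DestroyImage(image);
    }
  image_info=DestroyImageInfo(image_info);
  exception=DestroyExceptionInfo(exception);
  MagickCoreTerminus();
  return(0);
}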
kwallet_fmt_plug.c
/* KDE KWallet cracker patch for JtR. Written by Narendra Kangralkar * <narendrakangralkar at gmail.com> and Dhiru Kholia <dhiru at openwall.com>. * * Also see https://github.com/gaganpreet/kwallet-dump ;) * * This software is Copyright (c) 2013 by above authors and it is hereby * released to the general public under the following terms: * * Redistribution and use in source and binary forms, with or without * modification, are permitted. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_kwallet; #elif FMT_REGISTERS_H john_register_one(&fmt_kwallet); #else #include <string.h> #include <assert.h> #include <errno.h> #include "arch.h" #include "misc.h" #include "memory.h" #include "common.h" #include "formats.h" #include "params.h" #include "options.h" #include <openssl/blowfish.h> #include "sha.h" #include "pbkdf2_hmac_sha512.h" #ifdef _OPENMP #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 16 // reduced for PBKDF2_SHA512 case #endif #endif #include "memdbg.h" #define FORMAT_LABEL "kwallet" #define FORMAT_NAME "KDE KWallet" #define FORMAT_TAG "$kwallet$" #define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1) #ifdef SIMD_COEF_64 #define ALGORITHM_NAME "SHA1 / PBKDF2-SHA512 " SHA1_ALGORITHM_NAME #else #define ALGORITHM_NAME "SHA1 / PBKDF2-SHA512 32/" ARCH_BITS_STR #endif #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define BINARY_SIZE 0 #define PLAINTEXT_LENGTH 125 #define SALT_SIZE sizeof(*cur_salt) #define BINARY_ALIGN 1 #define SALT_ALIGN sizeof(int) #ifdef SIMD_COEF_64 #define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA512 #define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA512 #else #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #endif // #define BENCH_LARGE_PASSWORDS 1 static struct fmt_tests kwallet_tests[] = { {"$kwallet$112$25be8c9cdaa53f5404d7809ff48a37752b325c8ccd296fbd537440dfcef9d66f72940e97141d21702b325c8ccd296fbd537440dfcef9d66fcd953cf1e41904b0c494ad1e718760e74c4487cc1449233d85525e7974da221774010bb9582b1d68b55ea9288f53a2be6bd15b93a5e1b33d", "openwall"}, {"$kwallet$240$e5383800cf0ccabf76461a647bf7ed94b7260f0ac33374ea1fec0bb0144b7e3f8fa3d0f368a61075827ac60beb62be830ece6fb2f9cfb13561ed4372af19d0a720a37b0d21132a59513b3ab9030395671c9725d7d6592ad98a4754795c858c59df6049522384af98c77d5351ddc577da07ea10e7d44b3fbc9af737744f53ed0a0a67252599b66a4d1fc65926d7097dc50f45b57f41f11934e0cfc4d5491f82b43f38acde1fd337d51cf47eb5da1bcd8bff1432d7b02f0d316633b33ced337d202a44342fc79db6aea568fb322831d886d4cb6dcc50a3e17c1027550b9ee94f56bc33f9861d2b24cbb7797d79f967bea4", ""}, #ifdef BENCH_LARGE_PASSWORDS {"$kwallet$240$f17296588b2dd9f22f7c9ec43fddb5ee28db5edcb69575dcb887f5d2d0bfcc9317773c0f4e32517ace087d33ace8155a099e16c259c1a2f4f8992fc17481b122ef9f0c38c9eafd46794ff34e32c3ad83345f2d4e19ce727379856af9b774c00dca25a8528f5a2318af1fcbffdc6e73e7e081b106b4fbfe1887ea5bde782f9b3c3a2cfe3b215a65c66c03d053bfdee4d5d940e3e28f0c2d9897460fc1153af198b9037aac4dcd76e999c6d6a1f67f559e87349c6416cd7fc37b85ee230ef8caa2417b65732b61dbdb68fd2d12eb3df87474a05f337305c79427a970700a1b63f2018ba06f32e522bba4d30a0ec8ae223d", "pythonpythonpythonpythonpython"}, #endif // modern KWallet hash {"$kwallet$88$b4e0299dc00fbb467f622fa2f0d7b275a82014e947ae20583bcbd4a32d8bb1402f0e7baca2177ef11b86f9ce4bcbed7b638a0697202b1737a15b2cdddcc01c43748d4528f59ce402c31da30d265f8d8a02b20baeefc6e946$1$56$8f90f3b63faf4049373703f896d3511136696af6ce60b92010daa397c6eb8ea4c867288e61694002d3c152ef4d8e3119bf39cbcd6b65edb8$50000", "openwall"}, {NULL} }; #if defined (_OPENMP) static int omp_t = 1; #endif static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static int *cracked; 
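/*
  Summary of the accepted input layouts (derived from valid() and get_salt()
  below):

    $kwallet$<ctlen>$<ct hex>                                      legacy wallets (minor version 0)
    $kwallet$<ctlen>$<ct hex>$1$<saltlen>$<salt hex>$<iterations>  modern wallets (minor version 1)

  ct is the encrypted wallet blob; for modern wallets the salt and iteration
  count feed PBKDF2-HMAC-SHA512.
*/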
static struct custom_salt { unsigned char ct[0x10000]; unsigned int ctlen; // following fields are required to support modern KWallet files int kwallet_minor_version; unsigned char salt[256]; int saltlen; int iterations; } *cur_salt; static void init(struct fmt_main *self) { #if defined (_OPENMP) omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key)); cracked = mem_calloc(self->params.max_keys_per_crypt, sizeof(*cracked)); } static void done(void) { MEM_FREE(cracked); MEM_FREE(saved_key); } static int valid(char *ciphertext, struct fmt_main *self) { char *ctcopy, *keeptr, *p; int res, extra; if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0) return 0; ctcopy = strdup(ciphertext); keeptr = ctcopy; ctcopy += FORMAT_TAG_LEN; if ((p = strtokm(ctcopy, "$")) == NULL) /* ctlen */ goto err; if (!isdec(p)) goto err; res = atoi(p); if (!res) goto err; if ((p = strtokm(NULL, "$")) == NULL) /* ct */ goto err; if (hexlenl(p, &extra) != res*2 || extra) goto err; if ((p = strtokm(NULL, "$")) != NULL) { res = atoi(p); /* minor version */ if (res != 1) { goto err; } if ((p = strtokm(NULL, "$")) == NULL) /* saltlen */ goto err; res = atoi(p); /* saltlen */ if (res > 256) goto err; if ((p = strtokm(NULL, "$")) == NULL) /* salt */ goto err; if (hexlenl(p, &extra) != res*2 || extra) goto err; if ((p = strtokm(NULL, "$")) == NULL) /* iterations */ goto err; } MEM_FREE(keeptr); return 1; err: MEM_FREE(keeptr); return 0; } static void *get_salt(char *ciphertext) { char *ctcopy = strdup(ciphertext); static struct custom_salt *salt; char *keeptr = ctcopy; int i; char *p; ctcopy += FORMAT_TAG_LEN; /* skip over "$kwallet$" */ if (!salt) salt = mem_calloc_tiny(sizeof(struct custom_salt), MEM_ALIGN_WORD); memset(salt, 0, sizeof(*salt)); p = strtokm(ctcopy, "$"); salt->ctlen = atoi(p); p = strtokm(NULL, "$"); for (i = 0; i < salt->ctlen; i++) salt->ct[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; if ((p = strtokm(NULL, "$")) != NULL) { // modern KWallet file salt->kwallet_minor_version = atoi(p); p = strtokm(NULL, "$"); salt->saltlen = atoi(p); p = strtokm(NULL, "$"); for (i = 0; i < salt->saltlen; i++) salt->salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; p = strtokm(NULL, "$"); salt->iterations = atoi(p); } else { // Old KWallet files, 0 has been the MINOR version until // KWallet 4.13, from that point we use it to upgrade the hash // to PBKDF2_SHA512 salt->kwallet_minor_version = 0; } MEM_FREE(keeptr); return (void *)salt; } static void password2hash(const char *password, unsigned char *hash, int *key_size) { SHA_CTX ctx; unsigned char output[20 * ((PLAINTEXT_LENGTH + 15) / 16)]; unsigned char buf[20]; int i, j, oindex = 0; int plength = strlen(password); // divide the password into blocks of size 16 and hash the resulting // individually! 
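	// In other words: each block of up to 16 password bytes is hashed with
	// SHA-1, the digest is re-hashed 2000 times (the j loop below), and the
	// 20-byte results are concatenated; the final key is then truncated
	// according to the password-length table at the end of this function.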
for (i = 0; i <= plength; i += 16) { SHA1_Init(&ctx); SHA1_Update(&ctx, password + i, MIN(plength - i, 16)); // To make brute force take longer for (j = 0; j < 2000; j++) { SHA1_Final(buf, &ctx); SHA1_Init(&ctx); SHA1_Update(&ctx, buf, 20); } memcpy(output + oindex, buf, 20); oindex += 20; } if (plength < 16) { // key size is 20 memcpy(hash, output, 20); *key_size = 20; } else if (plength < 32) { // key size is 40 (20/20) memcpy(hash, output, 40); *key_size = 40; } else if (plength < 48) { // key size is 56 (20/20/16 split) memcpy(hash, output, 56); *key_size = 56; } else { // key size is 56 (14/14/14 split) memcpy(hash + 14 * 0, output + 0, 14); memcpy(hash + 14 * 1, output + 20, 14); memcpy(hash + 14 * 2, output + 40, 14); *key_size = 56; } } static void set_salt(void *salt) { cur_salt = (struct custom_salt *)salt; } // Based on "BlowfishPersistHandler::read" in backendpersisthandler.cpp static int verify_key(unsigned char *key, int key_size) { SHA_CTX ctx; BF_KEY bf_key; int sz; int i; unsigned char testhash[20]; unsigned char buffer[0x10000]; // XXX respect the stack limits! const char *t; size_t fsize; memcpy(buffer, cur_salt->ct, cur_salt->ctlen); /* Blowfish implementation in KWallet is wrong w.r.t endianness * Well, that is why we had bad_blowfish_plug.c originally ;) */ alter_endianity(buffer, cur_salt->ctlen); if (cur_salt->kwallet_minor_version == 0) { BF_set_key(&bf_key, key_size, key); for (i = 0; i < cur_salt->ctlen; i += 8) { BF_ecb_encrypt(buffer + i, buffer + i, &bf_key, 0); } } else if (cur_salt->kwallet_minor_version == 1) { unsigned char ivec[8] = { 0 }; key_size = 56; BF_set_key(&bf_key, key_size, key); BF_cbc_encrypt(buffer, buffer, cur_salt->ctlen, &bf_key, ivec, 0); } alter_endianity(buffer, cur_salt->ctlen); /* verification stuff */ t = (char *) buffer; // strip the leading data t += 8; // one block of random data // strip the file size off fsize = 0; fsize |= ((size_t) (*t) << 24) & 0xff000000; t++; fsize |= ((size_t) (*t) << 16) & 0x00ff0000; t++; fsize |= ((size_t) (*t) << 8) & 0x0000ff00; t++; fsize |= (size_t) (*t) & 0x000000ff; t++; if (fsize > (size_t) (cur_salt->ctlen) - 8 - 4) { // file structure error return -1; } SHA1_Init(&ctx); SHA1_Update(&ctx, t, fsize); SHA1_Final(testhash, &ctx); // compare hashes sz = cur_salt->ctlen; for (i = 0; i < 20; i++) { if (testhash[i] != buffer[sz - 20 + i]) { return -2; } } return 0; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for #endif for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) { unsigned char key[MAX_KEYS_PER_CRYPT][56]; /* 56 seems to be the max. 
key size */ int key_size[MAX_KEYS_PER_CRYPT]; int i; if (cur_salt->kwallet_minor_version == 0) { for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) { password2hash(saved_key[index+i], key[i], &key_size[i]); cracked[index+i] = !verify_key(key[i], key_size[i]); } } else if (cur_salt->kwallet_minor_version == 1) { #ifdef SIMD_COEF_64 int len[MAX_KEYS_PER_CRYPT]; unsigned char *pin[MAX_KEYS_PER_CRYPT], *pout[MAX_KEYS_PER_CRYPT]; for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) { len[i] = strlen(saved_key[i+index]); pin[i] = (unsigned char*)saved_key[i+index]; pout[i] = key[i]; } pbkdf2_sha512_sse((const unsigned char **)pin, len, cur_salt->salt, cur_salt->saltlen, cur_salt->iterations, pout, 56, 0); #else for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) { pbkdf2_sha512((const unsigned char*)(saved_key[index+i]), strlen(saved_key[index+i]), cur_salt->salt, cur_salt->saltlen, cur_salt->iterations, key[i], 56, 0); } #endif for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) cracked[index+i] = !verify_key(key[i], 56); } } return count; } static int cmp_all(void *binary, int count) { int index; for (index = 0; index < count; index++) if (cracked[index]) return 1; return 0; } static int cmp_one(void *binary, int index) { return cracked[index]; } static int cmp_exact(char *source, int index) { return 1; } static void kwallet_set_key(char *key, int index) { int saved_len = strlen(key); if (saved_len > PLAINTEXT_LENGTH) saved_len = PLAINTEXT_LENGTH; memcpy(saved_key[index], key, saved_len); saved_key[index][saved_len] = 0; } static char *get_key(int index) { return saved_key[index]; } struct fmt_main fmt_kwallet = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, { NULL }, { FORMAT_TAG }, kwallet_tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, fmt_default_binary, get_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash }, fmt_default_salt_hash, NULL, set_salt, kwallet_set_key, get_key, fmt_default_clear_keys, crypt_all, { fmt_default_get_hash }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
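/*
  Illustrative usage (a hedged sketch, not part of this plugin; helper names,
  options, and file locations may differ between releases): wallet files are
  normally converted to the layouts described above with the kwallet2john
  helper from the jumbo tree, then attacked via the format label registered
  here, e.g.

    ./kwallet2john kdewallet.kwl > wallet.in
    ./john --format=kwallet wallet.in
*/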
statistic.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % SSSSS TTTTT AAA TTTTT IIIII SSSSS TTTTT IIIII CCCC % % SS T A A T I SS T I C % % SSS T AAAAA T I SSS T I C % % SS T A A T I SS T I C % % SSSSS T A A T IIIII SSSSS T IIIII CCCC % % % % % % MagickCore Image Statistical Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/accelerate-private.h" #include "MagickCore/animate.h" #include "MagickCore/artifact.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/client.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/compress.h" #include "MagickCore/constitute.h" #include "MagickCore/display.h" #include "MagickCore/draw.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/geometry.h" #include "MagickCore/list.h" #include "MagickCore/image-private.h" #include "MagickCore/magic.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/module.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/paint.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/profile.h" #include "MagickCore/property.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/random-private.h" #include "MagickCore/resource_.h" #include "MagickCore/segment.h" #include "MagickCore/semaphore.h" #include "MagickCore/signature-private.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/thread-private.h" #include "MagickCore/timer.h" #include "MagickCore/utility.h" #include "MagickCore/version.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % E v a l u a t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % EvaluateImage() applies a value to the image with an arithmetic, relational, % or logical operator to an image. Use these operations to lighten or darken % an image, to increase or decrease contrast in an image, or to produce the % "negative" of an image. 
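%
%  For example, halving every channel value darkens an image (an illustrative
%  call, assuming `image' and `exception' are valid):
%
%    (void) EvaluateImage(image,MultiplyEvaluateOperator,0.5,exception);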
% % The format of the EvaluateImage method is: % % MagickBooleanType EvaluateImage(Image *image, % const MagickEvaluateOperator op,const double value, % ExceptionInfo *exception) % MagickBooleanType EvaluateImages(Image *images, % const MagickEvaluateOperator op,const double value, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o op: A channel op. % % o value: A value value. % % o exception: return any errors or warnings in this structure. % */ typedef struct _PixelChannels { double channel[CompositePixelChannel]; } PixelChannels; static PixelChannels **DestroyPixelThreadSet(PixelChannels **pixels) { register ssize_t i; assert(pixels != (PixelChannels **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (pixels[i] != (PixelChannels *) NULL) pixels[i]=(PixelChannels *) RelinquishMagickMemory(pixels[i]); pixels=(PixelChannels **) RelinquishMagickMemory(pixels); return(pixels); } static PixelChannels **AcquirePixelThreadSet(const Image *image) { PixelChannels **pixels; register ssize_t i; size_t number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); pixels=(PixelChannels **) AcquireQuantumMemory(number_threads, sizeof(*pixels)); if (pixels == (PixelChannels **) NULL) return((PixelChannels **) NULL); (void) ResetMagickMemory(pixels,0,number_threads*sizeof(*pixels)); for (i=0; i < (ssize_t) number_threads; i++) { register ssize_t j; pixels[i]=(PixelChannels *) AcquireQuantumMemory(image->columns, sizeof(**pixels)); if (pixels[i] == (PixelChannels *) NULL) return(DestroyPixelThreadSet(pixels)); for (j=0; j < (ssize_t) image->columns; j++) { register ssize_t k; for (k=0; k < MaxPixelChannels; k++) pixels[i][j].channel[k]=0.0; } } return(pixels); } static inline double EvaluateMax(const double x,const double y) { if (x > y) return(x); return(y); } #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static int IntensityCompare(const void *x,const void *y) { const PixelChannels *color_1, *color_2; double distance; register ssize_t i; color_1=(const PixelChannels *) x; color_2=(const PixelChannels *) y; distance=0.0; for (i=0; i < MaxPixelChannels; i++) distance+=color_1->channel[i]-(double) color_2->channel[i]; return(distance < 0 ? -1 : distance > 0 ? 1 : 0); } #if defined(__cplusplus) || defined(c_plusplus) } #endif static double ApplyEvaluateOperator(RandomInfo *random_info,const Quantum pixel, const MagickEvaluateOperator op,const double value) { double result; result=0.0; switch (op) { case UndefinedEvaluateOperator: break; case AbsEvaluateOperator: { result=(double) fabs((double) (pixel+value)); break; } case AddEvaluateOperator: { result=(double) (pixel+value); break; } case AddModulusEvaluateOperator: { /* This returns a 'floored modulus' of the addition which is a positive result. It differs from % or fmod() that returns a 'truncated modulus' result, where floor() is replaced by trunc() and could return a negative result (which is clipped). */ result=pixel+value; result-=(QuantumRange+1.0)*floor((double) result/(QuantumRange+1.0)); break; } case AndEvaluateOperator: { result=(double) ((size_t) pixel & (size_t) (value+0.5)); break; } case CosineEvaluateOperator: { result=(double) (QuantumRange*(0.5*cos((double) (2.0*MagickPI* QuantumScale*pixel*value))+0.5)); break; } case DivideEvaluateOperator: { result=pixel/(value == 0.0 ? 
1.0 : value); break; } case ExponentialEvaluateOperator: { result=(double) (QuantumRange*exp((double) (value*QuantumScale*pixel))); break; } case GaussianNoiseEvaluateOperator: { result=(double) GenerateDifferentialNoise(random_info,pixel, GaussianNoise,value); break; } case ImpulseNoiseEvaluateOperator: { result=(double) GenerateDifferentialNoise(random_info,pixel,ImpulseNoise, value); break; } case LaplacianNoiseEvaluateOperator: { result=(double) GenerateDifferentialNoise(random_info,pixel, LaplacianNoise,value); break; } case LeftShiftEvaluateOperator: { result=(double) ((size_t) pixel << (size_t) (value+0.5)); break; } case LogEvaluateOperator: { if ((QuantumScale*pixel) >= MagickEpsilon) result=(double) (QuantumRange*log((double) (QuantumScale*value*pixel+ 1.0))/log((double) (value+1.0))); break; } case MaxEvaluateOperator: { result=(double) EvaluateMax((double) pixel,value); break; } case MeanEvaluateOperator: { result=(double) (pixel+value); break; } case MedianEvaluateOperator: { result=(double) (pixel+value); break; } case MinEvaluateOperator: { result=(double) MagickMin((double) pixel,value); break; } case MultiplicativeNoiseEvaluateOperator: { result=(double) GenerateDifferentialNoise(random_info,pixel, MultiplicativeGaussianNoise,value); break; } case MultiplyEvaluateOperator: { result=(double) (value*pixel); break; } case OrEvaluateOperator: { result=(double) ((size_t) pixel | (size_t) (value+0.5)); break; } case PoissonNoiseEvaluateOperator: { result=(double) GenerateDifferentialNoise(random_info,pixel,PoissonNoise, value); break; } case PowEvaluateOperator: { result=(double) (QuantumRange*pow((double) (QuantumScale*pixel),(double) value)); break; } case RightShiftEvaluateOperator: { result=(double) ((size_t) pixel >> (size_t) (value+0.5)); break; } case RootMeanSquareEvaluateOperator: { result=(double) (pixel*pixel+value); break; } case SetEvaluateOperator: { result=value; break; } case SineEvaluateOperator: { result=(double) (QuantumRange*(0.5*sin((double) (2.0*MagickPI* QuantumScale*pixel*value))+0.5)); break; } case SubtractEvaluateOperator: { result=(double) (pixel-value); break; } case SumEvaluateOperator: { result=(double) (pixel+value); break; } case ThresholdEvaluateOperator: { result=(double) (((double) pixel <= value) ? 0 : QuantumRange); break; } case ThresholdBlackEvaluateOperator: { result=(double) (((double) pixel <= value) ? 0 : pixel); break; } case ThresholdWhiteEvaluateOperator: { result=(double) (((double) pixel > value) ? 
QuantumRange : pixel); break; } case UniformNoiseEvaluateOperator: { result=(double) GenerateDifferentialNoise(random_info,pixel,UniformNoise, value); break; } case XorEvaluateOperator: { result=(double) ((size_t) pixel ^ (size_t) (value+0.5)); break; } } return(result); } MagickExport Image *EvaluateImages(const Image *images, const MagickEvaluateOperator op,ExceptionInfo *exception) { #define EvaluateImageTag "Evaluate/Image" CacheView *evaluate_view; Image *image; MagickBooleanType status; MagickOffsetType progress; PixelChannels **magick_restrict evaluate_pixels; RandomInfo **magick_restrict random_info; size_t number_images; ssize_t y; #if defined(MAGICKCORE_OPENMP_SUPPORT) unsigned long key; #endif assert(images != (Image *) NULL); assert(images->signature == MagickCoreSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=CloneImage(images,images->columns,images->rows,MagickTrue, exception); if (image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) { image=DestroyImage(image); return((Image *) NULL); } number_images=GetImageListLength(images); evaluate_pixels=AcquirePixelThreadSet(images); if (evaluate_pixels == (PixelChannels **) NULL) { image=DestroyImage(image); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename); return((Image *) NULL); } /* Evaluate image pixels. */ status=MagickTrue; progress=0; random_info=AcquireRandomInfoThreadSet(); evaluate_view=AcquireAuthenticCacheView(image,exception); if (op == MedianEvaluateOperator) { #if defined(MAGICKCORE_OPENMP_SUPPORT) key=GetRandomSecretKey(random_info[0]); #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,images,image->rows,key == ~0UL) #endif for (y=0; y < (ssize_t) image->rows; y++) { CacheView *image_view; const Image *next; const int id = GetOpenMPThreadId(); register PixelChannels *evaluate_pixel; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(evaluate_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } evaluate_pixel=evaluate_pixels[id]; for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t j, k; for (j=0; j < (ssize_t) number_images; j++) for (k=0; k < MaxPixelChannels; k++) evaluate_pixel[j].channel[k]=0.0; next=images; for (j=0; j < (ssize_t) number_images; j++) { register const Quantum *p; register ssize_t i; image_view=AcquireVirtualCacheView(next,exception); p=GetCacheViewVirtualPixels(image_view,x,y,1,1,exception); if (p == (const Quantum *) NULL) { image_view=DestroyCacheView(image_view); break; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait evaluate_traits=GetPixelChannelTraits(image,channel); PixelTrait traits=GetPixelChannelTraits(next,channel); if ((traits == UndefinedPixelTrait) || (evaluate_traits == UndefinedPixelTrait)) continue; if ((evaluate_traits & UpdatePixelTrait) == 0) continue; evaluate_pixel[j].channel[i]=ApplyEvaluateOperator( random_info[id],GetPixelChannel(image,channel,p),op, evaluate_pixel[j].channel[i]); } image_view=DestroyCacheView(image_view); next=GetNextImageInList(next); } qsort((void *) 
evaluate_pixel,number_images,sizeof(*evaluate_pixel), IntensityCompare); for (k=0; k < (ssize_t) GetPixelChannels(image); k++) q[k]=ClampToQuantum(evaluate_pixel[j/2].channel[k]); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(evaluate_view,exception) == MagickFalse) status=MagickFalse; if (images->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_EvaluateImages) #endif proceed=SetImageProgress(images,EvaluateImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } } else { #if defined(MAGICKCORE_OPENMP_SUPPORT) key=GetRandomSecretKey(random_info[0]); #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,images,image->rows,key == ~0UL) #endif for (y=0; y < (ssize_t) image->rows; y++) { CacheView *image_view; const Image *next; const int id = GetOpenMPThreadId(); register ssize_t i, x; register PixelChannels *evaluate_pixel; register Quantum *magick_restrict q; ssize_t j; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(evaluate_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } evaluate_pixel=evaluate_pixels[id]; for (j=0; j < (ssize_t) image->columns; j++) for (i=0; i < MaxPixelChannels; i++) evaluate_pixel[j].channel[i]=0.0; next=images; for (j=0; j < (ssize_t) number_images; j++) { register const Quantum *p; image_view=AcquireVirtualCacheView(next,exception); p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1, exception); if (p == (const Quantum *) NULL) { image_view=DestroyCacheView(image_view); break; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; if (GetPixelWriteMask(next,p) == 0) { p+=GetPixelChannels(next); continue; } for (i=0; i < (ssize_t) GetPixelChannels(next); i++) { PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(next,channel); PixelTrait evaluate_traits=GetPixelChannelTraits(image,channel); if ((traits == UndefinedPixelTrait) || (evaluate_traits == UndefinedPixelTrait)) continue; if ((traits & UpdatePixelTrait) == 0) continue; evaluate_pixel[x].channel[i]=ApplyEvaluateOperator( random_info[id],GetPixelChannel(image,channel,p),j == 0 ? 
AddEvaluateOperator : op,evaluate_pixel[x].channel[i]); } p+=GetPixelChannels(next); } image_view=DestroyCacheView(image_view); next=GetNextImageInList(next); } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; switch (op) { case MeanEvaluateOperator: { for (i=0; i < (ssize_t) GetPixelChannels(image); i++) evaluate_pixel[x].channel[i]/=(double) number_images; break; } case MultiplyEvaluateOperator: { for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { register ssize_t j; for (j=0; j < (ssize_t) (number_images-1); j++) evaluate_pixel[x].channel[i]*=QuantumScale; } break; } case RootMeanSquareEvaluateOperator: { for (i=0; i < (ssize_t) GetPixelChannels(image); i++) evaluate_pixel[x].channel[i]=sqrt(evaluate_pixel[x].channel[i]/ number_images); break; } default: break; } } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; if (GetPixelWriteMask(image,q) == 0) { q+=GetPixelChannels(image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; if ((traits & UpdatePixelTrait) == 0) continue; q[i]=ClampToQuantum(evaluate_pixel[x].channel[i]); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(evaluate_view,exception) == MagickFalse) status=MagickFalse; if (images->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_EvaluateImages) #endif proceed=SetImageProgress(images,EvaluateImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } } evaluate_view=DestroyCacheView(evaluate_view); evaluate_pixels=DestroyPixelThreadSet(evaluate_pixels); random_info=DestroyRandomInfoThreadSet(random_info); if (status == MagickFalse) image=DestroyImage(image); return(image); } MagickExport MagickBooleanType EvaluateImage(Image *image, const MagickEvaluateOperator op,const double value,ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; RandomInfo **magick_restrict random_info; ssize_t y; #if defined(MAGICKCORE_OPENMP_SUPPORT) unsigned long key; #endif assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); status=MagickTrue; progress=0; random_info=AcquireRandomInfoThreadSet(); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) key=GetRandomSecretKey(random_info[0]); #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,key == ~0UL) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double result; register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); if (traits == 
UndefinedPixelTrait) continue; if (((traits & CopyPixelTrait) != 0) || (GetPixelWriteMask(image,q) == 0)) continue; result=ApplyEvaluateOperator(random_info[id],q[i],op,value); if (op == MeanEvaluateOperator) result/=2.0; q[i]=ClampToQuantum(result); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_EvaluateImage) #endif proceed=SetImageProgress(image,EvaluateImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); random_info=DestroyRandomInfoThreadSet(random_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % F u n c t i o n I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % FunctionImage() applies a value to the image with an arithmetic, relational, % or logical operator to an image. Use these operations to lighten or darken % an image, to increase or decrease contrast in an image, or to produce the % "negative" of an image. % % The format of the FunctionImage method is: % % MagickBooleanType FunctionImage(Image *image, % const MagickFunction function,const ssize_t number_parameters, % const double *parameters,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o function: A channel function. % % o parameters: one or more parameters. % % o exception: return any errors or warnings in this structure. % */ static Quantum ApplyFunction(Quantum pixel,const MagickFunction function, const size_t number_parameters,const double *parameters, ExceptionInfo *exception) { double result; register ssize_t i; (void) exception; result=0.0; switch (function) { case PolynomialFunction: { /* Polynomial: polynomial constants, highest to lowest order (e.g. c0*x^3+ c1*x^2+c2*x+c3). */ result=0.0; for (i=0; i < (ssize_t) number_parameters; i++) result=result*QuantumScale*pixel+parameters[i]; result*=QuantumRange; break; } case SinusoidFunction: { double amplitude, bias, frequency, phase; /* Sinusoid: frequency, phase, amplitude, bias. */ frequency=(number_parameters >= 1) ? parameters[0] : 1.0; phase=(number_parameters >= 2) ? parameters[1] : 0.0; amplitude=(number_parameters >= 3) ? parameters[2] : 0.5; bias=(number_parameters >= 4) ? parameters[3] : 0.5; result=(double) (QuantumRange*(amplitude*sin((double) (2.0* MagickPI*(frequency*QuantumScale*pixel+phase/360.0)))+bias)); break; } case ArcsinFunction: { double bias, center, range, width; /* Arcsin (peged at range limits for invalid results): width, center, range, and bias. */ width=(number_parameters >= 1) ? parameters[0] : 1.0; center=(number_parameters >= 2) ? parameters[1] : 0.5; range=(number_parameters >= 3) ? parameters[2] : 1.0; bias=(number_parameters >= 4) ? parameters[3] : 0.5; result=2.0/width*(QuantumScale*pixel-center); if ( result <= -1.0 ) result=bias-range/2.0; else if (result >= 1.0) result=bias+range/2.0; else result=(double) (range/MagickPI*asin((double) result)+bias); result*=QuantumRange; break; } case ArctanFunction: { double center, bias, range, slope; /* Arctan: slope, center, range, and bias. */ slope=(number_parameters >= 1) ? parameters[0] : 1.0; center=(number_parameters >= 2) ? parameters[1] : 0.5; range=(number_parameters >= 3) ? 
parameters[2] : 1.0; bias=(number_parameters >= 4) ? parameters[3] : 0.5; result=(double) (MagickPI*slope*(QuantumScale*pixel-center)); result=(double) (QuantumRange*(range/MagickPI*atan((double) result)+bias)); break; } case UndefinedFunction: break; } return(ClampToQuantum(result)); } MagickExport MagickBooleanType FunctionImage(Image *image, const MagickFunction function,const size_t number_parameters, const double *parameters,ExceptionInfo *exception) { #define FunctionImageTag "Function/Image " CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) if (AccelerateFunctionImage(image,function,number_parameters,parameters, exception) != MagickFalse) return(MagickTrue); #endif if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; if (GetPixelWriteMask(image,q) == 0) { q+=GetPixelChannels(image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; if ((traits & UpdatePixelTrait) == 0) continue; q[i]=ApplyFunction(q[i],function,number_parameters,parameters, exception); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_FunctionImage) #endif proceed=SetImageProgress(image,FunctionImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e E n t r o p y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageEntropy() returns the entropy of one or more image channels. % % The format of the GetImageEntropy method is: % % MagickBooleanType GetImageEntropy(const Image *image,double *entropy, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o entropy: the average entropy of the selected channels. % % o exception: return any errors or warnings in this structure. 
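%
%  Illustrative call (assuming `image', `exception', and `status' are
%  declared):
%
%    double
%      entropy;
%
%    status=GetImageEntropy(image,&entropy,exception);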
% */ MagickExport MagickBooleanType GetImageEntropy(const Image *image, double *entropy,ExceptionInfo *exception) { double area; ChannelStatistics *channel_statistics; register ssize_t i; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); channel_statistics=GetImageStatistics(image,exception); if (channel_statistics == (ChannelStatistics *) NULL) return(MagickFalse); area=0.0; channel_statistics[CompositePixelChannel].entropy=0.0; channel_statistics[CompositePixelChannel].standard_deviation=0.0; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; if ((traits & UpdatePixelTrait) == 0) continue; channel_statistics[CompositePixelChannel].entropy+= channel_statistics[i].entropy; area++; } if (area > MagickEpsilon) { channel_statistics[CompositePixelChannel].entropy/=area; channel_statistics[CompositePixelChannel].standard_deviation= sqrt(channel_statistics[CompositePixelChannel].standard_deviation/area); } *entropy=channel_statistics[CompositePixelChannel].entropy; channel_statistics=(ChannelStatistics *) RelinquishMagickMemory( channel_statistics); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e E x t r e m a % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageExtrema() returns the extrema of one or more image channels. % % The format of the GetImageExtrema method is: % % MagickBooleanType GetImageExtrema(const Image *image,size_t *minima, % size_t *maxima,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o minima: the minimum value in the channel. % % o maxima: the maximum value in the channel. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType GetImageExtrema(const Image *image, size_t *minima,size_t *maxima,ExceptionInfo *exception) { double max, min; MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=GetImageRange(image,&min,&max,exception); *minima=(size_t) ceil(min-0.5); *maxima=(size_t) floor(max+0.5); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e K u r t o s i s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageKurtosis() returns the kurtosis and skewness of one or more image % channels. % % The format of the GetImageKurtosis method is: % % MagickBooleanType GetImageKurtosis(const Image *image,double *kurtosis, % double *skewness,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o kurtosis: the kurtosis of the channel. % % o skewness: the skewness of the channel. % % o exception: return any errors or warnings in this structure. 
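%
%  For instance (an illustrative sketch, assuming `image' and `exception' are
%  valid):
%
%    double
%      kurtosis,
%      skewness;
%
%    status=GetImageKurtosis(image,&kurtosis,&skewness,exception);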
% */ MagickExport MagickBooleanType GetImageKurtosis(const Image *image, double *kurtosis,double *skewness,ExceptionInfo *exception) { CacheView *image_view; double area, mean, standard_deviation, sum_squares, sum_cubes, sum_fourth_power; MagickBooleanType status; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=MagickTrue; *kurtosis=0.0; *skewness=0.0; area=0.0; mean=0.0; standard_deviation=0.0; sum_squares=0.0; sum_cubes=0.0; sum_fourth_power=0.0; image_view=AcquireVirtualCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; if (GetPixelWriteMask(image,p) == 0) { p+=GetPixelChannels(image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; if ((traits & UpdatePixelTrait) == 0) continue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetImageKurtosis) #endif { mean+=p[i]; sum_squares+=(double) p[i]*p[i]; sum_cubes+=(double) p[i]*p[i]*p[i]; sum_fourth_power+=(double) p[i]*p[i]*p[i]*p[i]; area++; } } p+=GetPixelChannels(image); } } image_view=DestroyCacheView(image_view); if (area != 0.0) { mean/=area; sum_squares/=area; sum_cubes/=area; sum_fourth_power/=area; } standard_deviation=sqrt(sum_squares-(mean*mean)); if (standard_deviation != 0.0) { *kurtosis=sum_fourth_power-4.0*mean*sum_cubes+6.0*mean*mean*sum_squares- 3.0*mean*mean*mean*mean; *kurtosis/=standard_deviation*standard_deviation*standard_deviation* standard_deviation; *kurtosis-=3.0; *skewness=sum_cubes-3.0*mean*sum_squares+2.0*mean*mean*mean; *skewness/=standard_deviation*standard_deviation*standard_deviation; } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e M e a n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageMean() returns the mean and standard deviation of one or more image % channels. % % The format of the GetImageMean method is: % % MagickBooleanType GetImageMean(const Image *image,double *mean, % double *standard_deviation,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o mean: the average value in the channel. % % o standard_deviation: the standard deviation of the channel. % % o exception: return any errors or warnings in this structure. 
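%
%  Typical call (illustrative only, assuming `image' and `exception' are
%  valid):
%
%    double
%      mean,
%      standard_deviation;
%
%    status=GetImageMean(image,&mean,&standard_deviation,exception);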
% */ MagickExport MagickBooleanType GetImageMean(const Image *image,double *mean, double *standard_deviation,ExceptionInfo *exception) { double area; ChannelStatistics *channel_statistics; register ssize_t i; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); channel_statistics=GetImageStatistics(image,exception); if (channel_statistics == (ChannelStatistics *) NULL) return(MagickFalse); area=0.0; channel_statistics[CompositePixelChannel].mean=0.0; channel_statistics[CompositePixelChannel].standard_deviation=0.0; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; if ((traits & UpdatePixelTrait) == 0) continue; channel_statistics[CompositePixelChannel].mean+=channel_statistics[i].mean; channel_statistics[CompositePixelChannel].standard_deviation+= channel_statistics[i].variance-channel_statistics[i].mean* channel_statistics[i].mean; area++; } if (area > MagickEpsilon) { channel_statistics[CompositePixelChannel].mean/=area; channel_statistics[CompositePixelChannel].standard_deviation= sqrt(channel_statistics[CompositePixelChannel].standard_deviation/area); } *mean=channel_statistics[CompositePixelChannel].mean; *standard_deviation= channel_statistics[CompositePixelChannel].standard_deviation; channel_statistics=(ChannelStatistics *) RelinquishMagickMemory( channel_statistics); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e M o m e n t s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageMoments() returns the normalized moments of one or more image % channels. % % The format of the GetImageMoments method is: % % ChannelMoments *GetImageMoments(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ static size_t GetImageChannels(const Image *image) { register ssize_t i; size_t channels; channels=0; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) != 0) channels++; } return((size_t) (channels == 0 ? 
1 : channels)); } MagickExport ChannelMoments *GetImageMoments(const Image *image, ExceptionInfo *exception) { #define MaxNumberImageMoments 8 CacheView *image_view; ChannelMoments *channel_moments; double M00[MaxPixelChannels+1], M01[MaxPixelChannels+1], M02[MaxPixelChannels+1], M03[MaxPixelChannels+1], M10[MaxPixelChannels+1], M11[MaxPixelChannels+1], M12[MaxPixelChannels+1], M20[MaxPixelChannels+1], M21[MaxPixelChannels+1], M22[MaxPixelChannels+1], M30[MaxPixelChannels+1]; PointInfo centroid[MaxPixelChannels+1]; ssize_t channel, y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); channel_moments=(ChannelMoments *) AcquireQuantumMemory(MaxPixelChannels+1, sizeof(*channel_moments)); if (channel_moments == (ChannelMoments *) NULL) return(channel_moments); (void) ResetMagickMemory(channel_moments,0,(MaxPixelChannels+1)* sizeof(*channel_moments)); (void) ResetMagickMemory(centroid,0,sizeof(centroid)); (void) ResetMagickMemory(M00,0,sizeof(M00)); (void) ResetMagickMemory(M01,0,sizeof(M01)); (void) ResetMagickMemory(M02,0,sizeof(M02)); (void) ResetMagickMemory(M03,0,sizeof(M03)); (void) ResetMagickMemory(M10,0,sizeof(M10)); (void) ResetMagickMemory(M11,0,sizeof(M11)); (void) ResetMagickMemory(M12,0,sizeof(M12)); (void) ResetMagickMemory(M20,0,sizeof(M20)); (void) ResetMagickMemory(M21,0,sizeof(M21)); (void) ResetMagickMemory(M22,0,sizeof(M22)); (void) ResetMagickMemory(M30,0,sizeof(M30)); image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register ssize_t x; /* Compute center of mass (centroid). */ p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; if (GetPixelWriteMask(image,p) == 0) { p+=GetPixelChannels(image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; if ((traits & UpdatePixelTrait) == 0) continue; M00[channel]+=QuantumScale*p[i]; M00[MaxPixelChannels]+=QuantumScale*p[i]; M10[channel]+=x*QuantumScale*p[i]; M10[MaxPixelChannels]+=x*QuantumScale*p[i]; M01[channel]+=y*QuantumScale*p[i]; M01[MaxPixelChannels]+=y*QuantumScale*p[i]; } p+=GetPixelChannels(image); } } for (channel=0; channel <= MaxPixelChannels; channel++) { /* Compute center of mass (centroid). */ if (M00[channel] < MagickEpsilon) { M00[channel]+=MagickEpsilon; centroid[channel].x=(double) image->columns/2.0; centroid[channel].y=(double) image->rows/2.0; continue; } M00[channel]+=MagickEpsilon; centroid[channel].x=M10[channel]/M00[channel]; centroid[channel].y=M01[channel]/M00[channel]; } for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register ssize_t x; /* Compute the image moments. 
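      Each accumulation below is a central moment: the channel value (scaled
      by QuantumScale) is weighted by powers of (x-centroid.x) and
      (y-centroid.y), summed per channel and, in the MaxPixelChannels slot,
      across all updated channels.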
*/ p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; if (GetPixelWriteMask(image,p) == 0) { p+=GetPixelChannels(image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; if ((traits & UpdatePixelTrait) == 0) continue; M11[channel]+=(x-centroid[channel].x)*(y-centroid[channel].y)* QuantumScale*p[i]; M11[MaxPixelChannels]+=(x-centroid[channel].x)*(y-centroid[channel].y)* QuantumScale*p[i]; M20[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)* QuantumScale*p[i]; M20[MaxPixelChannels]+=(x-centroid[channel].x)*(x-centroid[channel].x)* QuantumScale*p[i]; M02[channel]+=(y-centroid[channel].y)*(y-centroid[channel].y)* QuantumScale*p[i]; M02[MaxPixelChannels]+=(y-centroid[channel].y)*(y-centroid[channel].y)* QuantumScale*p[i]; M21[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)* (y-centroid[channel].y)*QuantumScale*p[i]; M21[MaxPixelChannels]+=(x-centroid[channel].x)*(x-centroid[channel].x)* (y-centroid[channel].y)*QuantumScale*p[i]; M12[channel]+=(x-centroid[channel].x)*(y-centroid[channel].y)* (y-centroid[channel].y)*QuantumScale*p[i]; M12[MaxPixelChannels]+=(x-centroid[channel].x)*(y-centroid[channel].y)* (y-centroid[channel].y)*QuantumScale*p[i]; M22[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)* (y-centroid[channel].y)*(y-centroid[channel].y)*QuantumScale*p[i]; M22[MaxPixelChannels]+=(x-centroid[channel].x)*(x-centroid[channel].x)* (y-centroid[channel].y)*(y-centroid[channel].y)*QuantumScale*p[i]; M30[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)* (x-centroid[channel].x)*QuantumScale*p[i]; M30[MaxPixelChannels]+=(x-centroid[channel].x)*(x-centroid[channel].x)* (x-centroid[channel].x)*QuantumScale*p[i]; M03[channel]+=(y-centroid[channel].y)*(y-centroid[channel].y)* (y-centroid[channel].y)*QuantumScale*p[i]; M03[MaxPixelChannels]+=(y-centroid[channel].y)*(y-centroid[channel].y)* (y-centroid[channel].y)*QuantumScale*p[i]; } p+=GetPixelChannels(image); } } M00[MaxPixelChannels]/=GetImageChannels(image); M01[MaxPixelChannels]/=GetImageChannels(image); M02[MaxPixelChannels]/=GetImageChannels(image); M03[MaxPixelChannels]/=GetImageChannels(image); M10[MaxPixelChannels]/=GetImageChannels(image); M11[MaxPixelChannels]/=GetImageChannels(image); M12[MaxPixelChannels]/=GetImageChannels(image); M20[MaxPixelChannels]/=GetImageChannels(image); M21[MaxPixelChannels]/=GetImageChannels(image); M22[MaxPixelChannels]/=GetImageChannels(image); M30[MaxPixelChannels]/=GetImageChannels(image); for (channel=0; channel <= MaxPixelChannels; channel++) { /* Compute elliptical angle, major and minor axes, eccentricity, & intensity. 
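      The major and minor axes follow from the second central moments,
      axis^2 = (2/M00)*((M20+M02) +/- sqrt(4*M11^2 + (M20-M02)^2)), the angle
      from 0.5*atan(2*M11/(M20-M02)), and the ellipse intensity is M00 divided
      by the ellipse area (pi*major*minor).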
*/ channel_moments[channel].centroid=centroid[channel]; channel_moments[channel].ellipse_axis.x=sqrt((2.0/M00[channel])* ((M20[channel]+M02[channel])+sqrt(4.0*M11[channel]*M11[channel]+ (M20[channel]-M02[channel])*(M20[channel]-M02[channel])))); channel_moments[channel].ellipse_axis.y=sqrt((2.0/M00[channel])* ((M20[channel]+M02[channel])-sqrt(4.0*M11[channel]*M11[channel]+ (M20[channel]-M02[channel])*(M20[channel]-M02[channel])))); channel_moments[channel].ellipse_angle=RadiansToDegrees(0.5*atan(2.0* M11[channel]/(M20[channel]-M02[channel]+MagickEpsilon))); channel_moments[channel].ellipse_eccentricity=sqrt(1.0-( channel_moments[channel].ellipse_axis.y/ (channel_moments[channel].ellipse_axis.x+MagickEpsilon))); channel_moments[channel].ellipse_intensity=M00[channel]/ (MagickPI*channel_moments[channel].ellipse_axis.x* channel_moments[channel].ellipse_axis.y+MagickEpsilon); } for (channel=0; channel <= MaxPixelChannels; channel++) { /* Normalize image moments. */ M10[channel]=0.0; M01[channel]=0.0; M11[channel]/=pow(M00[channel],1.0+(1.0+1.0)/2.0); M20[channel]/=pow(M00[channel],1.0+(2.0+0.0)/2.0); M02[channel]/=pow(M00[channel],1.0+(0.0+2.0)/2.0); M21[channel]/=pow(M00[channel],1.0+(2.0+1.0)/2.0); M12[channel]/=pow(M00[channel],1.0+(1.0+2.0)/2.0); M22[channel]/=pow(M00[channel],1.0+(2.0+2.0)/2.0); M30[channel]/=pow(M00[channel],1.0+(3.0+0.0)/2.0); M03[channel]/=pow(M00[channel],1.0+(0.0+3.0)/2.0); M00[channel]=1.0; } image_view=DestroyCacheView(image_view); for (channel=0; channel <= MaxPixelChannels; channel++) { /* Compute Hu invariant moments. */ channel_moments[channel].invariant[0]=M20[channel]+M02[channel]; channel_moments[channel].invariant[1]=(M20[channel]-M02[channel])* (M20[channel]-M02[channel])+4.0*M11[channel]*M11[channel]; channel_moments[channel].invariant[2]=(M30[channel]-3.0*M12[channel])* (M30[channel]-3.0*M12[channel])+(3.0*M21[channel]-M03[channel])* (3.0*M21[channel]-M03[channel]); channel_moments[channel].invariant[3]=(M30[channel]+M12[channel])* (M30[channel]+M12[channel])+(M21[channel]+M03[channel])* (M21[channel]+M03[channel]); channel_moments[channel].invariant[4]=(M30[channel]-3.0*M12[channel])* (M30[channel]+M12[channel])*((M30[channel]+M12[channel])* (M30[channel]+M12[channel])-3.0*(M21[channel]+M03[channel])* (M21[channel]+M03[channel]))+(3.0*M21[channel]-M03[channel])* (M21[channel]+M03[channel])*(3.0*(M30[channel]+M12[channel])* (M30[channel]+M12[channel])-(M21[channel]+M03[channel])* (M21[channel]+M03[channel])); channel_moments[channel].invariant[5]=(M20[channel]-M02[channel])* ((M30[channel]+M12[channel])*(M30[channel]+M12[channel])- (M21[channel]+M03[channel])*(M21[channel]+M03[channel]))+ 4.0*M11[channel]*(M30[channel]+M12[channel])*(M21[channel]+M03[channel]); channel_moments[channel].invariant[6]=(3.0*M21[channel]-M03[channel])* (M30[channel]+M12[channel])*((M30[channel]+M12[channel])* (M30[channel]+M12[channel])-3.0*(M21[channel]+M03[channel])* (M21[channel]+M03[channel]))-(M30[channel]-3*M12[channel])* (M21[channel]+M03[channel])*(3.0*(M30[channel]+M12[channel])* (M30[channel]+M12[channel])-(M21[channel]+M03[channel])* (M21[channel]+M03[channel])); channel_moments[channel].invariant[7]=M11[channel]*((M30[channel]+ M12[channel])*(M30[channel]+M12[channel])-(M03[channel]+M21[channel])* (M03[channel]+M21[channel]))-(M20[channel]-M02[channel])* (M30[channel]+M12[channel])*(M03[channel]+M21[channel]); } if (y < (ssize_t) image->rows) channel_moments=(ChannelMoments *) RelinquishMagickMemory(channel_moments); return(channel_moments); } /* 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e C h a n n e l P e r c e p t u a l H a s h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImagePerceptualHash() returns the perceptual hash of one or more % image channels. % % The format of the GetImagePerceptualHash method is: % % ChannelPerceptualHash *GetImagePerceptualHash(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ static inline double MagickLog10(const double x) { #define Log10Epsilon (1.0e-11) if (fabs(x) < Log10Epsilon) return(log10(Log10Epsilon)); return(log10(fabs(x))); } MagickExport ChannelPerceptualHash *GetImagePerceptualHash(const Image *image, ExceptionInfo *exception) { ChannelPerceptualHash *perceptual_hash; char *colorspaces, *q; const char *artifact; MagickBooleanType status; register char *p; register ssize_t i; perceptual_hash=(ChannelPerceptualHash *) AcquireQuantumMemory( MaxPixelChannels+1UL,sizeof(*perceptual_hash)); if (perceptual_hash == (ChannelPerceptualHash *) NULL) return((ChannelPerceptualHash *) NULL); artifact=GetImageArtifact(image,"phash:colorspaces"); if (artifact != NULL) colorspaces=AcquireString(artifact); else colorspaces=AcquireString("sRGB,HCLp"); perceptual_hash[0].number_colorspaces=0; perceptual_hash[0].number_channels=0; q=colorspaces; for (i=0; (p=StringToken(",",&q)) != (char *) NULL; i++) { ChannelMoments *moments; Image *hash_image; size_t j; ssize_t channel, colorspace; if (i >= MaximumNumberOfPerceptualColorspaces) break; colorspace=ParseCommandOption(MagickColorspaceOptions,MagickFalse,p); if (colorspace < 0) break; perceptual_hash[0].colorspace[i]=(ColorspaceType) colorspace; hash_image=BlurImage(image,0.0,1.0,exception); if (hash_image == (Image *) NULL) break; hash_image->depth=8; status=TransformImageColorspace(hash_image,(ColorspaceType) colorspace, exception); if (status == MagickFalse) break; moments=GetImageMoments(hash_image,exception); perceptual_hash[0].number_colorspaces++; perceptual_hash[0].number_channels+=GetImageChannels(hash_image); hash_image=DestroyImage(hash_image); if (moments == (ChannelMoments *) NULL) break; for (channel=0; channel <= MaxPixelChannels; channel++) for (j=0; j < MaximumNumberOfImageMoments; j++) perceptual_hash[channel].phash[i][j]= (-MagickLog10(moments[channel].invariant[j])); moments=(ChannelMoments *) RelinquishMagickMemory(moments); } colorspaces=DestroyString(colorspaces); return(perceptual_hash); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e R a n g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageRange() returns the range of one or more image channels. % % The format of the GetImageRange method is: % % MagickBooleanType GetImageRange(const Image *image,double *minima, % double *maxima,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o minima: the minimum value in the channel. % % o maxima: the maximum value in the channel. % % o exception: return any errors or warnings in this structure. 
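%
%  You can retrieve the range, for example, like this:
%
%    double minima, maxima;
%    (void) GetImageRange(image,&minima,&maxima,exception);
%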
% */ MagickExport MagickBooleanType GetImageRange(const Image *image,double *minima, double *maxima,ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType initialize, status; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=MagickTrue; initialize=MagickTrue; *maxima=0.0; *minima=0.0; image_view=AcquireVirtualCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status,initialize) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { double row_maxima = 0.0, row_minima = 0.0; MagickBooleanType row_initialize; register const Quantum *magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } row_initialize=MagickTrue; for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; if (GetPixelWriteMask(image,p) == 0) { p+=GetPixelChannels(image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; if ((traits & UpdatePixelTrait) == 0) continue; if (row_initialize != MagickFalse) { row_minima=(double) p[i]; row_maxima=(double) p[i]; row_initialize=MagickFalse; } else { if ((double) p[i] < row_minima) row_minima=(double) p[i]; if ((double) p[i] > row_maxima) row_maxima=(double) p[i]; } } p+=GetPixelChannels(image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetImageRange) #endif { if (initialize != MagickFalse) { *minima=row_minima; *maxima=row_maxima; initialize=MagickFalse; } else { if (row_minima < *minima) *minima=row_minima; if (row_maxima > *maxima) *maxima=row_maxima; } } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e S t a t i s t i c s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageStatistics() returns statistics for each channel in the image. The % statistics include the channel depth, its minima, maxima, mean, standard % deviation, kurtosis and skewness. You can access the red channel mean, for % example, like this: % % channel_statistics=GetImageStatistics(image,exception); % red_mean=channel_statistics[RedPixelChannel].mean; % % Use MagickRelinquishMemory() to free the statistics buffer. % % The format of the GetImageStatistics method is: % % ChannelStatistics *GetImageStatistics(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport ChannelStatistics *GetImageStatistics(const Image *image, ExceptionInfo *exception) { ChannelStatistics *channel_statistics; double *histogram; MagickStatusType status; QuantumAny range; register ssize_t i; size_t channels, depth; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels* sizeof(*histogram)); channel_statistics=(ChannelStatistics *) AcquireQuantumMemory( MaxPixelChannels+1,sizeof(*channel_statistics)); if ((channel_statistics == (ChannelStatistics *) NULL) || (histogram == (double *) NULL)) { if (histogram != (double *) NULL) histogram=(double *) RelinquishMagickMemory(histogram); if (channel_statistics != (ChannelStatistics *) NULL) channel_statistics=(ChannelStatistics *) RelinquishMagickMemory( channel_statistics); return(channel_statistics); } (void) ResetMagickMemory(channel_statistics,0,(MaxPixelChannels+1)* sizeof(*channel_statistics)); for (i=0; i <= (ssize_t) MaxPixelChannels; i++) { channel_statistics[i].depth=1; channel_statistics[i].maxima=(-MagickMaximumValue); channel_statistics[i].minima=MagickMaximumValue; } (void) ResetMagickMemory(histogram,0,(MaxMap+1)*MaxPixelChannels* sizeof(*histogram)); for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register ssize_t x; p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; if (GetPixelWriteMask(image,p) == 0) { p+=GetPixelChannels(image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; if (channel_statistics[channel].depth != MAGICKCORE_QUANTUM_DEPTH) { depth=channel_statistics[channel].depth; range=GetQuantumRange(depth); status=p[i] != ScaleAnyToQuantum(ScaleQuantumToAny(p[i],range), range) ? 
MagickTrue : MagickFalse; if (status != MagickFalse) { channel_statistics[channel].depth++; i--; continue; } } if ((double) p[i] < channel_statistics[channel].minima) channel_statistics[channel].minima=(double) p[i]; if ((double) p[i] > channel_statistics[channel].maxima) channel_statistics[channel].maxima=(double) p[i]; channel_statistics[channel].sum+=p[i]; channel_statistics[channel].sum_squared+=(double) p[i]*p[i]; channel_statistics[channel].sum_cubed+=(double) p[i]*p[i]*p[i]; channel_statistics[channel].sum_fourth_power+=(double) p[i]*p[i]*p[i]* p[i]; channel_statistics[channel].area++; histogram[GetPixelChannels(image)*ScaleQuantumToMap( ClampToQuantum((double) p[i]))+i]++; } p+=GetPixelChannels(image); } } for (i=0; i < (ssize_t) MaxPixelChannels; i++) { double area, number_bins; register ssize_t j; area=PerceptibleReciprocal(channel_statistics[i].area); channel_statistics[i].sum*=area; channel_statistics[i].sum_squared*=area; channel_statistics[i].sum_cubed*=area; channel_statistics[i].sum_fourth_power*=area; channel_statistics[i].mean=channel_statistics[i].sum; channel_statistics[i].variance=channel_statistics[i].sum_squared; channel_statistics[i].standard_deviation=sqrt( channel_statistics[i].variance-(channel_statistics[i].mean* channel_statistics[i].mean)); number_bins=0.0; for (j=0; j < (ssize_t) (MaxMap+1U); j++) if (histogram[GetPixelChannels(image)*j+i] > 0.0) number_bins++; for (j=0; j < (ssize_t) (MaxMap+1U); j++) { double count; count=histogram[GetPixelChannels(image)*j+i]*area; if (number_bins > MagickEpsilon) channel_statistics[i].entropy+=-count*MagickLog10(count)/ MagickLog10(number_bins); } } for (i=0; i < (ssize_t) MaxPixelChannels; i++) { PixelTrait traits=GetPixelChannelTraits(image,(PixelChannel) i); if ((traits & UpdatePixelTrait) == 0) continue; channel_statistics[CompositePixelChannel].area+=channel_statistics[i].area; channel_statistics[CompositePixelChannel].minima=MagickMin( channel_statistics[CompositePixelChannel].minima, channel_statistics[i].minima); channel_statistics[CompositePixelChannel].maxima=EvaluateMax( channel_statistics[CompositePixelChannel].maxima, channel_statistics[i].maxima); channel_statistics[CompositePixelChannel].sum+=channel_statistics[i].sum; channel_statistics[CompositePixelChannel].sum_squared+= channel_statistics[i].sum_squared; channel_statistics[CompositePixelChannel].sum_cubed+= channel_statistics[i].sum_cubed; channel_statistics[CompositePixelChannel].sum_fourth_power+= channel_statistics[i].sum_fourth_power; channel_statistics[CompositePixelChannel].mean+=channel_statistics[i].mean; channel_statistics[CompositePixelChannel].variance+= channel_statistics[i].variance-channel_statistics[i].mean* channel_statistics[i].mean; channel_statistics[CompositePixelChannel].standard_deviation+= channel_statistics[i].variance-channel_statistics[i].mean* channel_statistics[i].mean; if (channel_statistics[i].entropy > MagickEpsilon) channel_statistics[CompositePixelChannel].entropy+= channel_statistics[i].entropy; } channels=GetImageChannels(image); channel_statistics[CompositePixelChannel].area/=channels; channel_statistics[CompositePixelChannel].sum/=channels; channel_statistics[CompositePixelChannel].sum_squared/=channels; channel_statistics[CompositePixelChannel].sum_cubed/=channels; channel_statistics[CompositePixelChannel].sum_fourth_power/=channels; channel_statistics[CompositePixelChannel].mean/=channels; channel_statistics[CompositePixelChannel].variance/=channels; channel_statistics[CompositePixelChannel].standard_deviation= 
sqrt(channel_statistics[CompositePixelChannel].standard_deviation/channels); channel_statistics[CompositePixelChannel].kurtosis/=channels; channel_statistics[CompositePixelChannel].skewness/=channels; channel_statistics[CompositePixelChannel].entropy/=channels; for (i=0; i <= (ssize_t) MaxPixelChannels; i++) { double standard_deviation; if (channel_statistics[i].standard_deviation == 0.0) continue; standard_deviation=PerceptibleReciprocal( channel_statistics[i].standard_deviation); channel_statistics[i].skewness=(channel_statistics[i].sum_cubed-3.0* channel_statistics[i].mean*channel_statistics[i].sum_squared+2.0* channel_statistics[i].mean*channel_statistics[i].mean* channel_statistics[i].mean)*(standard_deviation*standard_deviation* standard_deviation); channel_statistics[i].kurtosis=(channel_statistics[i].sum_fourth_power-4.0* channel_statistics[i].mean*channel_statistics[i].sum_cubed+6.0* channel_statistics[i].mean*channel_statistics[i].mean* channel_statistics[i].sum_squared-3.0*channel_statistics[i].mean* channel_statistics[i].mean*1.0*channel_statistics[i].mean* channel_statistics[i].mean)*(standard_deviation*standard_deviation* standard_deviation*standard_deviation)-3.0; } histogram=(double *) RelinquishMagickMemory(histogram); if (y < (ssize_t) image->rows) channel_statistics=(ChannelStatistics *) RelinquishMagickMemory( channel_statistics); return(channel_statistics); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P o l y n o m i a l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PolynomialImage() returns a new image where each pixel is the sum of the % pixels in the image sequence after applying its corresponding terms % (coefficient and degree pairs). % % The format of the PolynomialImage method is: % % Image *PolynomialImage(const Image *images,const size_t number_terms, % const double *terms,ExceptionInfo *exception) % % A description of each parameter follows: % % o images: the image sequence. % % o number_terms: the number of terms in the list. The actual list length % is 2 x number_terms + 1 (the constant). % % o terms: the list of polynomial coefficients and degree pairs and a % constant. % % o exception: return any errors or warnings in this structure. 
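%
%  For example, to evaluate 0.5*I1^2 + 0.25*I2 over a two-image sequence
%  (each pair is coefficient,degree; a trailing constant of 0.0 is appended
%  per the list-length convention above):
%
%    const double terms[] = { 0.5, 2.0, 0.25, 1.0, 0.0 };
%    polynomial_image=PolynomialImage(images,2,terms,exception);
%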
% */ MagickExport Image *PolynomialImage(const Image *images, const size_t number_terms,const double *terms,ExceptionInfo *exception) { #define PolynomialImageTag "Polynomial/Image" CacheView *polynomial_view; Image *image; MagickBooleanType status; MagickOffsetType progress; PixelChannels **magick_restrict polynomial_pixels; size_t number_images; ssize_t y; assert(images != (Image *) NULL); assert(images->signature == MagickCoreSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=CloneImage(images,images->columns,images->rows,MagickTrue, exception); if (image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) { image=DestroyImage(image); return((Image *) NULL); } number_images=GetImageListLength(images); polynomial_pixels=AcquirePixelThreadSet(images); if (polynomial_pixels == (PixelChannels **) NULL) { image=DestroyImage(image); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename); return((Image *) NULL); } /* Polynomial image pixels. */ status=MagickTrue; progress=0; polynomial_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { CacheView *image_view; const Image *next; const int id = GetOpenMPThreadId(); register ssize_t i, x; register PixelChannels *polynomial_pixel; register Quantum *magick_restrict q; ssize_t j; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(polynomial_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } polynomial_pixel=polynomial_pixels[id]; for (j=0; j < (ssize_t) image->columns; j++) for (i=0; i < MaxPixelChannels; i++) polynomial_pixel[j].channel[i]=0.0; next=images; for (j=0; j < (ssize_t) number_images; j++) { register const Quantum *p; if (j >= (ssize_t) number_terms) continue; image_view=AcquireVirtualCacheView(next,exception); p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) { image_view=DestroyCacheView(image_view); break; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; if (GetPixelWriteMask(next,p) == 0) { p+=GetPixelChannels(next); continue; } for (i=0; i < (ssize_t) GetPixelChannels(next); i++) { MagickRealType coefficient, degree; PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(next,channel); PixelTrait polynomial_traits=GetPixelChannelTraits(image,channel); if ((traits == UndefinedPixelTrait) || (polynomial_traits == UndefinedPixelTrait)) continue; if ((traits & UpdatePixelTrait) == 0) continue; coefficient=(MagickRealType) terms[2*j]; degree=(MagickRealType) terms[(j << 1)+1]; polynomial_pixel[x].channel[i]+=coefficient* pow(QuantumScale*GetPixelChannel(image,channel,p),degree); } p+=GetPixelChannels(next); } image_view=DestroyCacheView(image_view); next=GetNextImageInList(next); } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; if (GetPixelWriteMask(image,q) == 0) { q+=GetPixelChannels(image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait 
traits=GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; if ((traits & UpdatePixelTrait) == 0) continue; q[i]=ClampToQuantum(QuantumRange*polynomial_pixel[x].channel[i]); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(polynomial_view,exception) == MagickFalse) status=MagickFalse; if (images->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_PolynomialImages) #endif proceed=SetImageProgress(images,PolynomialImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } polynomial_view=DestroyCacheView(polynomial_view); polynomial_pixels=DestroyPixelThreadSet(polynomial_pixels); if (status == MagickFalse) image=DestroyImage(image); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S t a t i s t i c I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % StatisticImage() makes each pixel the min / max / median / mode / etc. of % the neighborhood of the specified width and height. % % The format of the StatisticImage method is: % % Image *StatisticImage(const Image *image,const StatisticType type, % const size_t width,const size_t height,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o type: the statistic type (median, mode, etc.). % % o width: the width of the pixel neighborhood. % % o height: the height of the pixel neighborhood. % % o exception: return any errors or warnings in this structure. % */ typedef struct _SkipNode { size_t next[9], count, signature; } SkipNode; typedef struct _SkipList { ssize_t level; SkipNode *nodes; } SkipList; typedef struct _PixelList { size_t length, seed; SkipList skip_list; size_t signature; } PixelList; static PixelList *DestroyPixelList(PixelList *pixel_list) { if (pixel_list == (PixelList *) NULL) return((PixelList *) NULL); if (pixel_list->skip_list.nodes != (SkipNode *) NULL) pixel_list->skip_list.nodes=(SkipNode *) RelinquishMagickMemory( pixel_list->skip_list.nodes); pixel_list=(PixelList *) RelinquishMagickMemory(pixel_list); return(pixel_list); } static PixelList **DestroyPixelListThreadSet(PixelList **pixel_list) { register ssize_t i; assert(pixel_list != (PixelList **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (pixel_list[i] != (PixelList *) NULL) pixel_list[i]=DestroyPixelList(pixel_list[i]); pixel_list=(PixelList **) RelinquishMagickMemory(pixel_list); return(pixel_list); } static PixelList *AcquirePixelList(const size_t width,const size_t height) { PixelList *pixel_list; pixel_list=(PixelList *) AcquireMagickMemory(sizeof(*pixel_list)); if (pixel_list == (PixelList *) NULL) return(pixel_list); (void) ResetMagickMemory((void *) pixel_list,0,sizeof(*pixel_list)); pixel_list->length=width*height; pixel_list->skip_list.nodes=(SkipNode *) AcquireQuantumMemory(65537UL, sizeof(*pixel_list->skip_list.nodes)); if (pixel_list->skip_list.nodes == (SkipNode *) NULL) return(DestroyPixelList(pixel_list)); (void) ResetMagickMemory(pixel_list->skip_list.nodes,0,65537UL* sizeof(*pixel_list->skip_list.nodes)); pixel_list->signature=MagickCoreSignature; return(pixel_list); } static PixelList **AcquirePixelListThreadSet(const size_t width, const size_t height) { PixelList **pixel_list; register ssize_t i; size_t number_threads; number_threads=(size_t) 
GetMagickResourceLimit(ThreadResource); pixel_list=(PixelList **) AcquireQuantumMemory(number_threads, sizeof(*pixel_list)); if (pixel_list == (PixelList **) NULL) return((PixelList **) NULL); (void) ResetMagickMemory(pixel_list,0,number_threads*sizeof(*pixel_list)); for (i=0; i < (ssize_t) number_threads; i++) { pixel_list[i]=AcquirePixelList(width,height); if (pixel_list[i] == (PixelList *) NULL) return(DestroyPixelListThreadSet(pixel_list)); } return(pixel_list); } static void AddNodePixelList(PixelList *pixel_list,const size_t color) { register SkipList *p; register ssize_t level; size_t search, update[9]; /* Initialize the node. */ p=(&pixel_list->skip_list); p->nodes[color].signature=pixel_list->signature; p->nodes[color].count=1; /* Determine where it belongs in the list. */ search=65536UL; for (level=p->level; level >= 0; level--) { while (p->nodes[search].next[level] < color) search=p->nodes[search].next[level]; update[level]=search; } /* Generate a pseudo-random level for this node. */ for (level=0; ; level++) { pixel_list->seed=(pixel_list->seed*42893621L)+1L; if ((pixel_list->seed & 0x300) != 0x300) break; } if (level > 8) level=8; if (level > (p->level+2)) level=p->level+2; /* If we're raising the list's level, link back to the root node. */ while (level > p->level) { p->level++; update[p->level]=65536UL; } /* Link the node into the skip-list. */ do { p->nodes[color].next[level]=p->nodes[update[level]].next[level]; p->nodes[update[level]].next[level]=color; } while (level-- > 0); } static inline void GetMaximumPixelList(PixelList *pixel_list,Quantum *pixel) { register SkipList *p; size_t color, maximum; ssize_t count; /* Find the maximum value for each of the color. */ p=(&pixel_list->skip_list); color=65536L; count=0; maximum=p->nodes[color].next[0]; do { color=p->nodes[color].next[0]; if (color > maximum) maximum=color; count+=p->nodes[color].count; } while (count < (ssize_t) pixel_list->length); *pixel=ScaleShortToQuantum((unsigned short) maximum); } static inline void GetMeanPixelList(PixelList *pixel_list,Quantum *pixel) { double sum; register SkipList *p; size_t color; ssize_t count; /* Find the mean value for each of the color. */ p=(&pixel_list->skip_list); color=65536L; count=0; sum=0.0; do { color=p->nodes[color].next[0]; sum+=(double) p->nodes[color].count*color; count+=p->nodes[color].count; } while (count < (ssize_t) pixel_list->length); sum/=pixel_list->length; *pixel=ScaleShortToQuantum((unsigned short) sum); } static inline void GetMedianPixelList(PixelList *pixel_list,Quantum *pixel) { register SkipList *p; size_t color; ssize_t count; /* Find the median value for each of the color. */ p=(&pixel_list->skip_list); color=65536L; count=0; do { color=p->nodes[color].next[0]; count+=p->nodes[color].count; } while (count <= (ssize_t) (pixel_list->length >> 1)); *pixel=ScaleShortToQuantum((unsigned short) color); } static inline void GetMinimumPixelList(PixelList *pixel_list,Quantum *pixel) { register SkipList *p; size_t color, minimum; ssize_t count; /* Find the minimum value for each of the color. 
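      The skip-list is walked along its level-0 links from the sentinel node
      (index 65536), tracking the smallest color seen until the accumulated
      node counts cover the whole neighborhood.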
*/ p=(&pixel_list->skip_list); count=0; color=65536UL; minimum=p->nodes[color].next[0]; do { color=p->nodes[color].next[0]; if (color < minimum) minimum=color; count+=p->nodes[color].count; } while (count < (ssize_t) pixel_list->length); *pixel=ScaleShortToQuantum((unsigned short) minimum); } static inline void GetModePixelList(PixelList *pixel_list,Quantum *pixel) { register SkipList *p; size_t color, max_count, mode; ssize_t count; /* Make each pixel the 'predominant color' of the specified neighborhood. */ p=(&pixel_list->skip_list); color=65536L; mode=color; max_count=p->nodes[mode].count; count=0; do { color=p->nodes[color].next[0]; if (p->nodes[color].count > max_count) { mode=color; max_count=p->nodes[mode].count; } count+=p->nodes[color].count; } while (count < (ssize_t) pixel_list->length); *pixel=ScaleShortToQuantum((unsigned short) mode); } static inline void GetNonpeakPixelList(PixelList *pixel_list,Quantum *pixel) { register SkipList *p; size_t color, next, previous; ssize_t count; /* Finds the non peak value for each of the colors. */ p=(&pixel_list->skip_list); color=65536L; next=p->nodes[color].next[0]; count=0; do { previous=color; color=next; next=p->nodes[color].next[0]; count+=p->nodes[color].count; } while (count <= (ssize_t) (pixel_list->length >> 1)); if ((previous == 65536UL) && (next != 65536UL)) color=next; else if ((previous != 65536UL) && (next == 65536UL)) color=previous; *pixel=ScaleShortToQuantum((unsigned short) color); } static inline void GetRootMeanSquarePixelList(PixelList *pixel_list, Quantum *pixel) { double sum; register SkipList *p; size_t color; ssize_t count; /* Find the root mean square value for each of the color. */ p=(&pixel_list->skip_list); color=65536L; count=0; sum=0.0; do { color=p->nodes[color].next[0]; sum+=(double) (p->nodes[color].count*color*color); count+=p->nodes[color].count; } while (count < (ssize_t) pixel_list->length); sum/=pixel_list->length; *pixel=ScaleShortToQuantum((unsigned short) sqrt(sum)); } static inline void GetStandardDeviationPixelList(PixelList *pixel_list, Quantum *pixel) { double sum, sum_squared; register SkipList *p; size_t color; ssize_t count; /* Find the standard-deviation value for each of the color. */ p=(&pixel_list->skip_list); color=65536L; count=0; sum=0.0; sum_squared=0.0; do { register ssize_t i; color=p->nodes[color].next[0]; sum+=(double) p->nodes[color].count*color; for (i=0; i < (ssize_t) p->nodes[color].count; i++) sum_squared+=((double) color)*((double) color); count+=p->nodes[color].count; } while (count < (ssize_t) pixel_list->length); sum/=pixel_list->length; sum_squared/=pixel_list->length; *pixel=ScaleShortToQuantum((unsigned short) sqrt(sum_squared-(sum*sum))); } static inline void InsertPixelList(const Quantum pixel,PixelList *pixel_list) { size_t signature; unsigned short index; index=ScaleQuantumToShort(pixel); signature=pixel_list->skip_list.nodes[index].signature; if (signature == pixel_list->signature) { pixel_list->skip_list.nodes[index].count++; return; } AddNodePixelList(pixel_list,index); } static void ResetPixelList(PixelList *pixel_list) { int level; register SkipNode *root; register SkipList *p; /* Reset the skip-list. 
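      Only the root node's links and the list level are cleared here; nodes
      from the previous neighborhood are invalidated lazily by advancing the
      signature, so InsertPixelList() re-adds a color the first time it is
      seen again.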
*/ p=(&pixel_list->skip_list); root=p->nodes+65536UL; p->level=0; for (level=0; level < 9; level++) root->next[level]=65536UL; pixel_list->seed=pixel_list->signature++; } MagickExport Image *StatisticImage(const Image *image,const StatisticType type, const size_t width,const size_t height,ExceptionInfo *exception) { #define StatisticImageTag "Statistic/Image" CacheView *image_view, *statistic_view; Image *statistic_image; MagickBooleanType status; MagickOffsetType progress; PixelList **magick_restrict pixel_list; ssize_t center, y; /* Initialize statistics image attributes. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); statistic_image=CloneImage(image,image->columns,image->rows,MagickTrue, exception); if (statistic_image == (Image *) NULL) return((Image *) NULL); status=SetImageStorageClass(statistic_image,DirectClass,exception); if (status == MagickFalse) { statistic_image=DestroyImage(statistic_image); return((Image *) NULL); } pixel_list=AcquirePixelListThreadSet(MagickMax(width,1),MagickMax(height,1)); if (pixel_list == (PixelList **) NULL) { statistic_image=DestroyImage(statistic_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } /* Make each pixel the min / max / median / mode / etc. of the neighborhood. */ center=(ssize_t) GetPixelChannels(image)*(image->columns+MagickMax(width,1))* (MagickMax(height,1)/2L)+GetPixelChannels(image)*(MagickMax(width,1)/2L); status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); statistic_view=AcquireAuthenticCacheView(statistic_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,statistic_image,statistic_image->rows,1) #endif for (y=0; y < (ssize_t) statistic_image->rows; y++) { const int id = GetOpenMPThreadId(); register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-((ssize_t) MagickMax(width,1)/2L),y- (ssize_t) (MagickMax(height,1)/2L),image->columns+MagickMax(width,1), MagickMax(height,1),exception); q=QueueCacheViewAuthenticPixels(statistic_view,0,y,statistic_image->columns, 1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) statistic_image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { Quantum pixel; register const Quantum *magick_restrict pixels; register ssize_t u; ssize_t v; PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); PixelTrait statistic_traits=GetPixelChannelTraits(statistic_image, channel); if ((traits == UndefinedPixelTrait) || (statistic_traits == UndefinedPixelTrait)) continue; if (((statistic_traits & CopyPixelTrait) != 0) || (GetPixelWriteMask(image,p) == 0)) { SetPixelChannel(statistic_image,channel,p[center+i],q); continue; } pixels=p; ResetPixelList(pixel_list[id]); for (v=0; v < (ssize_t) MagickMax(height,1); v++) { for (u=0; u < (ssize_t) MagickMax(width,1); u++) { InsertPixelList(pixels[i],pixel_list[id]); pixels+=GetPixelChannels(image); } pixels+=GetPixelChannels(image)*image->columns; } switch (type) { case GradientStatistic: { 
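        /* The gradient statistic is the neighborhood's dynamic range:
           the maximum pixel value minus the minimum. */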
double maximum, minimum; GetMinimumPixelList(pixel_list[id],&pixel); minimum=(double) pixel; GetMaximumPixelList(pixel_list[id],&pixel); maximum=(double) pixel; pixel=ClampToQuantum(MagickAbsoluteValue(maximum-minimum)); break; } case MaximumStatistic: { GetMaximumPixelList(pixel_list[id],&pixel); break; } case MeanStatistic: { GetMeanPixelList(pixel_list[id],&pixel); break; } case MedianStatistic: default: { GetMedianPixelList(pixel_list[id],&pixel); break; } case MinimumStatistic: { GetMinimumPixelList(pixel_list[id],&pixel); break; } case ModeStatistic: { GetModePixelList(pixel_list[id],&pixel); break; } case NonpeakStatistic: { GetNonpeakPixelList(pixel_list[id],&pixel); break; } case RootMeanSquareStatistic: { GetRootMeanSquarePixelList(pixel_list[id],&pixel); break; } case StandardDeviationStatistic: { GetStandardDeviationPixelList(pixel_list[id],&pixel); break; } } SetPixelChannel(statistic_image,channel,pixel,q); } p+=GetPixelChannels(image); q+=GetPixelChannels(statistic_image); } if (SyncCacheViewAuthenticPixels(statistic_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_StatisticImage) #endif proceed=SetImageProgress(image,StatisticImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } statistic_view=DestroyCacheView(statistic_view); image_view=DestroyCacheView(image_view); pixel_list=DestroyPixelListThreadSet(pixel_list); if (status == MagickFalse) statistic_image=DestroyImage(statistic_image); return(statistic_image); }
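/*
  Illustrative usage sketch (not part of MagickCore): apply a 3x3 median
  filter with StatisticImage().  "input.png" and "output.png" are placeholder
  filenames, MagickCoreGenesis() is assumed to have been called, and the
  sketch is wrapped in #if 0 so it does not affect compilation.
*/
#if 0
static MagickBooleanType MedianFilterExample(ExceptionInfo *exception)
{
  Image
    *image,
    *median_image;

  ImageInfo
    *image_info;

  MagickBooleanType
    status;

  image_info=AcquireImageInfo();
  (void) CopyMagickString(image_info->filename,"input.png",MagickPathExtent);
  image=ReadImage(image_info,exception);
  if (image == (Image *) NULL)
    {
      image_info=DestroyImageInfo(image_info);
      return(MagickFalse);
    }
  median_image=StatisticImage(image,MedianStatistic,3,3,exception);
  image=DestroyImage(image);
  if (median_image == (Image *) NULL)
    {
      image_info=DestroyImageInfo(image_info);
      return(MagickFalse);
    }
  (void) CopyMagickString(median_image->filename,"output.png",
    MagickPathExtent);
  status=WriteImage(image_info,median_image,exception);
  median_image=DestroyImage(median_image);
  image_info=DestroyImageInfo(image_info);
  return(status);
}
#endif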
nonneg_frob.c
/******************************************************************************
 * INCLUDES
 *****************************************************************************/

#include "../admm.h"


/******************************************************************************
 * PUBLIC FUNCTIONS
 *****************************************************************************/

/**
* @brief The proximal update for a non-negative factorization. This routine
*        projects 'primal' onto the non-negative orthant while adding a Frob.
*        norm regularizer. This scales primal by rho*inv((lambda+rho) * eye),
*        or more simply multiplies each entry by (rho/(lambda+rho)). It then
*        projects to the non-negative orthant.
*
* @param[out] primal The row-major matrix to update.
* @param nrows The number of rows in primal.
* @param ncols The number of columns in primal.
* @param offset Not used.
* @param data Pointer to the val_t Frobenius multiplier (lambda).
* @param rho The ADMM penalty parameter.
* @param should_parallelize If true, parallelize.
*/
void nonneg_frob_prox(
    val_t * primal,
    idx_t const nrows,
    idx_t const ncols,
    idx_t const offset,
    void * data,
    val_t const rho,
    bool const should_parallelize)
{
  val_t const lambda = *((val_t *) data);
  val_t const mult = rho / (lambda + rho);

  #pragma omp parallel for if(should_parallelize)
  for(idx_t i=0; i < nrows; ++i) {
    for(idx_t j=0; j < ncols; ++j) {
      idx_t const index = j + (i*ncols);
      val_t const new_val = primal[index] * mult;
      primal[index] = (new_val > 0.) ? new_val : 0.;
    }
  }
}

/**
* @brief Free the single val_t allocated for Frobenius regularization.
*
* @param data The data to free.
*/
void nonneg_frob_free(
    void * data)
{
  splatt_free(data);
}


/******************************************************************************
 * API FUNCTIONS
 *****************************************************************************/

splatt_error_type splatt_register_ntf_frob(
    splatt_cpd_opts * opts,
    splatt_val_t const multiplier,
    splatt_idx_t const * const modes_included,
    splatt_idx_t const num_modes)
{
  for(idx_t m = 0; m < num_modes; ++m) {
    idx_t const mode = modes_included[m];

    splatt_cpd_constraint * ntf_con = splatt_alloc_constraint(SPLATT_CON_ADMM);
    ntf_con->prox_func = nonneg_frob_prox;
    ntf_con->free_func = nonneg_frob_free;

    /* set hints to assist optimizations */
    ntf_con->hints.row_separable = true;
    ntf_con->hints.sparsity_inducing = true;

    sprintf(ntf_con->description, "NTF-FROB-REG (%0.1e)", multiplier);

    /* store multiplier */
    val_t * mult = splatt_malloc(sizeof(*mult));
    *mult = multiplier;
    ntf_con->data = mult;

    /* add to the CPD factorization */
    splatt_register_constraint(opts, mode, ntf_con);

    /* memory will be freed by splatt_free_constraint() */
  }

  return SPLATT_SUCCESS;
}
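/*
  Illustrative note (not part of the original file): nonneg_frob_prox() is the
  closed-form solution of the row-separable problem

      argmin_{X >= 0}  (lambda/2)*||X||_F^2  +  (rho/2)*||X - V||_F^2,

  whose unconstrained minimizer is X = (rho/(lambda+rho)) * V; clamping the
  negative entries to zero then gives the projection onto the non-negative
  orthant, which is exactly what the loop above computes entry-wise.

  A hypothetical registration sketch, assuming 'opts' is an already-allocated
  splatt_cpd_opts for a 3-mode tensor and a multiplier of 1e-2:

      splatt_idx_t modes[] = { 0, 1, 2 };
      splatt_register_ntf_frob(opts, 1e-2, modes, 3);
*/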
lis_matrix_diag.c
/* Copyright (C) 2002-2012 The SSI Project. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the project nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE SCALABLE SOFTWARE INFRASTRUCTURE PROJECT ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE SCALABLE SOFTWARE INFRASTRUCTURE PROJECT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifdef HAVE_CONFIG_H #include "lis_config.h" #else #ifdef HAVE_CONFIG_WIN32_H #include "lis_config_win32.h" #endif #endif #include <stdio.h> #include <stdlib.h> #ifdef HAVE_MALLOC_H #include <malloc.h> #endif #include <string.h> #include <stdarg.h> #ifdef _OPENMP #include <omp.h> #endif #ifdef USE_MPI #include <mpi.h> #endif #include "lislib.h" #undef __FUNC__ #define __FUNC__ "lis_matrix_diag_init" LIS_INT lis_matrix_diag_init(LIS_MATRIX_DIAG *D) { LIS_DEBUG_FUNC_IN; memset(*D,0,sizeof(struct LIS_MATRIX_DIAG_STRUCT)); (*D)->bn = 1; LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_matrix_diag_check" LIS_INT lis_matrix_diag_check(LIS_MATRIX_DIAG D, LIS_INT level) { LIS_DEBUG_FUNC_IN; switch( level ) { case LIS_MATRIX_CHECK_NULL: if( D==NULL ) { LIS_SETERR(LIS_ERR_ILL_ARG,"diagonal matrix D is undefined\n"); return LIS_ERR_ILL_ARG; } break; default: if( D==NULL ) { LIS_SETERR(LIS_ERR_ILL_ARG,"diagonal matrix D is undefined\n"); return LIS_ERR_ILL_ARG; } break; } LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_matrix_diag_create" LIS_INT lis_matrix_diag_create(LIS_INT local_n, LIS_INT global_n, LIS_Comm comm, LIS_MATRIX_DIAG *D) { LIS_INT nprocs,my_rank; LIS_INT is,ie; LIS_INT *ranges; #ifdef USE_MPI LIS_INT i; #endif LIS_DEBUG_FUNC_IN; *D = NULL; if( global_n>0 && local_n>global_n ) { LIS_SETERR2(LIS_ERR_ILL_ARG,"local n(=%d) is larger than global n(=%d)\n",local_n,global_n); return LIS_ERR_ILL_ARG; } if( local_n<0 || global_n<0 ) { LIS_SETERR2(LIS_ERR_ILL_ARG,"local n(=%d) or global n(=%d) are less than 0\n",local_n,global_n); return LIS_ERR_ILL_ARG; } if( local_n==0 && global_n==0 ) { LIS_SETERR2(LIS_ERR_ILL_ARG,"local n(=%d) and global n(=%d) are 0\n",local_n,global_n); return LIS_ERR_ILL_ARG; } *D = (LIS_MATRIX_DIAG)lis_malloc( sizeof(struct LIS_MATRIX_DIAG_STRUCT),"lis_matrix_diag_create::D" ); if( NULL==*D ) { LIS_SETERR_MEM(sizeof(struct LIS_MATRIX_DIAG_STRUCT)); return LIS_OUT_OF_MEMORY; } lis_matrix_diag_init(D); #ifdef USE_MPI 
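	/* Under MPI, query the communicator size and rank, and allocate the
	   ranges[] array (nprocs+1 entries) that holds each process's partition
	   boundaries; it is filled in via MPI_Allgather below. */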
MPI_Comm_size(comm,&nprocs); MPI_Comm_rank(comm,&my_rank); ranges = (LIS_INT *)lis_malloc( (nprocs+1)*sizeof(LIS_INT),"lis_matrix_diag_create::ranges" ); if( ranges==NULL ) { LIS_SETERR_MEM((nprocs+1)*sizeof(LIS_INT)); lis_matrix_diag_destroy(*D); *D = NULL; return LIS_OUT_OF_MEMORY; } #else nprocs = 1; my_rank = 0; ranges = NULL; #endif #ifdef USE_MPI MPI_Allreduce(&local_n,&i,1,LIS_MPI_INT,MPI_SUM,comm); if( i==0 ) #else if( local_n==0 ) #endif { #ifdef USE_MPI LIS_GET_ISIE(my_rank,nprocs,global_n,is,ie); local_n = ie-is; MPI_Allgather(&ie,1,LIS_MPI_INT,&ranges[1],1,LIS_MPI_INT,comm); ranges[0] = 0; #else local_n = global_n; is = 0; ie = global_n; #endif } else { #ifdef USE_MPI MPI_Allgather(&local_n,1,LIS_MPI_INT,&ranges[1],1,LIS_MPI_INT,comm); ranges[0] = 0; for(i=0;i<nprocs;i++) { ranges[i+1] += ranges[i]; } global_n = ranges[nprocs]; is = ranges[my_rank]; ie = ranges[my_rank+1]; #else global_n = local_n; is = 0; ie = local_n; #endif } (*D)->ranges = ranges; (*D)->value = (LIS_SCALAR *)lis_malloc( local_n*sizeof(LIS_SCALAR),"lis_matrix_diag_create::D->value" ); if( NULL==(*D)->value ) { LIS_SETERR_MEM(local_n*sizeof(LIS_SCALAR)); lis_matrix_diag_destroy(*D); *D = NULL; return LIS_OUT_OF_MEMORY; } (*D)->n = local_n; (*D)->nr = local_n; (*D)->gn = global_n; (*D)->np = local_n; (*D)->comm = comm; (*D)->my_rank = my_rank; (*D)->nprocs = nprocs; (*D)->is = is; (*D)->ie = ie; LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_matrix_diag_destroy" LIS_INT lis_matrix_diag_destroy(LIS_MATRIX_DIAG D) { LIS_INT i; LIS_DEBUG_FUNC_IN; if( D ) { if( D->value ) lis_free( D->value ); if( D->work ) lis_free( D->work ); if( D->bns ) { for(i=0;i<D->nr;i++) { if( D->v_value[i] ) free(D->v_value[i]); } lis_free2(2,D->bns,D->v_value); } if( D->ptr ) lis_free( D->ptr ); if( D->ranges ) lis_free( D->ranges ); lis_free(D); } LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_matrix_diag_duplicate" LIS_INT lis_matrix_diag_duplicate(LIS_MATRIX_DIAG Din, LIS_MATRIX_DIAG *Dout) { LIS_INT nnz,err,nr,bnmax,t; LIS_INT nprocs; LIS_INT i; LIS_INT *w_bns,*ptr; #ifdef USE_MPI LIS_INT *ranges; #endif LIS_DEBUG_FUNC_IN; err = lis_matrix_diag_check(Din,LIS_MATRIX_CHECK_ALL); if( err ) return err; nprocs = Din->nprocs; *Dout = NULL; *Dout = (LIS_MATRIX_DIAG)lis_malloc( sizeof(struct LIS_MATRIX_DIAG_STRUCT),"lis_matrix_diag_duplicate::Dout" ); if( NULL==*Dout ) { LIS_SETERR_MEM(sizeof(struct LIS_MATRIX_DIAG_STRUCT)); return LIS_OUT_OF_MEMORY; } lis_matrix_diag_init(Dout); #ifdef USE_MPI ranges = (LIS_INT *)lis_malloc( (nprocs+1)*sizeof(LIS_INT),"lis_matrix_diag_duplicate::ranges" ); if( ranges==NULL ) { LIS_SETERR_MEM((nprocs+1)*sizeof(LIS_INT)); lis_matrix_diag_destroy(*Dout); *Dout = NULL; return LIS_OUT_OF_MEMORY; } for(i=0;i<nprocs+1;i++) ranges[i] = Din->ranges[i]; (*Dout)->ranges = ranges; #else (*Dout)->ranges = NULL; #endif if( Din->bns==NULL ) { (*Dout)->value = (LIS_SCALAR *)lis_malloc( Din->bn*Din->bn*Din->nr*sizeof(LIS_SCALAR),"lis_matrix_diag_duplicate::Dout->value" ); if( NULL==(*Dout)->value ) { LIS_SETERR_MEM(Din->bn*Din->bn*Din->nr*sizeof(LIS_SCALAR)); lis_matrix_diag_destroy(*Dout); *Dout = NULL; return LIS_OUT_OF_MEMORY; } (*Dout)->bn = Din->bn; } else { nr = Din->nr; (*Dout)->bns = (LIS_INT *)lis_malloc( nr*sizeof(LIS_INT),"lis_matrix_diag_duplicate::Dout->bns" ); if( NULL==(*Dout)->bns ) { LIS_SETERR_MEM(nr*sizeof(LIS_INT)); lis_matrix_diag_destroy(*Dout); *Dout = NULL; return LIS_OUT_OF_MEMORY; } (*Dout)->v_value = (LIS_SCALAR **)lis_malloc( 
nr*sizeof(LIS_SCALAR *),"lis_matrix_diag_duplicate::Dout->value" ); if( NULL==(*Dout)->v_value ) { LIS_SETERR_MEM(nr*sizeof(LIS_SCALAR *)); lis_matrix_diag_destroy(*Dout); *Dout = NULL; return LIS_OUT_OF_MEMORY; } bnmax = 0; for(i=0;i<nr;i++) { t = Din->bns[i]; bnmax = _max(bnmax,t); (*Dout)->bns[i] = t; (*Dout)->v_value[i] = (LIS_SCALAR *)malloc( t*t*sizeof(LIS_SCALAR)); } (*Dout)->bn = bnmax; (*Dout)->nr = nr; } (*Dout)->n = Din->n; (*Dout)->nr = Din->nr; (*Dout)->gn = Din->gn; (*Dout)->np = Din->np; (*Dout)->comm = Din->comm; (*Dout)->my_rank = Din->my_rank; (*Dout)->nprocs = Din->nprocs; (*Dout)->is = Din->is; (*Dout)->ie = Din->ie; (*Dout)->origin = Din->origin; LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_matrix_diag_duplicate" LIS_INT lis_matrix_diag_duplicateM(LIS_MATRIX Ain, LIS_MATRIX_DIAG *Dout) { LIS_INT nr,err; LIS_INT nprocs; LIS_INT i,k,t,bnmax; #ifdef USE_MPI LIS_INT *ranges; #endif LIS_DEBUG_FUNC_IN; err = lis_matrix_check(Ain,LIS_MATRIX_CHECK_ALL); if( err ) return err; nprocs = Ain->nprocs; *Dout = NULL; *Dout = (LIS_MATRIX_DIAG)lis_malloc( sizeof(struct LIS_MATRIX_DIAG_STRUCT),"lis_matrix_diag_duplicateM::Dout" ); if( NULL==*Dout ) { LIS_SETERR_MEM(sizeof(struct LIS_MATRIX_DIAG_STRUCT)); return LIS_OUT_OF_MEMORY; } lis_matrix_diag_init(Dout); #ifdef USE_MPI ranges = (LIS_INT *)lis_malloc( (nprocs+1)*sizeof(LIS_INT),"lis_matrix_diag_duplicateM::ranges" ); if( ranges==NULL ) { LIS_SETERR_MEM((nprocs+1)*sizeof(LIS_INT)); lis_matrix_diag_destroy(*Dout); *Dout = NULL; return LIS_OUT_OF_MEMORY; } for(i=0;i<nprocs+1;i++) ranges[i] = Ain->ranges[i]; (*Dout)->ranges = ranges; #else (*Dout)->ranges = NULL; #endif switch(Ain->matrix_type) { case LIS_MATRIX_BSR: case LIS_MATRIX_BSC: k = Ain->nr*Ain->bnr*Ain->bnc; (*Dout)->value = (LIS_SCALAR *)lis_malloc( k*sizeof(LIS_SCALAR),"lis_matrix_diag_duplicateM::Dout->value" ); if( NULL==(*Dout)->value ) { LIS_SETERR_MEM(k*sizeof(LIS_SCALAR)); lis_matrix_diag_destroy(*Dout); *Dout = NULL; return LIS_OUT_OF_MEMORY; } (*Dout)->bn = Ain->bnr; (*Dout)->nr = Ain->nr; break; case LIS_MATRIX_VBR: nr = Ain->nr; (*Dout)->bns = (LIS_INT *)lis_malloc( nr*sizeof(LIS_INT),"lis_matrix_diag_duplicateM::Dout->bns" ); if( NULL==(*Dout)->bns ) { LIS_SETERR_MEM(nr*sizeof(LIS_INT)); lis_matrix_diag_destroy(*Dout); *Dout = NULL; return LIS_OUT_OF_MEMORY; } (*Dout)->v_value = (LIS_SCALAR **)lis_malloc( nr*sizeof(LIS_SCALAR *),"lis_matrix_diag_duplicateM::Dout->value" ); if( NULL==(*Dout)->v_value ) { LIS_SETERR_MEM(nr*sizeof(LIS_SCALAR *)); lis_matrix_diag_destroy(*Dout); *Dout = NULL; return LIS_OUT_OF_MEMORY; } bnmax = 0; for(i=0;i<nr;i++) { t = Ain->row[i+1] - Ain->row[i]; bnmax = _max(bnmax,t); (*Dout)->bns[i] = t; (*Dout)->v_value[i] = (LIS_SCALAR *)malloc( t*t*sizeof(LIS_SCALAR)); } (*Dout)->bn = bnmax; (*Dout)->nr = nr; break; default: (*Dout)->value = (LIS_SCALAR *)lis_malloc( Ain->np*sizeof(LIS_SCALAR),"lis_matrix_diag_duplicateM::Dout->value" ); if( NULL==(*Dout)->value ) { LIS_SETERR_MEM(Ain->np*sizeof(LIS_SCALAR)); lis_matrix_diag_destroy(*Dout); *Dout = NULL; return LIS_OUT_OF_MEMORY; } (*Dout)->nr = Ain->n; break; } (*Dout)->n = Ain->n; (*Dout)->gn = Ain->gn; (*Dout)->np = Ain->np; (*Dout)->comm = Ain->comm; (*Dout)->my_rank = Ain->my_rank; (*Dout)->nprocs = Ain->nprocs; (*Dout)->is = Ain->is; (*Dout)->ie = Ain->ie; (*Dout)->origin = Ain->origin; LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_matrix_diag_malloc" LIS_INT lis_matrix_diag_mallocM(LIS_MATRIX A, LIS_SCALAR **diag) { 
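	/* Allocate a diagonal buffer sized to match A: nr*bnr*bnc entries for
	   block (BSR) storage, n entries for all other matrix types. */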
LIS_INT err; LIS_INT k; LIS_DEBUG_FUNC_IN; err = lis_matrix_check(A,LIS_MATRIX_CHECK_ALL); if( err ) return err; switch(A->matrix_type) { case LIS_MATRIX_BSR: k = A->nr*A->bnr*A->bnc; *diag = (LIS_SCALAR *)lis_malloc( k*sizeof(LIS_SCALAR),"lis_matrix_diag_mallocM::diag" ); if( NULL==*diag ) { LIS_SETERR_MEM(k*sizeof(LIS_SCALAR)); *diag = NULL; return LIS_OUT_OF_MEMORY; } break; default: *diag = (LIS_SCALAR *)lis_malloc( A->n*sizeof(LIS_SCALAR),"lis_matrix_diag_mallocM::diag" ); if( NULL==*diag ) { LIS_SETERR_MEM(A->n*sizeof(LIS_SCALAR)); *diag = NULL; return LIS_OUT_OF_MEMORY; } break; } LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_matrix_diag_get_range" LIS_INT lis_matrix_diag_get_range(LIS_MATRIX_DIAG D, LIS_INT *is, LIS_INT *ie) { LIS_INT err; LIS_DEBUG_FUNC_IN; err = lis_matrix_diag_check(D,LIS_MATRIX_CHECK_NULL); if( err ) return err; *is = D->is; *ie = D->ie; LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_matrix_diag_get_size" LIS_INT lis_matrix_diag_get_size(LIS_MATRIX_DIAG D, LIS_INT *local_n, LIS_INT *global_n) { LIS_INT err; LIS_DEBUG_FUNC_IN; err = lis_matrix_diag_check(D,LIS_MATRIX_CHECK_NULL); if( err ) return err; *local_n = D->n; *global_n = D->gn; LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_matrix_diag_set_blocksize" LIS_INT lis_matrix_diag_set_blocksize(LIS_MATRIX_DIAG D, LIS_INT bn, LIS_INT *bns) { LIS_INT i,n,nnz,nr,bnmax,t; LIS_INT *w_bns,*ptr; LIS_INT err; LIS_SCALAR *diag; LIS_DEBUG_FUNC_IN; err = lis_matrix_diag_check(D,LIS_MATRIX_CHECK_NULL); if( err ) return err; n = D->n; if( bns==NULL ) { nr = 1 + (n-1)/bn; diag = (LIS_SCALAR *)lis_malloc( bn*bn*nr*sizeof(LIS_SCALAR),"lis_matrix_diag_set_blocksize::diag" ); if( NULL==diag ) { LIS_SETERR_MEM(bn*bn*nr*sizeof(LIS_SCALAR)); return LIS_OUT_OF_MEMORY; } lis_free(D->value); D->value = diag; D->nr = nr; D->bn = bn; } else { if( D->bns==NULL ) { lis_free(D->value); D->bns = (LIS_INT *)lis_malloc( bn*sizeof(LIS_INT),"lis_matrix_diag_duplicateM::Dout->bns" ); if( NULL==D->bns ) { LIS_SETERR_MEM(bn*sizeof(LIS_INT)); lis_matrix_diag_destroy(D); D = NULL; return LIS_OUT_OF_MEMORY; } D->v_value = (LIS_SCALAR **)lis_malloc( bn*sizeof(LIS_SCALAR *),"lis_matrix_diag_duplicateM::Dout->value" ); if( NULL==D->v_value ) { LIS_SETERR_MEM(bn*sizeof(LIS_SCALAR *)); lis_matrix_diag_destroy(D); D = NULL; return LIS_OUT_OF_MEMORY; } bnmax = 0; for(i=0;i<bn;i++) { t = bns[i]; bnmax = _max(bnmax,t); D->bns[i] = t; D->v_value[i] = (LIS_SCALAR *)malloc( t*t*sizeof(LIS_SCALAR)); } D->bn = bnmax; D->nr = bn; } } LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_matrix_diag_copy" LIS_INT lis_matrix_diag_copy(LIS_MATRIX_DIAG X, LIS_MATRIX_DIAG Y) { LIS_INT i,k,n,nr,bn; LIS_SCALAR *x,*y; LIS_DEBUG_FUNC_IN; x = X->value; y = Y->value; n = X->n; nr = X->nr; if( n!=Y->n ) { LIS_SETERR(LIS_ERR_ILL_ARG,"length of diagonal matrix X and Y is not equal\n"); return LIS_ERR_ILL_ARG; } if( X->bns ) { #ifdef _OPENMP #pragma omp parallel for private(i) #endif for(i=0;i<nr;i++) { bn = X->bns[i]*X->bns[i]; memcpy(Y->v_value[i],X->v_value[i],bn*sizeof(LIS_SCALAR)); } } else { bn = X->bn*X->bn; #ifdef _OPENMP #pragma omp parallel for private(i) #endif for(i=0; i<nr; i++) { memcpy(&y[i*bn],&x[i*bn],bn*sizeof(LIS_SCALAR)); } } LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_matrix_diag_scale" LIS_INT lis_matrix_diag_scale(LIS_SCALAR alpha, LIS_MATRIX_DIAG D) { LIS_INT i,j,k,nr,bn; LIS_SCALAR *d; 
LIS_DEBUG_FUNC_IN; d = D->value; nr = D->nr; bn = D->bn; if( D->bns ) { #ifdef _OPENMP #pragma omp parallel for private(i) #endif for(i=0; i<nr; i++) { bn = D->bns[i]*D->bns[i]; for(j=0;j<bn;j++) { D->v_value[i][j] *= alpha; } } } else { if( bn==1 ) { #ifdef _OPENMP #pragma omp parallel for private(i) #endif for(i=0; i<nr; i++) { d[i] = alpha * d[i]; } } else if( bn==2 ) { #ifdef _OPENMP #pragma omp parallel for private(i) #endif for(i=0; i<nr; i++) { d[4*i] = alpha * d[4*i]; d[4*i+1] = alpha * d[4*i+1]; d[4*i+2] = alpha * d[4*i+2]; d[4*i+3] = alpha * d[4*i+3]; } } else if( bn==3 ) { #ifdef _OPENMP #pragma omp parallel for private(i) #endif for(i=0; i<nr; i++) { d[9*i] = alpha * d[9*i]; d[9*i+1] = alpha * d[9*i+1]; d[9*i+2] = alpha * d[9*i+2]; d[9*i+3] = alpha * d[9*i+3]; d[9*i+4] = alpha * d[9*i+4]; d[9*i+5] = alpha * d[9*i+5]; d[9*i+6] = alpha * d[9*i+6]; d[9*i+7] = alpha * d[9*i+7]; d[9*i+8] = alpha * d[9*i+8]; } } else if( bn==4 ) { #ifdef _OPENMP #pragma omp parallel for private(i) #endif for(i=0; i<nr; i++) { d[16*i] = alpha * d[16*i]; d[16*i+1] = alpha * d[16*i+1]; d[16*i+2] = alpha * d[16*i+2]; d[16*i+3] = alpha * d[16*i+3]; d[16*i+4] = alpha * d[16*i+4]; d[16*i+5] = alpha * d[16*i+5]; d[16*i+6] = alpha * d[16*i+6]; d[16*i+7] = alpha * d[16*i+7]; d[16*i+8] = alpha * d[16*i+8]; d[16*i+9] = alpha * d[16*i+9]; d[16*i+10] = alpha * d[16*i+10]; d[16*i+11] = alpha * d[16*i+11]; d[16*i+12] = alpha * d[16*i+12]; d[16*i+13] = alpha * d[16*i+13]; d[16*i+14] = alpha * d[16*i+14]; d[16*i+15] = alpha * d[16*i+15]; } } else { bn = bn*bn; #ifdef _OPENMP #pragma omp parallel for private(i) #endif for(i=0; i<nr; i++) { for(j=0;j<bn;j++) { d[i*bn+j] = alpha * d[i*bn+j]; } } } } LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_matrix_diag_inverse" LIS_INT lis_matrix_diag_inverse(LIS_MATRIX_DIAG D) { LIS_INT i,j,k,nr,bn,bs,n; LIS_SCALAR *d; LIS_DEBUG_FUNC_IN; n = D->n; d = D->value; nr = D->nr; bn = D->bn; bs = D->bn*D->bn; if( D->bns ) { #ifdef _OPENMP #pragma omp parallel for private(i,bn) #endif for(i=0; i<nr; i++) { bn = D->bns[i]; lis_array_invGauss(bn,D->v_value[i]); } } else { if( bn==1 ) { #ifdef _OPENMP #pragma omp parallel for private(i) #endif for(i=0; i<nr; i++) { d[i] = 1.0 / d[i]; } } else { k = n%bn; if( k!=0 ) { for(i=bn-1;i>=k;i--) { d[bs*(nr-1)+i*(bn+1)] = 1.0; } } #ifdef _OPENMP #pragma omp parallel for private(i) #endif for(i=0;i<nr;i++) { lis_array_invGauss(bn,&d[i*bs]); } } } LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_matrix_diag_matvec" LIS_INT lis_matrix_diag_matvec(LIS_MATRIX_DIAG D, LIS_VECTOR X, LIS_VECTOR Y) { LIS_INT i,nr,bn,bs; LIS_SCALAR *d,*x,*y; LIS_DEBUG_FUNC_IN; d = D->value; x = X->value; y = Y->value; nr = D->nr; bn = D->bn; bs = D->bn*D->bn; if( D->bns ) { #ifdef _OPENMP #pragma omp parallel for private(i,bn) #endif for(i=0; i<nr; i++) { bn = D->bns[i]; lis_array_matvec(bn,D->v_value[i],&x[i*bn],&y[i*bn],LIS_INS_VALUE); } } else { if( bn==1 ) { #ifdef _OPENMP #pragma omp parallel for private(i) #endif for(i=0; i<nr; i++) { y[i] = x[i] * d[i]; } } else if( bn==2 ) { #ifdef _OPENMP #pragma omp parallel for private(i) #endif for(i=0; i<nr; i++) { y[2*i] = d[4*i] * x[2*i] + d[4*i+2] * x[2*i+1]; y[2*i+1] = d[4*i+1] * x[2*i] + d[4*i+3] * x[2*i+1]; } } else if( bn==3 ) { #ifdef _OPENMP #pragma omp parallel for private(i) #endif for(i=0; i<nr; i++) { y[3*i] = d[9*i] * x[3*i] + d[9*i+3] * x[3*i+1] + d[9*i+6] * x[3*i+2]; y[3*i+1] = d[9*i+1] * x[3*i] + d[9*i+4] * x[3*i+1] + d[9*i+7] * x[3*i+2]; y[3*i+2] = 
d[9*i+2] * x[3*i] + d[9*i+5] * x[3*i+1] + d[9*i+8] * x[3*i+2]; } } else if( bn==4 ) { #ifdef _OPENMP #pragma omp parallel for private(i) #endif for(i=0; i<nr; i++) { y[4*i] = d[16*i] * x[4*i] + d[16*i+4] * x[4*i+1] + d[16*i+8] * x[4*i+2] + d[16*i+12] * x[4*i+3]; y[4*i+1] = d[16*i+1] * x[4*i] + d[16*i+5] * x[4*i+1] + d[16*i+9] * x[4*i+2] + d[16*i+13] * x[4*i+3]; y[4*i+2] = d[16*i+2] * x[4*i] + d[16*i+6] * x[4*i+1] + d[16*i+10] * x[4*i+2] + d[16*i+14] * x[4*i+3]; y[4*i+3] = d[16*i+3] * x[4*i] + d[16*i+7] * x[4*i+1] + d[16*i+11] * x[4*i+2] + d[16*i+15] * x[4*i+3]; } } else { #ifdef _OPENMP #pragma omp parallel for private(i) #endif for(i=0; i<nr; i++) { lis_array_matvec(bn,&d[i*bs],&x[i*bn],&y[i*bn],LIS_INS_VALUE); } } } LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_matrix_diag_matvect" LIS_INT lis_matrix_diag_matvect(LIS_MATRIX_DIAG D, LIS_VECTOR X, LIS_VECTOR Y) { LIS_INT i,nr,bn,bs; LIS_SCALAR *d,*x,*y; LIS_DEBUG_FUNC_IN; d = D->value; x = X->value; y = Y->value; nr = D->nr; bn = D->bn; bs = D->bn*D->bn; if( D->bns ) { #ifdef _OPENMP #pragma omp parallel for private(i,bn) #endif for(i=0; i<nr; i++) { bn = D->bns[i]; lis_array_matvect(bn,D->v_value[i],&x[i*bn],&y[i*bn],LIS_INS_VALUE); } } else { #if 0 if( bn==1 ) { #pragma omp parallel for private(i) for(i=0; i<nr; i++) { y[i] = x[i] * d[i]; } } else if( bn==2 ) { #pragma omp parallel for private(i) for(i=0; i<nr; i++) { y[2*i] = d[4*i] * x[2*i] + d[4*i+1] * x[2*i+1]; y[2*i+1] = d[4*i+2] * x[2*i] + d[4*i+3] * x[2*i+1]; } } else if( bn==3 ) { #pragma omp parallel for private(i) for(i=0; i<nr; i++) { y[3*i] = d[9*i] * x[3*i] + d[9*i+1] * x[3*i+1] + d[9*i+2] * x[3*i+2]; y[3*i+1] = d[9*i+3] * x[3*i] + d[9*i+4] * x[3*i+1] + d[9*i+5] * x[3*i+2]; y[3*i+2] = d[9*i+6] * x[3*i] + d[9*i+7] * x[3*i+1] + d[9*i+8] * x[3*i+2]; } } else if( bn==4 ) { #pragma omp parallel for private(i) for(i=0; i<nr; i++) { y[4*i] = d[16*i ] * x[4*i] + d[16*i+ 1] * x[4*i+1] + d[16*i+ 2] * x[4*i+2] + d[16*i+ 3] * x[4*i+3]; y[4*i+1] = d[16*i+ 4] * x[4*i] + d[16*i+ 5] * x[4*i+1] + d[16*i+ 6] * x[4*i+2] + d[16*i+ 7] * x[4*i+3]; y[4*i+2] = d[16*i+ 8] * x[4*i] + d[16*i+ 9] * x[4*i+1] + d[16*i+10] * x[4*i+2] + d[16*i+11] * x[4*i+3]; y[4*i+3] = d[16*i+12] * x[4*i] + d[16*i+13] * x[4*i+1] + d[16*i+14] * x[4*i+2] + d[16*i+15] * x[4*i+3]; } } else #endif { #ifdef _OPENMP #pragma omp parallel for private(i) #endif for(i=0; i<nr; i++) { lis_array_matvect(bn,&d[i*bs],&x[i*bn],&y[i*bn],LIS_INS_VALUE); } } } LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_matrix_diag_print" LIS_INT lis_matrix_diag_print(LIS_MATRIX_DIAG D) { LIS_INT k,i,j,nr,bn,nn; LIS_DEBUG_FUNC_IN; nr = D->nr; if( D->bns ) { nn = 0; for(k=0; k<nr; k++) { bn = D->bns[k]; for(i=0;i<bn;i++) { printf("%4d (", nn+i); for(j=0; j<bn; j++) { printf("%6.2f ", D->v_value[k][j*bn+i]); } printf(")\n"); } nn += bn; } } else { bn = D->bn; nn = D->bn*D->bn; for(k=0; k<nr; k++) { for(i=0;i<bn;i++) { printf("%4d (", k*nn+i); for(j=0; j<bn; j++) { printf("%6.2f ", D->value[k*nn + j*bn+i]); } printf(")\n"); } } } LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; }
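/* Standalone sketch (not part of lis): the per-block storage that the unrolled
   bn==2/3/4 cases of lis_matrix_diag_matvec above imply. Each bn x bn diagonal
   block i is stored contiguously and column-major, so entry (r,c) of block i
   lives at d[i*bn*bn + c*bn + r]. Types and names here are illustrative only. */
static void block_diag_matvec_sketch(int nr, int bn, const double *d,
                                     const double *x, double *y)
{
    const int bs = bn * bn;              /* values per block           */
    for (int i = 0; i < nr; i++) {       /* one bn x bn block per row  */
        for (int r = 0; r < bn; r++) {
            double sum = 0.0;
            for (int c = 0; c < bn; c++)
                sum += d[i * bs + c * bn + r] * x[i * bn + c];
            y[i * bn + r] = sum;         /* reproduces y[2*i], y[2*i+1], ... of the bn==2 case */
        }
    }
}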
normal_gap_process.h
// KRATOS ___| | | | // \___ \ __| __| | | __| __| | | __| _` | | // | | | | | ( | | | | ( | | // _____/ \__|_| \__,_|\___|\__|\__,_|_| \__,_|_| MECHANICS // // License: BSD License // license: StructuralMechanicsApplication/license.txt // // Main authors: Vicente Mataix Ferrandiz // #if !defined(KRATOS_NORMAL_GAP_PROCESS_H_INCLUDED ) #define KRATOS_NORMAL_GAP_PROCESS_H_INCLUDED // System includes // External includes // Project includes #include "processes/process.h" #include "includes/model_part.h" #include "processes/simple_mortar_mapper_process.h" namespace Kratos { ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /** * @class NormalGapProcess * @ingroup ContactStructuralMechanicsApplication * @brief This process computes the normal gap * @author Vicente Mataix Ferrandiz * @tparam TDim The dimension of work * @tparam TNumNodes The number of nodes of the slave * @tparam TNumNodesMaster The number of nodes of the master */ template<SizeType TDim, SizeType TNumNodes, SizeType TNumNodesMaster = TNumNodes> class KRATOS_API(CONTACT_STRUCTURAL_MECHANICS_APPLICATION) NormalGapProcess : public Process { public: ///@name Type Definitions ///@{ /// The type of mapper considered typedef SimpleMortarMapperProcess<TDim, TNumNodes, Variable<array_1d<double, 3>>, TNumNodesMaster> MapperType; /// General type definitions typedef ModelPart::NodesContainerType NodesArrayType; /// The definition of zero tolerance static constexpr double ZeroTolerance = std::numeric_limits<double>::epsilon(); /// Pointer definition of NormalGapProcess KRATOS_CLASS_POINTER_DEFINITION( NormalGapProcess ); ///@} ///@name Enum's ///@{ ///@} ///@name Life Cycle ///@{ /** * @brief The constructor of the normal gap process uses the following inputs: * @param rMasterModelPart The master model part to be considered * @param rSlaveModelPart The slave model part to be considered */ NormalGapProcess( ModelPart& rMasterModelPart, ModelPart& rSlaveModelPart, const bool SearchOrientation = true ) : mrMasterModelPart(rMasterModelPart), mrSlaveModelPart(rSlaveModelPart), mSearchOrientation(SearchOrientation) { } virtual ~NormalGapProcess()= default;; ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ void operator()() { Execute(); } ///@} ///@name Operations ///@{ /** * @brief Execute method is used to execute the Process algorithms. 
*/ void Execute() override; ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ /************************************ GET INFO *************************************/ /***********************************************************************************/ std::string Info() const override { return "NormalGapProcess"; } /************************************ PRINT INFO ***********************************/ /***********************************************************************************/ void PrintInfo(std::ostream& rOStream) const override { rOStream << Info(); } ///@} ///@name Friends ///@{ ///@} protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ ModelPart& mrMasterModelPart; /// The master model part to be considered ModelPart& mrSlaveModelPart; /// The slave model part to be considered const bool mSearchOrientation; /// The orientation of the search (inverted or not) ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ /** * @brief This method switchs the flag of an array of nodes * @param rNodes The set of nodes where the flags are reset */ static inline void SwitchFlagNodes(NodesArrayType& rNodes) { #pragma omp parallel for for(int i = 0; i < static_cast<int>(rNodes.size()); ++i) { auto it_node = rNodes.begin() + i; it_node->Flip(SLAVE); it_node->Flip(MASTER); } } /** * @brief This method computes the normal gap * @param rNodes The set of nodes where the gap is computed */ void ComputeNormalGap(NodesArrayType& rNodes); ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ ///@} ///@name Private Access ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ ///@} }; // Class NormalGapProcess ///@} ///@name Type Definitions ///@{ ///@} ///@name Input and output ///@{ /****************************** INPUT STREAM FUNCTION ******************************/ /***********************************************************************************/ template<SizeType TDim, SizeType TNumNodes, SizeType TNumNodesMaster> inline std::istream& operator >> (std::istream& rIStream, NormalGapProcess<TDim, TNumNodes, TNumNodesMaster>& rThis); /***************************** OUTPUT STREAM FUNCTION ******************************/ /***********************************************************************************/ template<SizeType TDim, SizeType TNumNodes, SizeType TNumNodesMaster> inline std::ostream& operator << (std::ostream& rOStream, const NormalGapProcess<TDim, TNumNodes, TNumNodesMaster>& rThis) { return rOStream; } ///@} } // namespace Kratos. #endif // KRATOS_NORMAL_GAP_PROCESS_H_INCLUDED defined
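// Minimal usage sketch. Assumptions: the include path and the <3,3> template
// instantiation (3D, 3-node conditions) are guesses, and both model parts are
// assumed to be already populated with paired contact interfaces; only the
// constructor and Execute() shown here come from the header above.
#include "custom_processes/normal_gap_process.h"

void ComputeNormalGapSketch(Kratos::ModelPart& rMasterModelPart,
                            Kratos::ModelPart& rSlaveModelPart)
{
    // Computes the normal gap on the contact interface between the two parts.
    Kratos::NormalGapProcess<3, 3> normal_gap_process(
        rMasterModelPart, rSlaveModelPart, /*SearchOrientation=*/true);
    normal_gap_process.Execute();
}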
private-clauseModificado3.c
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#endif

int main()
{
    int i, n = 7;
    int a[n], suma;

    for (i = 0; i < n; i++)
        a[i] = i;
    suma = 0;

    #pragma omp parallel
    {
        /* "suma" is shared and updated without synchronization, so this
           accumulation is a data race; a corrected variant follows below. */
        #pragma omp for
        for (i = 0; i < n; i++) {
            suma = suma + a[i];
            printf("thread %d suma a[%d] / ", omp_get_thread_num(), i);
        }
        printf("\n thread %d suma= %d", omp_get_thread_num(), suma);
    }
    printf("\n");
    return 0;
}
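/* Illustrative follow-up (not part of the original exercise file): the same
   accumulation with the race removed. A reduction(+:suma) clause gives each
   thread a private partial sum that OpenMP combines when the loop finishes. */
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#endif

int main()
{
    int i;
    const int n = 7;
    int a[7], suma;

    for (i = 0; i < n; i++)
        a[i] = i;
    suma = 0;

    #pragma omp parallel for reduction(+:suma)
    for (i = 0; i < n; i++) {
        suma += a[i];
        printf("thread %d suma a[%d] / ", omp_get_thread_num(), i);
    }
    printf("\nsuma= %d\n", suma);   /* deterministic: 0+1+...+6 = 21 */
    return 0;
}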
GB_unaryop__identity_bool_int16.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__identity_bool_int16 // op(A') function: GB_tran__identity_bool_int16 // C type: bool // A type: int16_t // cast: bool cij = (bool) aij // unaryop: cij = aij #define GB_ATYPE \ int16_t #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int16_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, x) \ bool z = (bool) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_INT16) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__identity_bool_int16 ( bool *restrict Cx, const int16_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__identity_bool_int16 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
ft.c
/*-------------------------------------------------------------------- NAS Parallel Benchmarks 3.0 structured OpenMP C versions - FT This benchmark is an OpenMP C version of the NPB FT code. The OpenMP C 2.3 versions are derived by RWCP from the serial Fortran versions in "NPB 2.3-serial" developed by NAS. 3.0 translation is performed by the UVSQ. Permission to use, copy, distribute and modify this software for any purpose with or without fee is hereby granted. This software is provided "as is" without express or implied warranty. Information on OpenMP activities at RWCP is available at: http://pdplab.trc.rwcp.or.jp/pdperf/Omni/ Information on NAS Parallel Benchmarks 2.3 is available at: http://www.nas.nasa.gov/NAS/NPB/ --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- Authors: D. Bailey W. Saphir OpenMP C version: S. Satoh 3.0 structure translation: M. Popov --------------------------------------------------------------------*/ #include "../common/npb-C.h" #include "../math/nas_math.h" /* global variables */ #include "global.h" #include <nautilus/nautilus.h> #include <nautilus/shell.h> /* function declarations */ static void evolve(dcomplex u0[NZ][NY][NX], dcomplex u1[NZ][NY][NX], int t, int indexmap[NZ][NY][NX], int d[3]); static void compute_initial_conditions(dcomplex u0[NZ][NY][NX], int d[3]); static void ipow46(double a, int exponent, double *result); static void setup(void); static void compute_indexmap(int indexmap[NZ][NY][NX], int d[3]); static void print_timers(void); static void fft(int dir, dcomplex x1[NZ][NY][NX], dcomplex x2[NZ][NY][NX]); static void cffts1(int is, int d[3], dcomplex x[NZ][NY][NX], dcomplex xout[NZ][NY][NX], dcomplex y0[NX][FFTBLOCKPAD], dcomplex y1[NX][FFTBLOCKPAD]); static void cffts2(int is, int d[3], dcomplex x[NZ][NY][NX], dcomplex xout[NZ][NY][NX], dcomplex y0[NX][FFTBLOCKPAD], dcomplex y1[NX][FFTBLOCKPAD]); static void cffts3(int is, int d[3], dcomplex x[NZ][NY][NX], dcomplex xout[NZ][NY][NX], dcomplex y0[NX][FFTBLOCKPAD], dcomplex y1[NX][FFTBLOCKPAD]); static void fft_init (int n); static void cfftz (int is, int m, int n, dcomplex x[NX][FFTBLOCKPAD], dcomplex y[NX][FFTBLOCKPAD]); static void fftz2 (int is, int l, int m, int n, int ny, int ny1, dcomplex u[NX], dcomplex x[NX][FFTBLOCKPAD], dcomplex y[NX][FFTBLOCKPAD]); static int ilog2(int n); static void checksum(int i, dcomplex u1[NZ][NY][NX], int d[3]); static void verify (int d1, int d2, int d3, int nt, boolean *verified, char *class); /*-------------------------------------------------------------------- c FT benchmark c-------------------------------------------------------------------*/ static int program_FT(char *_buf, void* _priv); static struct shell_cmd_impl nas_ft_impl = { .cmd = "nas-ft", .help_str = "NAS parallel benchmark FT", .handler = program_FT, }; nk_register_shell_cmd(nas_ft_impl); int program_FT(char * _buf, void *_priv) { /*c------------------------------------------------------------------- c-------------------------------------------------------------------*/ int i, ierr; /*------------------------------------------------------------------ c u0, u1, u2 are the main arrays in the problem. c Depending on the decomposition, these arrays will have different c dimensions. 
To accomodate all possibilities, we allocate them as c one-dimensional arrays and pass them to subroutines for different c views c - u0 contains the initial (transformed) initial condition c - u1 and u2 are working arrays c - indexmap maps i,j,k of u0 to the correct i^2+j^2+k^2 for the c time evolution operator. c-----------------------------------------------------------------*/ /*-------------------------------------------------------------------- c Large arrays are in common so that they are allocated on the c heap rather than the stack. This common block is not c referenced directly anywhere else. Padding is to avoid accidental c cache problems, since all array sizes are powers of two. c-------------------------------------------------------------------*/ static dcomplex u0[NZ][NY][NX]; static dcomplex pad1[3]; static dcomplex u1[NZ][NY][NX]; static dcomplex pad2[3]; static dcomplex u2[NZ][NY][NX]; static dcomplex pad3[3]; static int indexmap[NZ][NY][NX]; int iter; int nthreads = 1; double total_time, mflops; boolean verified; char class; /*-------------------------------------------------------------------- c Run the entire problem once to make sure all data is touched. c This reduces variable startup costs, which is important for such a c short benchmark. The other NPB 2 implementations are similar. c-------------------------------------------------------------------*/ for (i = 0; i < T_MAX; i++) { timer_clear(i); } setup(); compute_indexmap(indexmap, dims[2]); compute_initial_conditions(u1, dims[0]); fft_init (dims[0][0]); fft(1, u1, u0); /*-------------------------------------------------------------------- c Start over from the beginning. Note that all operations must c be timed, in contrast to other benchmarks. c-------------------------------------------------------------------*/ for (i = 0; i < T_MAX; i++) { timer_clear(i); } timer_start(T_TOTAL); if (TIMERS_ENABLED == TRUE) timer_start(T_SETUP); compute_indexmap(indexmap, dims[2]); compute_initial_conditions(u1, dims[0]); fft_init (dims[0][0]); if (TIMERS_ENABLED == TRUE) { timer_stop(T_SETUP); } if (TIMERS_ENABLED == TRUE) { timer_start(T_FFT); } fft(1, u1, u0); if (TIMERS_ENABLED == TRUE) { timer_stop(T_FFT); } for (iter = 1; iter <= niter; iter++) { if (TIMERS_ENABLED == TRUE) { timer_start(T_EVOLVE); } evolve(u0, u1, iter, indexmap, dims[0]); if (TIMERS_ENABLED == TRUE) { timer_stop(T_EVOLVE); } if (TIMERS_ENABLED == TRUE) { timer_start(T_FFT); } fft(-1, u1, u2); if (TIMERS_ENABLED == TRUE) { timer_stop(T_FFT); } if (TIMERS_ENABLED == TRUE) { timer_start(T_CHECKSUM); } checksum(iter, u2, dims[0]); if (TIMERS_ENABLED == TRUE) { timer_stop(T_CHECKSUM); } } verify(NX, NY, NZ, niter, &verified, &class); #pragma omp parallel { #if defined(_OPENMP) #pragma omp master nthreads = omp_get_num_threads(); #endif /* _OPENMP */ } /* end parallel */ timer_stop(T_TOTAL); total_time = timer_read(T_TOTAL); if( total_time != 0.0) { mflops = 1.0e-6*(double)(NTOTAL) * (14.8157+7.19641*log((double)(NTOTAL)) + (5.23518+7.21113*log((double)(NTOTAL)))*niter) /total_time; } else { mflops = 0.0; } c_print_results("FT", class, NX, NY, NZ, niter, nthreads, total_time, mflops, " floating point", verified, NPBVERSION, COMPILETIME, CS1, CS2, CS3, CS4, CS5, CS6, CS7); if (TIMERS_ENABLED == TRUE) print_timers(); } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void evolve(dcomplex u0[NZ][NY][NX], dcomplex u1[NZ][NY][NX], int t, int indexmap[NZ][NY][NX], 
int d[3]) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c evolve u0 -> u1 (t time steps) in fourier space c-------------------------------------------------------------------*/ int i, j, k; #pragma omp parallel for default(shared) private(i,j,k) for (k = 0; k < d[2]; k++) { for (j = 0; j < d[1]; j++) { for (i = 0; i < d[0]; i++) { crmul(u1[k][j][i], u0[k][j][i], ex[t*indexmap[k][j][i]]); } } } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void compute_initial_conditions(dcomplex u0[NZ][NY][NX], int d[3]) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c Fill in array u0 with initial conditions from c random number generator c-------------------------------------------------------------------*/ int k; double x0, start, an, dummy; static double tmp[NX*2*MAXDIM+1]; int i,j,t; start = SEED; /*-------------------------------------------------------------------- c Jump to the starting element for our first plane. c-------------------------------------------------------------------*/ ipow46(A, (zstart[0]-1)*2*NX*NY + (ystart[0]-1)*2*NX, &an); dummy = randlc(&start, an); ipow46(A, 2*NX*NY, &an); /*-------------------------------------------------------------------- c Go through by z planes filling in one square at a time. c-------------------------------------------------------------------*/ for (k = 0; k < dims[0][2]; k++) { x0 = start; vranlc(2*NX*dims[0][1], &x0, A, tmp); t = 1; for (j = 0; j < dims[0][1]; j++) for (i = 0; i < NX; i++) { u0[k][j][i].real = tmp[t++]; u0[k][j][i].imag = tmp[t++]; } if (k != dims[0][2]) dummy = randlc(&start, an); } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void ipow46(double a, int exponent, double *result) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c compute a^exponent mod 2^46 c-------------------------------------------------------------------*/ double dummy, q, r; int n, n2; /*-------------------------------------------------------------------- c Use c a^n = a^(n/2)*a^(n/2) if n even else c a^n = a*a^(n-1) if n odd c-------------------------------------------------------------------*/ *result = 1; if (exponent == 0) return; q = a; r = 1; n = exponent; while (n > 1) { n2 = n/2; if (n2 * 2 == n) { dummy = randlc(&q, q); n = n2; } else { dummy = randlc(&r, q); n = n-1; } } dummy = randlc(&r, q); *result = r; } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void setup(void) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ int ierr, i, j, fstatus; printf("\n\n NAS Parallel Benchmarks 3.0 structured OpenMP C version" " - FT Benchmark\n\n"); niter = NITER_DEFAULT; printf(" Size : %3dx%3dx%3d\n", NX, NY, NZ); printf(" Iterations : %7d\n", niter); /* 1004 format(' 
Number of processes : ', i7) 1005 format(' Processor array : ', i3, 'x', i3) 1006 format(' WARNING: compiled for ', i5, ' processes. ', > ' Will not verify. ')*/ for (i = 0;i < 3 ; i++) { dims[i][0] = NX; dims[i][1] = NY; dims[i][2] = NZ; } for (i = 0; i < 3; i++) { xstart[i] = 1; xend[i] = NX; ystart[i] = 1; yend[i] = NY; zstart[i] = 1; zend[i] = NZ; } /*-------------------------------------------------------------------- c Set up info for blocking of ffts and transposes. This improves c performance on cache-based systems. Blocking involves c working on a chunk of the problem at a time, taking chunks c along the first, second, or third dimension. c c - In cffts1 blocking is on 2nd dimension (with fft on 1st dim) c - In cffts2/3 blocking is on 1st dimension (with fft on 2nd and 3rd dims) c Since 1st dim is always in processor, we'll assume it's long enough c (default blocking factor is 16 so min size for 1st dim is 16) c The only case we have to worry about is cffts1 in a 2d decomposition. c so the blocking factor should not be larger than the 2nd dimension. c-------------------------------------------------------------------*/ fftblock = FFTBLOCK_DEFAULT; fftblockpad = FFTBLOCKPAD_DEFAULT; if (fftblock != FFTBLOCK_DEFAULT) fftblockpad = fftblock+3; } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void compute_indexmap(int indexmap[NZ][NY][NX], int d[3]) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c compute function from local (i,j,k) to ibar^2+jbar^2+kbar^2 c for time evolution exponent. c-------------------------------------------------------------------*/ int i, j, k, ii, ii2, jj, ij2, kk; double ap; /*-------------------------------------------------------------------- c basically we want to convert the fortran indices c 1 2 3 4 5 6 7 8 c to c 0 1 2 3 -4 -3 -2 -1 c The following magic formula does the trick: c mod(i-1+n/2, n) - n/2 c-------------------------------------------------------------------*/ #pragma omp parallel for default(shared) private(i,j,k,ii,ii2,jj,ij2,kk) for (i = 0; i < dims[2][0]; i++) { ii = (i+1+xstart[2]-2+NX/2)%NX - NX/2; ii2 = ii*ii; for (j = 0; j < dims[2][1]; j++) { jj = (j+1+ystart[2]-2+NY/2)%NY - NY/2; ij2 = jj*jj+ii2; for (k = 0; k < dims[2][2]; k++) { kk = (k+1+zstart[2]-2+NZ/2)%NZ - NZ/2; indexmap[k][j][i] = kk*kk+ij2; } } } /*-------------------------------------------------------------------- c compute array of exponentials for time evolution. 
c-------------------------------------------------------------------*/ ap = - 4.0 * ALPHA * PI * PI; ex[0] = 1.0; ex[1] = exp(ap); for (i = 2; i <= EXPMAX; i++) { ex[i] = ex[i-1]*ex[1]; } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void print_timers(void) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ int i; char *tstrings[] = { " total ", " setup ", " fft ", " evolve ", " checksum ", " fftlow ", " fftcopy " }; for (i = 0; i < T_MAX; i++) { if (timer_read(i) != 0.0) { printf("timer %2d(%16s( :%10.6f\n", i, tstrings[i], timer_read(i)); } } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void fft(int dir, dcomplex x1[NZ][NY][NX], dcomplex x2[NZ][NY][NX]) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ dcomplex y0[NX][FFTBLOCKPAD]; dcomplex y1[NX][FFTBLOCKPAD]; /*-------------------------------------------------------------------- c note: args x1, x2 must be different arrays c note: args for cfftsx are (direction, layout, xin, xout, scratch) c xin/xout may be the same and it can be somewhat faster c if they are c-------------------------------------------------------------------*/ if (dir == 1) { cffts1(1, dims[0], x1, x1, y0, y1); /* x1 -> x1 */ cffts2(1, dims[1], x1, x1, y0, y1); /* x1 -> x1 */ cffts3(1, dims[2], x1, x2, y0, y1); /* x1 -> x2 */ } else { cffts3(-1, dims[2], x1, x1, y0, y1); /* x1 -> x1 */ cffts2(-1, dims[1], x1, x1, y0, y1); /* x1 -> x1 */ cffts1(-1, dims[0], x1, x2, y0, y1); /* x1 -> x2 */ } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void cffts1(int is, int d[3], dcomplex x[NZ][NY][NX], dcomplex xout[NZ][NY][NX], dcomplex y0[NX][FFTBLOCKPAD], dcomplex y1[NX][FFTBLOCKPAD]) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ int logd[3]; int i, j, k, jj; for (i = 0; i < 3; i++) { logd[i] = ilog2(d[i]); } #pragma omp parallel default(shared) private(i,j,k,jj) shared(is) { dcomplex y0[NX][FFTBLOCKPAD]; dcomplex y1[NX][FFTBLOCKPAD]; #pragma omp for for (k = 0; k < d[2]; k++) { for (jj = 0; jj <= d[1] - fftblock; jj+=fftblock) { /* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */ for (j = 0; j < fftblock; j++) { for (i = 0; i < d[0]; i++) { y0[i][j].real = x[k][j+jj][i].real; y0[i][j].imag = x[k][j+jj][i].imag; } } /* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */ /* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */ cfftz (is, logd[0], d[0], y0, y1); /* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */ /* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */ for (j = 0; j < fftblock; j++) { for (i = 0; i < d[0]; i++) { xout[k][j+jj][i].real = y0[i][j].real; xout[k][j+jj][i].imag = y0[i][j].imag; } } /* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */ } } } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void cffts2(int is, int d[3], dcomplex x[NZ][NY][NX], dcomplex xout[NZ][NY][NX], dcomplex y0[NX][FFTBLOCKPAD], dcomplex y1[NX][FFTBLOCKPAD]) { 
/*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ int logd[3]; int i, j, k, ii; for (i = 0; i < 3; i++) { logd[i] = ilog2(d[i]); } #pragma omp parallel default(shared) private(i,j,k,ii) shared(is) { dcomplex y0[NX][FFTBLOCKPAD]; dcomplex y1[NX][FFTBLOCKPAD]; #pragma omp for for (k = 0; k < d[2]; k++) { for (ii = 0; ii <= d[0] - fftblock; ii+=fftblock) { /* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */ for (j = 0; j < d[1]; j++) { for (i = 0; i < fftblock; i++) { y0[j][i].real = x[k][j][i+ii].real; y0[j][i].imag = x[k][j][i+ii].imag; } } /* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */ /* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */ cfftz (is, logd[1], d[1], y0, y1); /* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */ /* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */ for (j = 0; j < d[1]; j++) { for (i = 0; i < fftblock; i++) { xout[k][j][i+ii].real = y0[j][i].real; xout[k][j][i+ii].imag = y0[j][i].imag; } } /* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */ } } } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void cffts3(int is, int d[3], dcomplex x[NZ][NY][NX], dcomplex xout[NZ][NY][NX], dcomplex y0[NX][FFTBLOCKPAD], dcomplex y1[NX][FFTBLOCKPAD]) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ int logd[3]; int i, j, k, ii; for (i = 0;i < 3; i++) { logd[i] = ilog2(d[i]); } #pragma omp parallel default(shared) private(i,j,k,ii) shared(is) { dcomplex y0[NX][FFTBLOCKPAD]; dcomplex y1[NX][FFTBLOCKPAD]; #pragma omp for for (j = 0; j < d[1]; j++) { for (ii = 0; ii <= d[0] - fftblock; ii+=fftblock) { /* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */ for (k = 0; k < d[2]; k++) { for (i = 0; i < fftblock; i++) { y0[k][i].real = x[k][j][i+ii].real; y0[k][i].imag = x[k][j][i+ii].imag; } } /* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */ /* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */ cfftz (is, logd[2], d[2], y0, y1); /* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */ /* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */ for (k = 0; k < d[2]; k++) { for (i = 0; i < fftblock; i++) { xout[k][j][i+ii].real = y0[k][i].real; xout[k][j][i+ii].imag = y0[k][i].imag; } } /* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */ } } } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void fft_init (int n) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c compute the roots-of-unity array that will be used for subsequent FFTs. c-------------------------------------------------------------------*/ int m,nu,ku,i,j,ln; double t, ti; /*-------------------------------------------------------------------- c Initialize the U array with sines and cosines in a manner that permits c stride one access at each FFT iteration. 
c-------------------------------------------------------------------*/ nu = n; m = ilog2(n); u[0].real = (double)m; u[0].imag = 0.0; ku = 1; ln = 1; for (j = 1; j <= m; j++) { t = PI / ln; for (i = 0; i <= ln - 1; i++) { ti = i * t; u[i+ku].real = cos(ti); u[i+ku].imag = sin(ti); } ku = ku + ln; ln = 2 * ln; } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void cfftz (int is, int m, int n, dcomplex x[NX][FFTBLOCKPAD], dcomplex y[NX][FFTBLOCKPAD]) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c Computes NY N-point complex-to-complex FFTs of X using an algorithm due c to Swarztrauber. X is both the input and the output array, while Y is a c scratch array. It is assumed that N = 2^M. Before calling CFFTZ to c perform FFTs, the array U must be initialized by calling CFFTZ with IS c set to 0 and M set to MX, where MX is the maximum value of M for any c subsequent call. c-------------------------------------------------------------------*/ int i,j,l,mx; /*-------------------------------------------------------------------- c Check if input parameters are invalid. c-------------------------------------------------------------------*/ mx = (int)(u[0].real); if ((is != 1 && is != -1) || m < 1 || m > mx) { printf("CFFTZ: Either U has not been initialized, or else\n" "one of the input parameters is invalid%5d%5d%5d\n", is, m, mx); exit(1); } /*-------------------------------------------------------------------- c Perform one variant of the Stockham FFT. c-------------------------------------------------------------------*/ for (l = 1; l <= m; l+=2) { fftz2 (is, l, m, n, fftblock, fftblockpad, u, x, y); if (l == m) break; fftz2 (is, l + 1, m, n, fftblock, fftblockpad, u, y, x); } /*-------------------------------------------------------------------- c Copy Y to X. c-------------------------------------------------------------------*/ if (m % 2 == 1) { for (j = 0; j < n; j++) { for (i = 0; i < fftblock; i++) { x[j][i].real = y[j][i].real; x[j][i].imag = y[j][i].imag; } } } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void fftz2 (int is, int l, int m, int n, int ny, int ny1, dcomplex u[NX], dcomplex x[NX][FFTBLOCKPAD], dcomplex y[NX][FFTBLOCKPAD]) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c Performs the L-th iteration of the second variant of the Stockham FFT. c-------------------------------------------------------------------*/ int k,n1,li,lj,lk,ku,i,j,i11,i12,i21,i22; dcomplex u1,x11,x21; /*-------------------------------------------------------------------- c Set initial parameters. 
c-------------------------------------------------------------------*/ n1 = n / 2; if (l-1 == 0) { lk = 1; } else { lk = 2 << ((l - 1)-1); } if (m-l == 0) { li = 1; } else { li = 2 << ((m - l)-1); } lj = 2 * lk; ku = li; for (i = 0; i < li; i++) { i11 = i * lk; i12 = i11 + n1; i21 = i * lj; i22 = i21 + lk; if (is >= 1) { u1.real = u[ku+i].real; u1.imag = u[ku+i].imag; } else { u1.real = u[ku+i].real; u1.imag = -u[ku+i].imag; } /*-------------------------------------------------------------------- c This loop is vectorizable. c-------------------------------------------------------------------*/ for (k = 0; k < lk; k++) { for (j = 0; j < ny; j++) { double x11real, x11imag; double x21real, x21imag; x11real = x[i11+k][j].real; x11imag = x[i11+k][j].imag; x21real = x[i12+k][j].real; x21imag = x[i12+k][j].imag; y[i21+k][j].real = x11real + x21real; y[i21+k][j].imag = x11imag + x21imag; y[i22+k][j].real = u1.real * (x11real - x21real) - u1.imag * (x11imag - x21imag); y[i22+k][j].imag = u1.real * (x11imag - x21imag) + u1.imag * (x11real - x21real); } } } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static int ilog2(int n) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ int nn, lg; if (n == 1) { return 0; } lg = 1; nn = 2; while (nn < n) { nn = nn << 1; lg++; } return lg; } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void checksum(int i, dcomplex u1[NZ][NY][NX], int d[3]) { #pragma omp parallel default(shared) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ int j, q,r,s, ierr; dcomplex chk,allchk; chk.real = 0.0; chk.imag = 0.0; #pragma omp for nowait for (j = 1; j <= 1024; j++) { q = j%NX+1; if (q >= xstart[0] && q <= xend[0]) { r = (3*j)%NY+1; if (r >= ystart[0] && r <= yend[0]) { s = (5*j)%NZ+1; if (s >= zstart[0] && s <= zend[0]) { cadd(chk,chk,u1[s-zstart[0]][r-ystart[0]][q-xstart[0]]); } } } } #pragma omp critical { sums[i].real += chk.real; sums[i].imag += chk.imag; } #pragma omp barrier #pragma omp single { /* complex % real */ sums[i].real = sums[i].real/(double)(NTOTAL); sums[i].imag = sums[i].imag/(double)(NTOTAL); printf("T = %5d Checksum = %22.12e %22.12e\n", i, sums[i].real, sums[i].imag); } } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void verify (int d1, int d2, int d3, int nt, boolean *verified, char *class) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ int ierr, size, i; double err, epsilon; /*-------------------------------------------------------------------- c Sample size reference checksums c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c Class S size reference checksums c-------------------------------------------------------------------*/ double vdata_real_s[6+1] = { 0.0, 5.546087004964e+02, 5.546385409189e+02, 5.546148406171e+02, 5.545423607415e+02, 5.544255039624e+02, 5.542683411902e+02 }; double vdata_imag_s[6+1] = { 0.0, 4.845363331978e+02, 4.865304269511e+02, 4.883910722336e+02, 
4.901273169046e+02, 4.917475857993e+02, 4.932597244941e+02 }; /*-------------------------------------------------------------------- c Class W size reference checksums c-------------------------------------------------------------------*/ double vdata_real_w[6+1] = { 0.0, 5.673612178944e+02, 5.631436885271e+02, 5.594024089970e+02, 5.560698047020e+02, 5.530898991250e+02, 5.504159734538e+02 }; double vdata_imag_w[6+1] = { 0.0, 5.293246849175e+02, 5.282149986629e+02, 5.270996558037e+02, 5.260027904925e+02, 5.249400845633e+02, 5.239212247086e+02 }; /*-------------------------------------------------------------------- c Class A size reference checksums c-------------------------------------------------------------------*/ double vdata_real_a[6+1] = { 0.0, 5.046735008193e+02, 5.059412319734e+02, 5.069376896287e+02, 5.077892868474e+02, 5.085233095391e+02, 5.091487099959e+02 }; double vdata_imag_a[6+1] = { 0.0, 5.114047905510e+02, 5.098809666433e+02, 5.098144042213e+02, 5.101336130759e+02, 5.104914655194e+02, 5.107917842803e+02 }; /*-------------------------------------------------------------------- c Class B size reference checksums c-------------------------------------------------------------------*/ double vdata_real_b[20+1] = { 0.0, 5.177643571579e+02, 5.154521291263e+02, 5.146409228649e+02, 5.142378756213e+02, 5.139626667737e+02, 5.137423460082e+02, 5.135547056878e+02, 5.133910925466e+02, 5.132470705390e+02, 5.131197729984e+02, 5.130070319283e+02, 5.129070537032e+02, 5.128182883502e+02, 5.127393733383e+02, 5.126691062020e+02, 5.126064276004e+02, 5.125504076570e+02, 5.125002331720e+02, 5.124551951846e+02, 5.124146770029e+02 }; double vdata_imag_b[20+1] = { 0.0, 5.077803458597e+02, 5.088249431599e+02, 5.096208912659e+02, 5.101023387619e+02, 5.103976610617e+02, 5.105948019802e+02, 5.107404165783e+02, 5.108576573661e+02, 5.109577278523e+02, 5.110460304483e+02, 5.111252433800e+02, 5.111968077718e+02, 5.112616233064e+02, 5.113203605551e+02, 5.113735928093e+02, 5.114218460548e+02, 5.114656139760e+02, 5.115053595966e+02, 5.115415130407e+02, 5.115744692211e+02 }; /*-------------------------------------------------------------------- c Class C size reference checksums c-------------------------------------------------------------------*/ double vdata_real_c[20+1] = { 0.0, 5.195078707457e+02, 5.155422171134e+02, 5.144678022222e+02, 5.140150594328e+02, 5.137550426810e+02, 5.135811056728e+02, 5.134569343165e+02, 5.133651975661e+02, 5.132955192805e+02, 5.132410471738e+02, 5.131971141679e+02, 5.131605205716e+02, 5.131290734194e+02, 5.131012720314e+02, 5.130760908195e+02, 5.130528295923e+02, 5.130310107773e+02, 5.130103090133e+02, 5.129905029333e+02, 5.129714421109e+02 }; double vdata_imag_c[20+1] = { 0.0, 5.149019699238e+02, 5.127578201997e+02, 5.122251847514e+02, 5.121090289018e+02, 5.121143685824e+02, 5.121496764568e+02, 5.121870921893e+02, 5.122193250322e+02, 5.122454735794e+02, 5.122663649603e+02, 5.122830879827e+02, 5.122965869718e+02, 5.123075927445e+02, 5.123166486553e+02, 5.123241541685e+02, 5.123304037599e+02, 5.123356167976e+02, 5.123399592211e+02, 5.123435588985e+02, 5.123465164008e+02 }; epsilon = 1.0e-12; *verified = TRUE; *class = 'U'; if (d1 == 64 && d2 == 64 && d3 == 64 && nt == 6) { *class = 'S'; for (i = 1; i <= nt; i++) { err = (get_real(sums[i]) - vdata_real_s[i]) / vdata_real_s[i]; if (fabs(err) > epsilon) { *verified = FALSE; break; } err = (get_imag(sums[i]) - vdata_imag_s[i]) / vdata_imag_s[i]; if (fabs(err) > epsilon) { *verified = FALSE; break; } } } else if (d1 == 128 && d2 == 
128 && d3 == 32 && nt == 6) { *class = 'W'; for (i = 1; i <= nt; i++) { err = (get_real(sums[i]) - vdata_real_w[i]) / vdata_real_w[i]; if (fabs(err) > epsilon) { *verified = FALSE; break; } err = (get_imag(sums[i]) - vdata_imag_w[i]) / vdata_imag_w[i]; if (fabs(err) > epsilon) { *verified = FALSE; break; } } } else if (d1 == 256 && d2 == 256 && d3 == 128 && nt == 6) { *class = 'A'; for (i = 1; i <= nt; i++) { err = (get_real(sums[i]) - vdata_real_a[i]) / vdata_real_a[i]; if (fabs(err) > epsilon) { *verified = FALSE; break; } err = (get_imag(sums[i]) - vdata_imag_a[i]) / vdata_imag_a[i]; if (fabs(err) > epsilon) { *verified = FALSE; break; } } } else if (d1 == 512 && d2 == 256 && d3 == 256 && nt == 20) { *class = 'B'; for (i = 1; i <= nt; i++) { err = (get_real(sums[i]) - vdata_real_b[i]) / vdata_real_b[i]; if (fabs(err) > epsilon) { *verified = FALSE; break; } err = (get_imag(sums[i]) - vdata_imag_b[i]) / vdata_imag_b[i]; if (fabs(err) > epsilon) { *verified = FALSE; break; } } } else if (d1 == 512 && d2 == 512 && d3 == 512 && nt == 20) { *class = 'C'; for (i = 1; i <= nt; i++) { err = (get_real(sums[i]) - vdata_real_c[i]) / vdata_real_c[i]; if (fabs(err) > epsilon) { *verified = FALSE; break; } err = (get_imag(sums[i]) - vdata_imag_c[i]) / vdata_imag_c[i]; if (fabs(err) > epsilon) { *verified = FALSE; break; } } } if (*class != 'U') { printf("Result verification successful\n"); } else { printf("Result verification failed\n"); } printf("class = %1c\n", *class); }
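/* Conceptual sketch only: the square-and-multiply scheme that ipow46() above
   implements via randlc(). Because the modulus 2^46 divides 2^64, plain
   wrapping 64-bit multiplication followed by a 46-bit mask already gives the
   exact product mod 2^46. Names here are illustrative, not NPB code. */
#include <stdint.h>

static uint64_t mulmod46(uint64_t a, uint64_t b)
{
    const uint64_t mask = (UINT64_C(1) << 46) - 1;
    return (a * b) & mask;               /* (a*b) mod 2^46 */
}

static uint64_t ipow46_sketch(uint64_t a, unsigned exponent)
{
    const uint64_t mask = (UINT64_C(1) << 46) - 1;
    uint64_t q = a & mask;               /* running square a^(2^k) */
    uint64_t r = 1;                      /* accumulated result     */
    while (exponent > 0) {
        if (exponent & 1u)
            r = mulmod46(r, q);          /* a^n = a * a^(n-1) when n is odd  */
        q = mulmod46(q, q);              /* a^n = (a^(n/2))^2 when n is even */
        exponent >>= 1;
    }
    return r;
}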
BruteForceNnL1K2.h
#ifndef BRUTEFORCENNL1K2_H #define BRUTEFORCENNL1K2_H #include "EigenDefinitions.h" #include <cstdint> #include <emmintrin.h> #include <vector> /* * specialized brute-force computation of L1 norm with K=2 neighbours, designed * to work with SSE instructions * */ namespace spectavi { namespace filter { struct IdentityFilter { private: int m_size; int m_pos; public: IdentityFilter(int s): m_size(s), m_pos(-1) { } void init( int iyr ) { } int operator () () { m_pos++; return m_pos < m_size ? m_pos : -1; } }; } namespace implementation { uint16_t sad_16(const uint8_t a[16], const uint8_t b[16]) { __m128i _a = _mm_load_si128(reinterpret_cast<const __m128i *>(a)); __m128i _b = _mm_load_si128(reinterpret_cast<const __m128i *>(b)); __m128i sad = _mm_sad_epu8(_a, _b); return sad[0] + sad[1]; } } // namespace implementation using RowMatrixXu8 = Eigen::Matrix<uint8_t, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>; using RowMatrixXu8Map = Eigen::Map<RowMatrixXu8>; template <typename MatrixTypeLabel = RowMatrixXs> class BruteForceNnL1K2 { public: typedef RowMatrixXu8 MatrixType; typedef RowMatrixXi MatrixTypeBig; typedef MatrixType::Scalar Scalar; typedef MatrixTypeBig::Scalar ScalarBig; typedef typename MatrixTypeLabel::Scalar Label; typedef Eigen::Map<const MatrixType> MatrixTypeMap; private: MatrixTypeMap m_x; MatrixTypeMap m_y; const int m_K; public: BruteForceNnL1K2(const Scalar *x, const Scalar *y, int xrows, int yrows, int dim) : m_x(x, xrows, dim), m_y(y, yrows, dim), m_K(2) { if (m_x.cols() != m_y.cols()) { throw std::runtime_error("Matrix inner dimensions must match."); } if (m_x.cols() % 16 != 0) { // dimensions of x,y must be aligned with 16 throw std::runtime_error( "Input matrix inner dimensions must be 16-byte aligned."); } } template<typename Filter = filter::IdentityFilter> void find_neighbours(Eigen::Ref<MatrixTypeLabel> out_idx, Eigen::Ref<MatrixTypeBig> out_dist, Filter &filt, int nthread = 8) const { const int dim = m_x.cols(); const int n128i = (dim / 16); // number of 128-byte datatypes per row // scan through every row #pragma omp parallel for num_threads(nthread) for (int irow = 0; irow < m_y.rows(); ++irow) { // get local references to outputs auto &first_i = out_idx(irow, 0); auto &second_i = out_idx(irow, 1); auto &first_d = out_dist(irow, 0); auto &second_d = out_dist(irow, 1); // set distances to infinity first_d = std::numeric_limits<ScalarBig>::max(); second_d = std::numeric_limits<ScalarBig>::max(); first_i = -1; second_i = -1; // start main computation ScalarBig worst_dist = -1; const Scalar *_y = m_y.row(irow).data(); Filter _local_filt = filt; // copy the filter locally for this thread _local_filt.init(irow); // init the filter for each y-row for (int irowx = _local_filt(); irowx >= 0; irowx = _local_filt() ) { const Scalar *_x = m_x.row(irowx).data(); ScalarBig distp = 0; bool prune = false; // loop over 128-byte groups and use SSE instructions to calculate SAD for (int i128i = 0; i128i < n128i; ++i128i) { const Scalar *__x = _x + 16 * i128i; const Scalar *__y = _y + 16 * i128i; distp += implementation::sad_16(__x, __y); if (worst_dist >= 0 && distp > worst_dist) { prune = true; break; } } if (prune) { // early exit detected continue; } // std::cout << "no-prune, distp: " << distp // << " worst_dist: " << worst_dist << std::endl; if (distp < first_d) { // move things down one std::swap(first_d, second_d); std::swap(first_i, second_i); first_d = distp; first_i = irowx; } else if (distp < second_d) { // set the second value only second_d = distp; second_i = irowx; 
} if (second_i != -1) { worst_dist = second_d; } } } } }; } // namespace spectavi #endif // BRUTEFORCENNL1K2
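// Minimal usage sketch for the class above. Assumptions: RowMatrixXi from
// EigenDefinitions.h is a row-major dynamic Eigen int matrix (an explicit label
// type is passed so the sketch does not rely on RowMatrixXs), the descriptor
// data is dummy, and the base pointers are 16-byte aligned as required by the
// _mm_load_si128 calls in sad_16 (typical allocators satisfy this).
#include <cstdint>
#include <vector>
#include <Eigen/Dense>
#include "BruteForceNnL1K2.h"

int main()
{
    using Label = Eigen::Matrix<int, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>;
    using NN    = spectavi::BruteForceNnL1K2<Label>;

    const int dim = 32;                      // must be a multiple of 16 (SSE SAD width)
    const int xrows = 100, yrows = 10;
    std::vector<uint8_t> x(xrows * dim, 0), y(yrows * dim, 0);  // dummy descriptors

    NN nn(x.data(), y.data(), xrows, yrows, dim);

    Label out_idx(yrows, 2);                 // two nearest x-row indices per y-row
    NN::MatrixTypeBig out_dist(yrows, 2);    // their L1 (SAD) distances
    spectavi::filter::IdentityFilter filt(xrows);   // consider every x-row
    nn.find_neighbours(out_idx, out_dist, filt, /*nthread=*/4);
    return 0;
}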
GB_unop__abs_fp32_fc32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__abs_fp32_fc32) // op(A') function: GB (_unop_tran__abs_fp32_fc32) // C type: float // A type: GxB_FC32_t // cast: GxB_FC32_t cij = (aij) // unaryop: cij = cabsf (aij) #define GB_ATYPE \ GxB_FC32_t #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = cabsf (x) ; // casting #define GB_CAST(z, aij) \ GxB_FC32_t z = (aij) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC32_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC32_t z = (aij) ; \ Cx [pC] = cabsf (z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ABS || GxB_NO_FP32 || GxB_NO_FC32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__abs_fp32_fc32) ( float *Cx, // Cx and Ax may be aliased const GxB_FC32_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC32_t aij = Ax [p] ; GxB_FC32_t z = (aij) ; Cx [p] = cabsf (z) ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; GxB_FC32_t aij = Ax [p] ; GxB_FC32_t z = (aij) ; Cx [p] = cabsf (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__abs_fp32_fc32) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
geopm_sched.c
/* * Copyright (c) 2015 - 2022, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY LOG OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef _GNU_SOURCE #define _GNU_SOURCE #endif #include <stdlib.h> #include <string.h> #include <stdio.h> #include <stdint.h> #include <unistd.h> #include <pthread.h> #include <errno.h> #include <string.h> #include <signal.h> #include "geopm_sched.h" #include "geopm_error.h" #include "config.h" #ifdef _OPENMP #include <omp.h> #endif static volatile unsigned g_is_popen_complete = 0; static struct sigaction g_popen_complete_signal_action; static void geopm_sched_popen_complete(int signum) { if (signum == SIGCHLD) { g_is_popen_complete = 1; } } int geopm_sched_popen(const char *cmd, FILE **fid) { int err = 0; *fid = NULL; struct sigaction save_action; g_popen_complete_signal_action.sa_handler = geopm_sched_popen_complete; sigemptyset(&g_popen_complete_signal_action.sa_mask); g_popen_complete_signal_action.sa_flags = 0; err = sigaction(SIGCHLD, &g_popen_complete_signal_action, &save_action); if (!err) { *fid = popen(cmd, "r"); while (*fid && !g_is_popen_complete) { } g_is_popen_complete = 0; sigaction(SIGCHLD, &save_action, NULL); } if (!err && *fid == NULL) { err = errno ? errno : GEOPM_ERROR_RUNTIME; } return err; } int geopm_sched_num_cpu(void) { return sysconf(_SC_NPROCESSORS_CONF); } int geopm_sched_get_cpu(void) { return sched_getcpu(); } static pthread_once_t g_proc_cpuset_once = PTHREAD_ONCE_INIT; static cpu_set_t *g_proc_cpuset = NULL; static size_t g_proc_cpuset_size = 0; /* If /proc/self/status is usable and correct then parse this file to determine the process affinity. */ int geopm_sched_proc_cpuset_helper(int num_cpu, uint32_t *proc_cpuset, FILE *fid) { const char *key = "Cpus_allowed:"; const size_t key_len = strlen(key); const int num_read = num_cpu / 32 + (num_cpu % 32 ? 1 : 0); int err = 0; char *line = NULL; size_t line_len = 0; int read_idx = 0; while ((getline(&line, &line_len, fid)) != -1) { if (strncmp(line, key, key_len) == 0) { char *line_ptr = line + key_len; /* On some systems we have seen the mask padded with zeros beyond the number of online CPUs. 
Deal with this by skipping extra leading 32 bit masks */ int num_comma = 0; char *comma_ptr = line_ptr; while ((comma_ptr = strchr(comma_ptr, ','))) { ++comma_ptr; ++num_comma; } if (num_comma > num_read - 1) { num_comma -= num_read - 1; for (int i = 0; !err && i < num_comma; ++i) { line_ptr = strchr(line_ptr, ','); if (!line_ptr) { err = GEOPM_ERROR_LOGIC; } else { ++line_ptr; } } } for (read_idx = num_read - 1; !err && read_idx >= 0; --read_idx) { int num_match = sscanf(line_ptr, "%x", proc_cpuset + read_idx); if (num_match != 1) { err = GEOPM_ERROR_RUNTIME; } else { line_ptr = strchr(line_ptr, ','); if (read_idx != 0 && line_ptr == NULL) { err = GEOPM_ERROR_RUNTIME; } else { ++line_ptr; } } } } } if (line) { free(line); } if (read_idx != -1) { err = GEOPM_ERROR_RUNTIME; } return err; } static void geopm_proc_cpuset_once(void) { const char *status_path = "/proc/self/status"; const int num_cpu = geopm_sched_num_cpu(); const int num_read = num_cpu / 32 + (num_cpu % 32 ? 1 : 0); int err = 0; uint32_t *proc_cpuset = NULL; FILE *fid = NULL; g_proc_cpuset = CPU_ALLOC(num_cpu); if (g_proc_cpuset == NULL) { err = ENOMEM; } if (!err) { g_proc_cpuset_size = CPU_ALLOC_SIZE(num_cpu); proc_cpuset = calloc(num_read, sizeof(*proc_cpuset)); if (proc_cpuset == NULL) { err = ENOMEM; } } if (!err) { fid = fopen(status_path, "r"); if (!fid) { err = errno ? errno : GEOPM_ERROR_RUNTIME; } } if (!err) { err = geopm_sched_proc_cpuset_helper(num_cpu, proc_cpuset, fid); } if (fid) { fclose(fid); } if (!err) { /* cpu_set_t is managed in units of unsigned long, and may have extra * bits at the end with undefined values. If that happens, * g_proc_cpuset_size may be greater than the size of proc_cpuset, * resulting in reading past the end of proc_cpuset. Avoid this by * only copying the number of bytes needed to contain the mask. Zero * the destination first, since it may not be fully overwritten. * * See the CPU_SET(3) man page for more details about cpu_set_t. */ CPU_ZERO_S(g_proc_cpuset_size, g_proc_cpuset); memcpy(g_proc_cpuset, proc_cpuset, num_read * sizeof(*proc_cpuset)); } else if (g_proc_cpuset) { for (int i = 0; i < num_cpu; ++i) { CPU_SET_S(i, g_proc_cpuset_size, g_proc_cpuset); } } if (proc_cpuset) { free(proc_cpuset); } } int geopm_sched_proc_cpuset(int num_cpu, cpu_set_t *proc_cpuset) { int err = pthread_once(&g_proc_cpuset_once, geopm_proc_cpuset_once); int sched_num_cpu = geopm_sched_num_cpu(); size_t cpuset_size = CPU_ALLOC_SIZE(num_cpu); if (!err && cpuset_size < g_proc_cpuset_size) { err = GEOPM_ERROR_INVALID; } if (!err) { /* Copy up to the smaller of the sizes to avoid buffer overruns. Zero * the destination set first, since it may not be fully overwritten */ CPU_ZERO_S(cpuset_size, proc_cpuset); memcpy(proc_cpuset, g_proc_cpuset, g_proc_cpuset_size); for (int i = sched_num_cpu; i < num_cpu; ++i) { CPU_CLR_S(i, cpuset_size, proc_cpuset); } } return err; } int geopm_sched_woomp(int num_cpu, cpu_set_t *woomp) { /*! @brief Function that returns a cpuset that has bits set for all CPUs enabled for the process which are not used by OpenMP. Rather than returning an empty mask, if all CPUs allocated for the process are used by OpenMP, then the woomp mask will have all bits set. 
*/ int err = pthread_once(&g_proc_cpuset_once, geopm_proc_cpuset_once); int sched_num_cpu = geopm_sched_num_cpu(); size_t req_alloc_size = CPU_ALLOC_SIZE(num_cpu); if (!err && !g_proc_cpuset) { err = ENOMEM; } if (!err && req_alloc_size < g_proc_cpuset_size) { err = EINVAL; } if (!err) { /* Copy the process CPU mask into the output. */ CPU_ZERO_S(req_alloc_size, woomp); memcpy(woomp, g_proc_cpuset, g_proc_cpuset_size); /* Start an OpenMP parallel region and have each thread clear its bit from the mask. */ #ifdef _OPENMP #pragma omp parallel default(shared) { #pragma omp critical { int cpu_index = sched_getcpu(); if (cpu_index != -1 && cpu_index < num_cpu) { /* Clear the bit for this OpenMP thread's CPU. */ CPU_CLR_S(cpu_index, g_proc_cpuset_size, woomp); } else { err = errno ? errno : GEOPM_ERROR_LOGIC; } } /* end pragma omp critical */ } /* end pragma omp parallel */ #endif /* _OPENMP */ } if (!err) { for (int i = sched_num_cpu; i < num_cpu; ++i) { CPU_CLR_S(i, req_alloc_size, woomp); } } if (err || CPU_COUNT_S(g_proc_cpuset_size, woomp) == 0) { /* If all CPUs are used by the OpenMP gang, then leave the mask open and allow the Linux scheduler to choose. */ for (int i = 0; i < num_cpu; ++i) { CPU_SET_S(i, g_proc_cpuset_size, woomp); } } return err; }
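/*
 * Illustrative caller sketch (not part of geopm_sched.c): geopm_sched_woomp()
 * is typically used to pin a helper or controller thread onto a CPU that the
 * OpenMP gang is not using.  It assumes the declarations in geopm_sched.h and,
 * like the file above, that _GNU_SOURCE is defined before the system headers.
 */
#include <errno.h>
#include <pthread.h>
#include <sched.h>
#include "geopm_sched.h"

static int pin_helper_thread_example(void)
{
    int num_cpu = geopm_sched_num_cpu();
    cpu_set_t *woomp = CPU_ALLOC(num_cpu);
    if (woomp == NULL) {
        return ENOMEM;
    }
    int err = geopm_sched_woomp(num_cpu, woomp);
    if (!err) {
        /* Restrict the calling thread to the CPUs left over by OpenMP. */
        err = pthread_setaffinity_np(pthread_self(),
                                     CPU_ALLOC_SIZE(num_cpu), woomp);
    }
    CPU_FREE(woomp);
    return err;
}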
enhance.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % EEEEE N N H H AAA N N CCCC EEEEE % % E NN N H H A A NN N C E % % EEE N N N HHHHH AAAAA N N N C EEE % % E N NN H H A A N NN C E % % EEEEE N N H H A A N N CCCC EEEEE % % % % % % MagickCore Image Enhancement Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/accelerate-private.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite-private.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/fx.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/geometry.h" #include "MagickCore/histogram.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/resource_.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/token.h" #include "MagickCore/xml-tree.h" #include "MagickCore/xml-tree-private.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A u t o G a m m a I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AutoGammaImage() extract the 'mean' from the image and adjust the image % to try make set its gamma appropriately. % % The format of the AutoGammaImage method is: % % MagickBooleanType AutoGammaImage(Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: The image to auto-level % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType AutoGammaImage(Image *image, ExceptionInfo *exception) { double gamma, log_mean, mean, sans; MagickStatusType status; register ssize_t i; log_mean=log(0.5); if (image->channel_mask == DefaultChannels) { /* Apply gamma correction equally across all given channels. 
*/ (void) GetImageMean(image,&mean,&sans,exception); gamma=log(mean*QuantumScale)/log_mean; return(LevelImage(image,0.0,(double) QuantumRange,gamma,exception)); } /* Auto-gamma each channel separately. */ status=MagickTrue; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { ChannelType channel_mask; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; channel_mask=SetImageChannelMask(image,(ChannelType) (1UL << i)); status=GetImageMean(image,&mean,&sans,exception); gamma=log(mean*QuantumScale)/log_mean; status&=LevelImage(image,0.0,(double) QuantumRange,gamma,exception); (void) SetImageChannelMask(image,channel_mask); if (status == MagickFalse) break; } return(status != 0 ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A u t o L e v e l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AutoLevelImage() adjusts the levels of a particular image channel by % scaling the minimum and maximum values to the full quantum range. % % The format of the LevelImage method is: % % MagickBooleanType AutoLevelImage(Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: The image to auto-level % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType AutoLevelImage(Image *image, ExceptionInfo *exception) { return(MinMaxStretchImage(image,0.0,0.0,1.0,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % B r i g h t n e s s C o n t r a s t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % BrightnessContrastImage() changes the brightness and/or contrast of an % image. It converts the brightness and contrast parameters into slope and % intercept and calls a polynomical function to apply to the image. % % The format of the BrightnessContrastImage method is: % % MagickBooleanType BrightnessContrastImage(Image *image, % const double brightness,const double contrast,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o brightness: the brightness percent (-100 .. 100). % % o contrast: the contrast percent (-100 .. 100). % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType BrightnessContrastImage(Image *image, const double brightness,const double contrast,ExceptionInfo *exception) { #define BrightnessContastImageTag "BrightnessContast/Image" double alpha, coefficients[2], intercept, slope; MagickBooleanType status; /* Compute slope and intercept. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); alpha=contrast; slope=tan((double) (MagickPI*(alpha/100.0+1.0)/4.0)); if (slope < 0.0) slope=0.0; intercept=brightness/100.0+((100-brightness)/200.0)*(1.0-slope); coefficients[0]=slope; coefficients[1]=intercept; status=FunctionImage(image,PolynomialFunction,2,coefficients,exception); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C L A H E I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CLAHEImage() is a variant of adaptive histogram equalization in which the % contrast amplification is limited, so as to reduce this problem of noise % amplification. % % Adapted from implementation by Karel Zuiderveld, karel@cv.ruu.nl in % "Graphics Gems IV", Academic Press, 1994. % % The format of the CLAHEImage method is: % % MagickBooleanType CLAHEImage(Image *image,const size_t width, % const size_t height,const size_t number_bins,const double clip_limit, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o width: the width of the tile divisions to use in horizontal direction. % % o height: the height of the tile divisions to use in vertical direction. % % o number_bins: number of bins for histogram ("dynamic range"). % % o clip_limit: contrast limit for localised changes in contrast. A limit % less than 1 results in standard non-contrast limited AHE. % % o exception: return any errors or warnings in this structure. % */ typedef struct _RangeInfo { unsigned short min, max; } RangeInfo; static void ClipCLAHEHistogram(const double clip_limit,const size_t number_bins, size_t *histogram) { #define NumberCLAHEGrays (65536) register ssize_t i; size_t cumulative_excess, previous_excess, step; ssize_t excess; /* Compute total number of excess pixels. */ cumulative_excess=0; for (i=0; i < (ssize_t) number_bins; i++) { excess=(ssize_t) histogram[i]-(ssize_t) clip_limit; if (excess > 0) cumulative_excess+=excess; } /* Clip histogram and redistribute excess pixels across all bins. */ step=cumulative_excess/number_bins; excess=(ssize_t) (clip_limit-step); for (i=0; i < (ssize_t) number_bins; i++) { if ((double) histogram[i] > clip_limit) histogram[i]=(size_t) clip_limit; else if ((ssize_t) histogram[i] > excess) { cumulative_excess-=histogram[i]-excess; histogram[i]=(size_t) clip_limit; } else { cumulative_excess-=step; histogram[i]+=step; } } /* Redistribute remaining excess. */ do { register size_t *p; size_t *q; previous_excess=cumulative_excess; p=histogram; q=histogram+number_bins; while ((cumulative_excess != 0) && (p < q)) { step=number_bins/cumulative_excess; if (step < 1) step=1; for (p=histogram; (p < q) && (cumulative_excess != 0); p+=step) if ((double) *p < clip_limit) { (*p)++; cumulative_excess--; } p++; } } while ((cumulative_excess != 0) && (cumulative_excess < previous_excess)); } static void GenerateCLAHEHistogram(const RectangleInfo *clahe_info, const RectangleInfo *tile_info,const size_t number_bins, const unsigned short *lut,const unsigned short *pixels,size_t *histogram) { register const unsigned short *p; register ssize_t i; /* Classify the pixels into a gray histogram. 
*/ for (i=0; i < (ssize_t) number_bins; i++) histogram[i]=0L; p=pixels; for (i=0; i < (ssize_t) tile_info->height; i++) { const unsigned short *q; q=p+tile_info->width; while (p < q) histogram[lut[*p++]]++; q+=clahe_info->width; p=q-tile_info->width; } } static void InterpolateCLAHE(const RectangleInfo *clahe_info,const size_t *Q12, const size_t *Q22,const size_t *Q11,const size_t *Q21, const RectangleInfo *tile,const unsigned short *lut,unsigned short *pixels) { ssize_t y; unsigned short intensity; /* Bilinear interpolate four tiles to eliminate boundary artifacts. */ for (y=(ssize_t) tile->height; y > 0; y--) { register ssize_t x; for (x=(ssize_t) tile->width; x > 0; x--) { intensity=lut[*pixels]; *pixels++=(unsigned short) (PerceptibleReciprocal((double) tile->width* tile->height)*(y*((double) x*Q12[intensity]+(tile->width-x)* Q22[intensity])+(tile->height-y)*((double) x*Q11[intensity]+ (tile->width-x)*Q21[intensity]))); } pixels+=(clahe_info->width-tile->width); } } static void GenerateCLAHELut(const RangeInfo *range_info, const size_t number_bins,unsigned short *lut) { ssize_t i; unsigned short delta; /* Scale input image [intensity min,max] to [0,number_bins-1]. */ delta=(unsigned short) ((range_info->max-range_info->min)/number_bins+1); for (i=(ssize_t) range_info->min; i <= (ssize_t) range_info->max; i++) lut[i]=(unsigned short) ((i-range_info->min)/delta); } static void MapCLAHEHistogram(const RangeInfo *range_info, const size_t number_bins,const size_t number_pixels,size_t *histogram) { double scale, sum; register ssize_t i; /* Rescale histogram to range [min-intensity .. max-intensity]. */ scale=(double) (range_info->max-range_info->min)/number_pixels; sum=0.0; for (i=0; i < (ssize_t) number_bins; i++) { sum+=histogram[i]; histogram[i]=(size_t) (range_info->min+scale*sum); if (histogram[i] > range_info->max) histogram[i]=range_info->max; } } static MagickBooleanType CLAHE(const RectangleInfo *clahe_info, const RectangleInfo *tile_info,const RangeInfo *range_info, const size_t number_bins,const double clip_limit,unsigned short *pixels) { MemoryInfo *tile_cache; register unsigned short *p; size_t limit, *tiles; ssize_t y; unsigned short *lut; /* Constrast limited adapted histogram equalization. */ if (clip_limit == 1.0) return(MagickTrue); tile_cache=AcquireVirtualMemory((size_t) clahe_info->x*clahe_info->y, number_bins*sizeof(*tiles)); if (tile_cache == (MemoryInfo *) NULL) return(MagickFalse); lut=(unsigned short *) AcquireQuantumMemory(NumberCLAHEGrays,sizeof(*lut)); if (lut == (unsigned short *) NULL) { tile_cache=RelinquishVirtualMemory(tile_cache); return(MagickFalse); } tiles=(size_t *) GetVirtualMemoryBlob(tile_cache); limit=(size_t) (clip_limit*(tile_info->width*tile_info->height)/number_bins); if (limit < 1UL) limit=1UL; /* Generate greylevel mappings for each tile. */ GenerateCLAHELut(range_info,number_bins,lut); p=pixels; for (y=0; y < (ssize_t) clahe_info->y; y++) { register ssize_t x; for (x=0; x < (ssize_t) clahe_info->x; x++) { size_t *histogram; histogram=tiles+(number_bins*(y*clahe_info->x+x)); GenerateCLAHEHistogram(clahe_info,tile_info,number_bins,lut,p,histogram); ClipCLAHEHistogram((double) limit,number_bins,histogram); MapCLAHEHistogram(range_info,number_bins,tile_info->width* tile_info->height,histogram); p+=tile_info->width; } p+=clahe_info->width*(tile_info->height-1); } /* Interpolate greylevel mappings to get CLAHE image. 
*/ p=pixels; for (y=0; y <= (ssize_t) clahe_info->y; y++) { OffsetInfo offset; RectangleInfo tile; register ssize_t x; tile.height=tile_info->height; tile.y=y-1; offset.y=tile.y+1; if (y == 0) { /* Top row. */ tile.height=tile_info->height >> 1; tile.y=0; offset.y=0; } else if (y == (ssize_t) clahe_info->y) { /* Bottom row. */ tile.height=(tile_info->height+1) >> 1; tile.y=clahe_info->y-1; offset.y=tile.y; } for (x=0; x <= (ssize_t) clahe_info->x; x++) { tile.width=tile_info->width; tile.x=x-1; offset.x=tile.x+1; if (x == 0) { /* Left column. */ tile.width=tile_info->width >> 1; tile.x=0; offset.x=0; } else if (x == (ssize_t) clahe_info->x) { /* Right column. */ tile.width=(tile_info->width+1) >> 1; tile.x=clahe_info->x-1; offset.x=tile.x; } InterpolateCLAHE(clahe_info, tiles+(number_bins*(tile.y*clahe_info->x+tile.x)), /* Q12 */ tiles+(number_bins*(tile.y*clahe_info->x+offset.x)), /* Q22 */ tiles+(number_bins*(offset.y*clahe_info->x+tile.x)), /* Q11 */ tiles+(number_bins*(offset.y*clahe_info->x+offset.x)), /* Q21 */ &tile,lut,p); p+=tile.width; } p+=clahe_info->width*(tile.height-1); } lut=(unsigned short *) RelinquishMagickMemory(lut); tile_cache=RelinquishVirtualMemory(tile_cache); return(MagickTrue); } MagickExport MagickBooleanType CLAHEImage(Image *image,const size_t width, const size_t height,const size_t number_bins,const double clip_limit, ExceptionInfo *exception) { #define CLAHEImageTag "CLAHE/Image" CacheView *image_view; ColorspaceType colorspace; MagickBooleanType status; MagickOffsetType progress; MemoryInfo *pixel_cache; RangeInfo range_info; RectangleInfo clahe_info, tile_info; size_t n; ssize_t y; unsigned short *pixels; /* Configure CLAHE parameters. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); range_info.min=0; range_info.max=NumberCLAHEGrays-1; tile_info.width=width; if (tile_info.width == 0) tile_info.width=image->columns >> 3; tile_info.height=height; if (tile_info.height == 0) tile_info.height=image->rows >> 3; tile_info.x=0; if ((image->columns % tile_info.width) != 0) tile_info.x=(ssize_t) tile_info.width-(image->columns % tile_info.width); tile_info.y=0; if ((image->rows % tile_info.height) != 0) tile_info.y=(ssize_t) tile_info.height-(image->rows % tile_info.height); clahe_info.width=image->columns+tile_info.x; clahe_info.height=image->rows+tile_info.y; clahe_info.x=(ssize_t) clahe_info.width/tile_info.width; clahe_info.y=(ssize_t) clahe_info.height/tile_info.height; pixel_cache=AcquireVirtualMemory(clahe_info.width,clahe_info.height* sizeof(*pixels)); if (pixel_cache == (MemoryInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); pixels=(unsigned short *) GetVirtualMemoryBlob(pixel_cache); colorspace=image->colorspace; if (TransformImageColorspace(image,LabColorspace,exception) == MagickFalse) { pixel_cache=RelinquishVirtualMemory(pixel_cache); return(MagickFalse); } /* Initialize CLAHE pixels. 
*/ image_view=AcquireVirtualCacheView(image,exception); progress=0; status=MagickTrue; n=0; for (y=0; y < (ssize_t) clahe_info.height; y++) { register const Quantum *magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-(tile_info.x >> 1),y- (tile_info.y >> 1),clahe_info.width,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) clahe_info.width; x++) { pixels[n++]=ScaleQuantumToShort(p[0]); p+=GetPixelChannels(image); } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; progress++; proceed=SetImageProgress(image,CLAHEImageTag,progress,2* GetPixelChannels(image)); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); status=CLAHE(&clahe_info,&tile_info,&range_info,number_bins == 0 ? (size_t) 128 : MagickMin(number_bins,256),clip_limit,pixels); if (status == MagickFalse) (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); /* Push CLAHE pixels to CLAHE image. */ image_view=AcquireAuthenticCacheView(image,exception); n=clahe_info.width*(tile_info.y >> 1); for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } n+=tile_info.x >> 1; for (x=0; x < (ssize_t) image->columns; x++) { q[0]=ScaleShortToQuantum(pixels[n++]); q+=GetPixelChannels(image); } n+=(clahe_info.width-image->columns-(tile_info.x >> 1)); if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; progress++; proceed=SetImageProgress(image,CLAHEImageTag,progress,2* GetPixelChannels(image)); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); pixel_cache=RelinquishVirtualMemory(pixel_cache); if (TransformImageColorspace(image,colorspace,exception) == MagickFalse) status=MagickFalse; return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l u t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClutImage() replaces each color value in the given image, by using it as an % index to lookup a replacement color value in a Color Look UP Table in the % form of an image. The values are extracted along a diagonal of the CLUT % image so either a horizontal or vertial gradient image can be used. % % Typically this is used to either re-color a gray-scale image according to a % color gradient in the CLUT image, or to perform a freeform histogram % (level) adjustment according to the (typically gray-scale) gradient in the % CLUT image. % % When the 'channel' mask includes the matte/alpha transparency channel but % one image has no such channel it is assumed that that image is a simple % gray-scale image that will effect the alpha channel values, either for % gray-scale coloring (with transparent or semi-transparent colors), or % a histogram adjustment of existing alpha channel values. If both images % have matte channels, direct and normal indexing is applied, which is rarely % used. 
% % The format of the ClutImage method is: % % MagickBooleanType ClutImage(Image *image,Image *clut_image, % const PixelInterpolateMethod method,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image, which is replaced by indexed CLUT values % % o clut_image: the color lookup table image for replacement color values. % % o method: the pixel interpolation method. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType ClutImage(Image *image,const Image *clut_image, const PixelInterpolateMethod method,ExceptionInfo *exception) { #define ClutImageTag "Clut/Image" CacheView *clut_view, *image_view; MagickBooleanType status; MagickOffsetType progress; PixelInfo *clut_map; register ssize_t i; ssize_t adjust, y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(clut_image != (Image *) NULL); assert(clut_image->signature == MagickCoreSignature); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); if ((IsGrayColorspace(image->colorspace) != MagickFalse) && (IsGrayColorspace(clut_image->colorspace) == MagickFalse)) (void) SetImageColorspace(image,sRGBColorspace,exception); clut_map=(PixelInfo *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*clut_map)); if (clut_map == (PixelInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); /* Clut image. */ status=MagickTrue; progress=0; adjust=(ssize_t) (clut_image->interpolate == IntegerInterpolatePixel ? 0 : 1); clut_view=AcquireVirtualCacheView(clut_image,exception); for (i=0; i <= (ssize_t) MaxMap; i++) { GetPixelInfo(clut_image,clut_map+i); status=InterpolatePixelInfo(clut_image,clut_view,method, (double) i*(clut_image->columns-adjust)/MaxMap,(double) i* (clut_image->rows-adjust)/MaxMap,clut_map+i,exception); if (status == MagickFalse) break; } clut_view=DestroyCacheView(clut_view); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { PixelInfo pixel; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } GetPixelInfo(image,&pixel); for (x=0; x < (ssize_t) image->columns; x++) { PixelTrait traits; GetPixelInfoPixel(image,q,&pixel); traits=GetPixelChannelTraits(image,RedPixelChannel); if ((traits & UpdatePixelTrait) != 0) pixel.red=clut_map[ScaleQuantumToMap(ClampToQuantum( pixel.red))].red; traits=GetPixelChannelTraits(image,GreenPixelChannel); if ((traits & UpdatePixelTrait) != 0) pixel.green=clut_map[ScaleQuantumToMap(ClampToQuantum( pixel.green))].green; traits=GetPixelChannelTraits(image,BluePixelChannel); if ((traits & UpdatePixelTrait) != 0) pixel.blue=clut_map[ScaleQuantumToMap(ClampToQuantum( pixel.blue))].blue; traits=GetPixelChannelTraits(image,BlackPixelChannel); if ((traits & UpdatePixelTrait) != 0) pixel.black=clut_map[ScaleQuantumToMap(ClampToQuantum( pixel.black))].black; traits=GetPixelChannelTraits(image,AlphaPixelChannel); if ((traits & UpdatePixelTrait) != 0) pixel.alpha=clut_map[ScaleQuantumToMap(ClampToQuantum( pixel.alpha))].alpha; 
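      /*
        Every channel flagged with the update trait has been replaced by its
        CLUT lookup; write the remapped pixel back to the image.
      */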
SetPixelViaPixelInfo(image,&pixel,q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ClutImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); clut_map=(PixelInfo *) RelinquishMagickMemory(clut_map); if ((clut_image->alpha_trait != UndefinedPixelTrait) && ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)) (void) SetImageAlphaChannel(image,ActivateAlphaChannel,exception); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o l o r D e c i s i o n L i s t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ColorDecisionListImage() accepts a lightweight Color Correction Collection % (CCC) file which solely contains one or more color corrections and applies % the correction to the image. Here is a sample CCC file: % % <ColorCorrectionCollection xmlns="urn:ASC:CDL:v1.2"> % <ColorCorrection id="cc03345"> % <SOPNode> % <Slope> 0.9 1.2 0.5 </Slope> % <Offset> 0.4 -0.5 0.6 </Offset> % <Power> 1.0 0.8 1.5 </Power> % </SOPNode> % <SATNode> % <Saturation> 0.85 </Saturation> % </SATNode> % </ColorCorrection> % </ColorCorrectionCollection> % % which includes the slop, offset, and power for each of the RGB channels % as well as the saturation. % % The format of the ColorDecisionListImage method is: % % MagickBooleanType ColorDecisionListImage(Image *image, % const char *color_correction_collection,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o color_correction_collection: the color correction collection in XML. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType ColorDecisionListImage(Image *image, const char *color_correction_collection,ExceptionInfo *exception) { #define ColorDecisionListCorrectImageTag "ColorDecisionList/Image" typedef struct _Correction { double slope, offset, power; } Correction; typedef struct _ColorCorrection { Correction red, green, blue; double saturation; } ColorCorrection; CacheView *image_view; char token[MagickPathExtent]; ColorCorrection color_correction; const char *content, *p; MagickBooleanType status; MagickOffsetType progress; PixelInfo *cdl_map; register ssize_t i; ssize_t y; XMLTreeInfo *cc, *ccc, *sat, *sop; /* Allocate and initialize cdl maps. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (color_correction_collection == (const char *) NULL) return(MagickFalse); ccc=NewXMLTree((const char *) color_correction_collection,exception); if (ccc == (XMLTreeInfo *) NULL) return(MagickFalse); cc=GetXMLTreeChild(ccc,"ColorCorrection"); if (cc == (XMLTreeInfo *) NULL) { ccc=DestroyXMLTree(ccc); return(MagickFalse); } color_correction.red.slope=1.0; color_correction.red.offset=0.0; color_correction.red.power=1.0; color_correction.green.slope=1.0; color_correction.green.offset=0.0; color_correction.green.power=1.0; color_correction.blue.slope=1.0; color_correction.blue.offset=0.0; color_correction.blue.power=1.0; color_correction.saturation=0.0; sop=GetXMLTreeChild(cc,"SOPNode"); if (sop != (XMLTreeInfo *) NULL) { XMLTreeInfo *offset, *power, *slope; slope=GetXMLTreeChild(sop,"Slope"); if (slope != (XMLTreeInfo *) NULL) { content=GetXMLTreeContent(slope); p=(const char *) content; for (i=0; (*p != '\0') && (i < 3); i++) { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); switch (i) { case 0: { color_correction.red.slope=StringToDouble(token,(char **) NULL); break; } case 1: { color_correction.green.slope=StringToDouble(token, (char **) NULL); break; } case 2: { color_correction.blue.slope=StringToDouble(token, (char **) NULL); break; } } } } offset=GetXMLTreeChild(sop,"Offset"); if (offset != (XMLTreeInfo *) NULL) { content=GetXMLTreeContent(offset); p=(const char *) content; for (i=0; (*p != '\0') && (i < 3); i++) { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); switch (i) { case 0: { color_correction.red.offset=StringToDouble(token, (char **) NULL); break; } case 1: { color_correction.green.offset=StringToDouble(token, (char **) NULL); break; } case 2: { color_correction.blue.offset=StringToDouble(token, (char **) NULL); break; } } } } power=GetXMLTreeChild(sop,"Power"); if (power != (XMLTreeInfo *) NULL) { content=GetXMLTreeContent(power); p=(const char *) content; for (i=0; (*p != '\0') && (i < 3); i++) { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); switch (i) { case 0: { color_correction.red.power=StringToDouble(token,(char **) NULL); break; } case 1: { color_correction.green.power=StringToDouble(token, (char **) NULL); break; } case 2: { color_correction.blue.power=StringToDouble(token, (char **) NULL); break; } } } } } sat=GetXMLTreeChild(cc,"SATNode"); if (sat != (XMLTreeInfo *) NULL) { XMLTreeInfo *saturation; saturation=GetXMLTreeChild(sat,"Saturation"); if (saturation != (XMLTreeInfo *) NULL) { content=GetXMLTreeContent(saturation); p=(const char *) content; (void) GetNextToken(p,&p,MagickPathExtent,token); color_correction.saturation=StringToDouble(token,(char **) NULL); } } ccc=DestroyXMLTree(ccc); if (image->debug != MagickFalse) { (void) LogMagickEvent(TransformEvent,GetMagickModule(), " Color Correction Collection:"); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.red.slope: %g",color_correction.red.slope); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.red.offset: %g",color_correction.red.offset); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.red.power: %g",color_correction.red.power); 
(void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.green.slope: %g",color_correction.green.slope); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.green.offset: %g",color_correction.green.offset); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.green.power: %g",color_correction.green.power); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.blue.slope: %g",color_correction.blue.slope); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.blue.offset: %g",color_correction.blue.offset); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.blue.power: %g",color_correction.blue.power); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.saturation: %g",color_correction.saturation); } cdl_map=(PixelInfo *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*cdl_map)); if (cdl_map == (PixelInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); for (i=0; i <= (ssize_t) MaxMap; i++) { cdl_map[i].red=(double) ScaleMapToQuantum((double) (MaxMap*(pow(color_correction.red.slope*i/MaxMap+ color_correction.red.offset,color_correction.red.power)))); cdl_map[i].green=(double) ScaleMapToQuantum((double) (MaxMap*(pow(color_correction.green.slope*i/MaxMap+ color_correction.green.offset,color_correction.green.power)))); cdl_map[i].blue=(double) ScaleMapToQuantum((double) (MaxMap*(pow(color_correction.blue.slope*i/MaxMap+ color_correction.blue.offset,color_correction.blue.power)))); } if (image->storage_class == PseudoClass) for (i=0; i < (ssize_t) image->colors; i++) { /* Apply transfer function to colormap. */ double luma; luma=0.21267f*image->colormap[i].red+0.71526*image->colormap[i].green+ 0.07217f*image->colormap[i].blue; image->colormap[i].red=luma+color_correction.saturation*cdl_map[ ScaleQuantumToMap(ClampToQuantum(image->colormap[i].red))].red-luma; image->colormap[i].green=luma+color_correction.saturation*cdl_map[ ScaleQuantumToMap(ClampToQuantum(image->colormap[i].green))].green-luma; image->colormap[i].blue=luma+color_correction.saturation*cdl_map[ ScaleQuantumToMap(ClampToQuantum(image->colormap[i].blue))].blue-luma; } /* Apply transfer function to image. 
*/ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { double luma; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { luma=0.21267f*GetPixelRed(image,q)+0.71526*GetPixelGreen(image,q)+ 0.07217f*GetPixelBlue(image,q); SetPixelRed(image,ClampToQuantum(luma+color_correction.saturation* (cdl_map[ScaleQuantumToMap(GetPixelRed(image,q))].red-luma)),q); SetPixelGreen(image,ClampToQuantum(luma+color_correction.saturation* (cdl_map[ScaleQuantumToMap(GetPixelGreen(image,q))].green-luma)),q); SetPixelBlue(image,ClampToQuantum(luma+color_correction.saturation* (cdl_map[ScaleQuantumToMap(GetPixelBlue(image,q))].blue-luma)),q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ColorDecisionListCorrectImageTag, progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); cdl_map=(PixelInfo *) RelinquishMagickMemory(cdl_map); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o n t r a s t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ContrastImage() enhances the intensity differences between the lighter and % darker elements of the image. Set sharpen to a MagickTrue to increase the % image contrast otherwise the contrast is reduced. % % The format of the ContrastImage method is: % % MagickBooleanType ContrastImage(Image *image, % const MagickBooleanType sharpen,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o sharpen: Increase or decrease image contrast. % % o exception: return any errors or warnings in this structure. % */ static void Contrast(const int sign,double *red,double *green,double *blue) { double brightness, hue, saturation; /* Enhance contrast: dark color become darker, light color become lighter. 
*/ assert(red != (double *) NULL); assert(green != (double *) NULL); assert(blue != (double *) NULL); hue=0.0; saturation=0.0; brightness=0.0; ConvertRGBToHSB(*red,*green,*blue,&hue,&saturation,&brightness); brightness+=0.5*sign*(0.5*(sin((double) (MagickPI*(brightness-0.5)))+1.0)- brightness); if (brightness > 1.0) brightness=1.0; else if (brightness < 0.0) brightness=0.0; ConvertHSBToRGB(hue,saturation,brightness,red,green,blue); } MagickExport MagickBooleanType ContrastImage(Image *image, const MagickBooleanType sharpen,ExceptionInfo *exception) { #define ContrastImageTag "Contrast/Image" CacheView *image_view; int sign; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) if (AccelerateContrastImage(image,sharpen,exception) != MagickFalse) return(MagickTrue); #endif if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); sign=sharpen != MagickFalse ? 1 : -1; if (image->storage_class == PseudoClass) { /* Contrast enhance colormap. */ for (i=0; i < (ssize_t) image->colors; i++) { double blue, green, red; red=(double) image->colormap[i].red; green=(double) image->colormap[i].green; blue=(double) image->colormap[i].blue; Contrast(sign,&red,&green,&blue); image->colormap[i].red=(MagickRealType) red; image->colormap[i].green=(MagickRealType) green; image->colormap[i].blue=(MagickRealType) blue; } } /* Contrast enhance image. */ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { double blue, green, red; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { red=(double) GetPixelRed(image,q); green=(double) GetPixelGreen(image,q); blue=(double) GetPixelBlue(image,q); Contrast(sign,&red,&green,&blue); SetPixelRed(image,ClampToQuantum(red),q); SetPixelGreen(image,ClampToQuantum(green),q); SetPixelBlue(image,ClampToQuantum(blue),q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ContrastImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o n t r a s t S t r e t c h I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ContrastStretchImage() is a simple image enhancement technique that attempts % to improve the contrast in an image by 'stretching' the range of intensity % values it contains to span a desired range of values. It differs from the % more sophisticated histogram equalization in that it can only apply a % linear scaling function to the image pixel values. As a result the % 'enhancement' is less harsh. 
% % The format of the ContrastStretchImage method is: % % MagickBooleanType ContrastStretchImage(Image *image, % const char *levels,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o black_point: the black point. % % o white_point: the white point. % % o levels: Specify the levels where the black and white points have the % range of 0 to number-of-pixels (e.g. 1%, 10x90%, etc.). % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType ContrastStretchImage(Image *image, const double black_point,const double white_point,ExceptionInfo *exception) { #define MaxRange(color) ((double) ScaleQuantumToMap((Quantum) (color))) #define ContrastStretchImageTag "ContrastStretch/Image" CacheView *image_view; double *black, *histogram, *stretch_map, *white; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; /* Allocate histogram and stretch map. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (SetImageGray(image,exception) != MagickFalse) (void) SetImageColorspace(image,GRAYColorspace,exception); black=(double *) AcquireQuantumMemory(MaxPixelChannels,sizeof(*black)); white=(double *) AcquireQuantumMemory(MaxPixelChannels,sizeof(*white)); histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels* sizeof(*histogram)); stretch_map=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels* sizeof(*stretch_map)); if ((black == (double *) NULL) || (white == (double *) NULL) || (histogram == (double *) NULL) || (stretch_map == (double *) NULL)) { if (stretch_map != (double *) NULL) stretch_map=(double *) RelinquishMagickMemory(stretch_map); if (histogram != (double *) NULL) histogram=(double *) RelinquishMagickMemory(histogram); if (white != (double *) NULL) white=(double *) RelinquishMagickMemory(white); if (black != (double *) NULL) black=(double *) RelinquishMagickMemory(black); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } /* Form histogram. */ status=MagickTrue; (void) memset(histogram,0,(MaxMap+1)*GetPixelChannels(image)* sizeof(*histogram)); image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double pixel; pixel=GetPixelIntensity(image,p); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { if (image->channel_mask != DefaultChannels) pixel=(double) p[i]; histogram[GetPixelChannels(image)*ScaleQuantumToMap( ClampToQuantum(pixel))+i]++; } p+=GetPixelChannels(image); } } image_view=DestroyCacheView(image_view); /* Find the histogram boundaries by locating the black/white levels. 
*/ for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double intensity; register ssize_t j; black[i]=0.0; white[i]=MaxRange(QuantumRange); intensity=0.0; for (j=0; j <= (ssize_t) MaxMap; j++) { intensity+=histogram[GetPixelChannels(image)*j+i]; if (intensity > black_point) break; } black[i]=(double) j; intensity=0.0; for (j=(ssize_t) MaxMap; j != 0; j--) { intensity+=histogram[GetPixelChannels(image)*j+i]; if (intensity > ((double) image->columns*image->rows-white_point)) break; } white[i]=(double) j; } histogram=(double *) RelinquishMagickMemory(histogram); /* Stretch the histogram to create the stretched image mapping. */ (void) memset(stretch_map,0,(MaxMap+1)*GetPixelChannels(image)* sizeof(*stretch_map)); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { register ssize_t j; for (j=0; j <= (ssize_t) MaxMap; j++) { double gamma; gamma=PerceptibleReciprocal(white[i]-black[i]); if (j < (ssize_t) black[i]) stretch_map[GetPixelChannels(image)*j+i]=0.0; else if (j > (ssize_t) white[i]) stretch_map[GetPixelChannels(image)*j+i]=(double) QuantumRange; else if (black[i] != white[i]) stretch_map[GetPixelChannels(image)*j+i]=(double) ScaleMapToQuantum( (double) (MaxMap*gamma*(j-black[i]))); } } if (image->storage_class == PseudoClass) { register ssize_t j; /* Stretch-contrast colormap. */ for (j=0; j < (ssize_t) image->colors; j++) { if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) { i=GetPixelChannelOffset(image,RedPixelChannel); image->colormap[j].red=stretch_map[GetPixelChannels(image)* ScaleQuantumToMap(ClampToQuantum(image->colormap[j].red))+i]; } if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) { i=GetPixelChannelOffset(image,GreenPixelChannel); image->colormap[j].green=stretch_map[GetPixelChannels(image)* ScaleQuantumToMap(ClampToQuantum(image->colormap[j].green))+i]; } if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) { i=GetPixelChannelOffset(image,BluePixelChannel); image->colormap[j].blue=stretch_map[GetPixelChannels(image)* ScaleQuantumToMap(ClampToQuantum(image->colormap[j].blue))+i]; } if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) { i=GetPixelChannelOffset(image,AlphaPixelChannel); image->colormap[j].alpha=stretch_map[GetPixelChannels(image)* ScaleQuantumToMap(ClampToQuantum(image->colormap[j].alpha))+i]; } } } /* Stretch-contrast image. 
*/ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t j; for (j=0; j < (ssize_t) GetPixelChannels(image); j++) { PixelChannel channel = GetPixelChannelChannel(image,j); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if (black[j] == white[j]) continue; q[j]=ClampToQuantum(stretch_map[GetPixelChannels(image)* ScaleQuantumToMap(q[j])+j]); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ContrastStretchImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); stretch_map=(double *) RelinquishMagickMemory(stretch_map); white=(double *) RelinquishMagickMemory(white); black=(double *) RelinquishMagickMemory(black); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % E n h a n c e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % EnhanceImage() applies a digital filter that improves the quality of a % noisy image. % % The format of the EnhanceImage method is: % % Image *EnhanceImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport Image *EnhanceImage(const Image *image,ExceptionInfo *exception) { #define EnhanceImageTag "Enhance/Image" #define EnhancePixel(weight) \ mean=QuantumScale*((double) GetPixelRed(image,r)+pixel.red)/2.0; \ distance=QuantumScale*((double) GetPixelRed(image,r)-pixel.red); \ distance_squared=(4.0+mean)*distance*distance; \ mean=QuantumScale*((double) GetPixelGreen(image,r)+pixel.green)/2.0; \ distance=QuantumScale*((double) GetPixelGreen(image,r)-pixel.green); \ distance_squared+=(7.0-mean)*distance*distance; \ mean=QuantumScale*((double) GetPixelBlue(image,r)+pixel.blue)/2.0; \ distance=QuantumScale*((double) GetPixelBlue(image,r)-pixel.blue); \ distance_squared+=(5.0-mean)*distance*distance; \ mean=QuantumScale*((double) GetPixelBlack(image,r)+pixel.black)/2.0; \ distance=QuantumScale*((double) GetPixelBlack(image,r)-pixel.black); \ distance_squared+=(5.0-mean)*distance*distance; \ mean=QuantumScale*((double) GetPixelAlpha(image,r)+pixel.alpha)/2.0; \ distance=QuantumScale*((double) GetPixelAlpha(image,r)-pixel.alpha); \ distance_squared+=(5.0-mean)*distance*distance; \ if (distance_squared < 0.069) \ { \ aggregate.red+=(weight)*GetPixelRed(image,r); \ aggregate.green+=(weight)*GetPixelGreen(image,r); \ aggregate.blue+=(weight)*GetPixelBlue(image,r); \ aggregate.black+=(weight)*GetPixelBlack(image,r); \ aggregate.alpha+=(weight)*GetPixelAlpha(image,r); \ total_weight+=(weight); \ } \ r+=GetPixelChannels(image); CacheView *enhance_view, *image_view; Image *enhance_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; /* Initialize enhanced image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); enhance_image=CloneImage(image,0,0,MagickTrue, exception); if (enhance_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(enhance_image,DirectClass,exception) == MagickFalse) { enhance_image=DestroyImage(enhance_image); return((Image *) NULL); } /* Enhance image. 
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); enhance_view=AcquireAuthenticCacheView(enhance_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,enhance_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { PixelInfo pixel; register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; ssize_t center; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-2,y-2,image->columns+4,5,exception); q=QueueCacheViewAuthenticPixels(enhance_view,0,y,enhance_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } center=(ssize_t) GetPixelChannels(image)*(2*(image->columns+4)+2); GetPixelInfo(image,&pixel); for (x=0; x < (ssize_t) image->columns; x++) { double distance, distance_squared, mean, total_weight; PixelInfo aggregate; register const Quantum *magick_restrict r; GetPixelInfo(image,&aggregate); total_weight=0.0; GetPixelInfoPixel(image,p+center,&pixel); r=p; EnhancePixel(5.0); EnhancePixel(8.0); EnhancePixel(10.0); EnhancePixel(8.0); EnhancePixel(5.0); r=p+GetPixelChannels(image)*(image->columns+4); EnhancePixel(8.0); EnhancePixel(20.0); EnhancePixel(40.0); EnhancePixel(20.0); EnhancePixel(8.0); r=p+2*GetPixelChannels(image)*(image->columns+4); EnhancePixel(10.0); EnhancePixel(40.0); EnhancePixel(80.0); EnhancePixel(40.0); EnhancePixel(10.0); r=p+3*GetPixelChannels(image)*(image->columns+4); EnhancePixel(8.0); EnhancePixel(20.0); EnhancePixel(40.0); EnhancePixel(20.0); EnhancePixel(8.0); r=p+4*GetPixelChannels(image)*(image->columns+4); EnhancePixel(5.0); EnhancePixel(8.0); EnhancePixel(10.0); EnhancePixel(8.0); EnhancePixel(5.0); if (total_weight > MagickEpsilon) { pixel.red=((aggregate.red+total_weight/2.0)/total_weight); pixel.green=((aggregate.green+total_weight/2.0)/total_weight); pixel.blue=((aggregate.blue+total_weight/2.0)/total_weight); pixel.black=((aggregate.black+total_weight/2.0)/total_weight); pixel.alpha=((aggregate.alpha+total_weight/2.0)/total_weight); } SetPixelViaPixelInfo(enhance_image,&pixel,q); p+=GetPixelChannels(image); q+=GetPixelChannels(enhance_image); } if (SyncCacheViewAuthenticPixels(enhance_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,EnhanceImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } enhance_view=DestroyCacheView(enhance_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) enhance_image=DestroyImage(enhance_image); return(enhance_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % E q u a l i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % EqualizeImage() applies a histogram equalization to the image. % % The format of the EqualizeImage method is: % % MagickBooleanType EqualizeImage(Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType EqualizeImage(Image *image, ExceptionInfo *exception) { #define EqualizeImageTag "Equalize/Image" CacheView *image_view; double black[CompositePixelChannel+1], *equalize_map, *histogram, *map, white[CompositePixelChannel+1]; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; /* Allocate and initialize histogram arrays. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) if (AccelerateEqualizeImage(image,exception) != MagickFalse) return(MagickTrue); #endif if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); equalize_map=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels* sizeof(*equalize_map)); histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels* sizeof(*histogram)); map=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels*sizeof(*map)); if ((equalize_map == (double *) NULL) || (histogram == (double *) NULL) || (map == (double *) NULL)) { if (map != (double *) NULL) map=(double *) RelinquishMagickMemory(map); if (histogram != (double *) NULL) histogram=(double *) RelinquishMagickMemory(histogram); if (equalize_map != (double *) NULL) equalize_map=(double *) RelinquishMagickMemory(equalize_map); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } /* Form histogram. */ status=MagickTrue; (void) memset(histogram,0,(MaxMap+1)*GetPixelChannels(image)* sizeof(*histogram)); image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double intensity; intensity=(double) p[i]; if ((image->channel_mask & SyncChannels) != 0) intensity=GetPixelIntensity(image,p); histogram[GetPixelChannels(image)*ScaleQuantumToMap( ClampToQuantum(intensity))+i]++; } p+=GetPixelChannels(image); } } image_view=DestroyCacheView(image_view); /* Integrate the histogram to get the equalization map. */ for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double intensity; register ssize_t j; intensity=0.0; for (j=0; j <= (ssize_t) MaxMap; j++) { intensity+=histogram[GetPixelChannels(image)*j+i]; map[GetPixelChannels(image)*j+i]=intensity; } } (void) memset(equalize_map,0,(MaxMap+1)*GetPixelChannels(image)* sizeof(*equalize_map)); (void) memset(black,0,sizeof(*black)); (void) memset(white,0,sizeof(*white)); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { register ssize_t j; black[i]=map[i]; white[i]=map[GetPixelChannels(image)*MaxMap+i]; if (black[i] != white[i]) for (j=0; j <= (ssize_t) MaxMap; j++) equalize_map[GetPixelChannels(image)*j+i]=(double) ScaleMapToQuantum((double) ((MaxMap*(map[ GetPixelChannels(image)*j+i]-black[i]))/(white[i]-black[i]))); } histogram=(double *) RelinquishMagickMemory(histogram); map=(double *) RelinquishMagickMemory(map); if (image->storage_class == PseudoClass) { register ssize_t j; /* Equalize colormap. 
*/ for (j=0; j < (ssize_t) image->colors; j++) { if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) { PixelChannel channel = GetPixelChannelChannel(image, RedPixelChannel); if (black[channel] != white[channel]) image->colormap[j].red=equalize_map[GetPixelChannels(image)* ScaleQuantumToMap(ClampToQuantum(image->colormap[j].red))+ channel]; } if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) { PixelChannel channel = GetPixelChannelChannel(image, GreenPixelChannel); if (black[channel] != white[channel]) image->colormap[j].green=equalize_map[GetPixelChannels(image)* ScaleQuantumToMap(ClampToQuantum(image->colormap[j].green))+ channel]; } if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) { PixelChannel channel = GetPixelChannelChannel(image, BluePixelChannel); if (black[channel] != white[channel]) image->colormap[j].blue=equalize_map[GetPixelChannels(image)* ScaleQuantumToMap(ClampToQuantum(image->colormap[j].blue))+ channel]; } if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) { PixelChannel channel = GetPixelChannelChannel(image, AlphaPixelChannel); if (black[channel] != white[channel]) image->colormap[j].alpha=equalize_map[GetPixelChannels(image)* ScaleQuantumToMap(ClampToQuantum(image->colormap[j].alpha))+ channel]; } } } /* Equalize image. */ progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t j; for (j=0; j < (ssize_t) GetPixelChannels(image); j++) { PixelChannel channel = GetPixelChannelChannel(image,j); PixelTrait traits = GetPixelChannelTraits(image,channel); if (((traits & UpdatePixelTrait) == 0) || (black[j] == white[j])) continue; q[j]=ClampToQuantum(equalize_map[GetPixelChannels(image)* ScaleQuantumToMap(q[j])+j]); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,EqualizeImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); equalize_map=(double *) RelinquishMagickMemory(equalize_map); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G a m m a I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GammaImage() gamma-corrects a particular image channel. The same % image viewed on different devices will have perceptual differences in the % way the image's intensities are represented on the screen. Specify % individual gamma levels for the red, green, and blue channels, or adjust % all three with the gamma parameter. Values typically range from 0.8 to 2.3. % % You can also reduce the influence of a particular channel with a gamma % value of 0. 
% % The format of the GammaImage method is: % % MagickBooleanType GammaImage(Image *image,const double gamma, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o level: the image gamma as a string (e.g. 1.6,1.2,1.0). % % o gamma: the image gamma. % */ static inline double gamma_pow(const double value,const double gamma) { return(value < 0.0 ? value : pow(value,gamma)); } MagickExport MagickBooleanType GammaImage(Image *image,const double gamma, ExceptionInfo *exception) { #define GammaImageTag "Gamma/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; Quantum *gamma_map; register ssize_t i; ssize_t y; /* Allocate and initialize gamma maps. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (gamma == 1.0) return(MagickTrue); gamma_map=(Quantum *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*gamma_map)); if (gamma_map == (Quantum *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); (void) memset(gamma_map,0,(MaxMap+1)*sizeof(*gamma_map)); if (gamma != 0.0) for (i=0; i <= (ssize_t) MaxMap; i++) gamma_map[i]=ScaleMapToQuantum((double) (MaxMap*pow((double) i/ MaxMap,PerceptibleReciprocal(gamma)))); if (image->storage_class == PseudoClass) for (i=0; i < (ssize_t) image->colors; i++) { /* Gamma-correct colormap. */ if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].red=(double) gamma_map[ScaleQuantumToMap( ClampToQuantum(image->colormap[i].red))]; if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].green=(double) gamma_map[ScaleQuantumToMap( ClampToQuantum(image->colormap[i].green))]; if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].blue=(double) gamma_map[ScaleQuantumToMap( ClampToQuantum(image->colormap[i].blue))]; if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].alpha=(double) gamma_map[ScaleQuantumToMap( ClampToQuantum(image->colormap[i].alpha))]; } /* Gamma-correct image. 
*/ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t j; for (j=0; j < (ssize_t) GetPixelChannels(image); j++) { PixelChannel channel = GetPixelChannelChannel(image,j); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[j]=gamma_map[ScaleQuantumToMap(ClampToQuantum((MagickRealType) q[j]))]; } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,GammaImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); gamma_map=(Quantum *) RelinquishMagickMemory(gamma_map); if (image->gamma != 0.0) image->gamma*=gamma; return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G r a y s c a l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GrayscaleImage() converts the image to grayscale. % % The format of the GrayscaleImage method is: % % MagickBooleanType GrayscaleImage(Image *image, % const PixelIntensityMethod method ,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o method: the pixel intensity method. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType GrayscaleImage(Image *image, const PixelIntensityMethod method,ExceptionInfo *exception) { #define GrayscaleImageTag "Grayscale/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } #if defined(MAGICKCORE_OPENCL_SUPPORT) if (AccelerateGrayscaleImage(image,method,exception) != MagickFalse) { image->intensity=method; image->type=GrayscaleType; if ((method == Rec601LuminancePixelIntensityMethod) || (method == Rec709LuminancePixelIntensityMethod)) return(SetImageColorspace(image,LinearGRAYColorspace,exception)); return(SetImageColorspace(image,GRAYColorspace,exception)); } #endif /* Grayscale image. 
*/ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType blue, green, red, intensity; red=(MagickRealType) GetPixelRed(image,q); green=(MagickRealType) GetPixelGreen(image,q); blue=(MagickRealType) GetPixelBlue(image,q); intensity=0.0; switch (method) { case AveragePixelIntensityMethod: { intensity=(red+green+blue)/3.0; break; } case BrightnessPixelIntensityMethod: { intensity=MagickMax(MagickMax(red,green),blue); break; } case LightnessPixelIntensityMethod: { intensity=(MagickMin(MagickMin(red,green),blue)+ MagickMax(MagickMax(red,green),blue))/2.0; break; } case MSPixelIntensityMethod: { intensity=(MagickRealType) (((double) red*red+green*green+ blue*blue)/3.0); break; } case Rec601LumaPixelIntensityMethod: { if (image->colorspace == RGBColorspace) { red=EncodePixelGamma(red); green=EncodePixelGamma(green); blue=EncodePixelGamma(blue); } intensity=0.298839*red+0.586811*green+0.114350*blue; break; } case Rec601LuminancePixelIntensityMethod: { if (image->colorspace == sRGBColorspace) { red=DecodePixelGamma(red); green=DecodePixelGamma(green); blue=DecodePixelGamma(blue); } intensity=0.298839*red+0.586811*green+0.114350*blue; break; } case Rec709LumaPixelIntensityMethod: default: { if (image->colorspace == RGBColorspace) { red=EncodePixelGamma(red); green=EncodePixelGamma(green); blue=EncodePixelGamma(blue); } intensity=0.212656*red+0.715158*green+0.072186*blue; break; } case Rec709LuminancePixelIntensityMethod: { if (image->colorspace == sRGBColorspace) { red=DecodePixelGamma(red); green=DecodePixelGamma(green); blue=DecodePixelGamma(blue); } intensity=0.212656*red+0.715158*green+0.072186*blue; break; } case RMSPixelIntensityMethod: { intensity=(MagickRealType) (sqrt((double) red*red+green*green+ blue*blue)/sqrt(3.0)); break; } } SetPixelGray(image,ClampToQuantum(intensity),q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,GrayscaleImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); image->intensity=method; image->type=GrayscaleType; if ((method == Rec601LuminancePixelIntensityMethod) || (method == Rec709LuminancePixelIntensityMethod)) return(SetImageColorspace(image,LinearGRAYColorspace,exception)); return(SetImageColorspace(image,GRAYColorspace,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % H a l d C l u t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % HaldClutImage() applies a Hald color lookup table to the image. A Hald % color lookup table is a 3-dimensional color cube mapped to 2 dimensions. % Create it with the HALD coder. 
You can apply any color transformation to % the Hald image and then use this method to apply the transform to the % image. % % The format of the HaldClutImage method is: % % MagickBooleanType HaldClutImage(Image *image,Image *hald_image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image, which is replaced by indexed CLUT values % % o hald_image: the color lookup table image for replacement color values. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType HaldClutImage(Image *image, const Image *hald_image,ExceptionInfo *exception) { #define HaldClutImageTag "Clut/Image" typedef struct _HaldInfo { double x, y, z; } HaldInfo; CacheView *hald_view, *image_view; double width; MagickBooleanType status; MagickOffsetType progress; PixelInfo zero; size_t cube_size, length, level; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(hald_image != (Image *) NULL); assert(hald_image->signature == MagickCoreSignature); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); if (image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); /* Hald clut image. */ status=MagickTrue; progress=0; length=(size_t) MagickMin((MagickRealType) hald_image->columns, (MagickRealType) hald_image->rows); for (level=2; (level*level*level) < length; level++) ; level*=level; cube_size=level*level; width=(double) hald_image->columns; GetPixelInfo(hald_image,&zero); hald_view=AcquireVirtualCacheView(hald_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double area, offset; HaldInfo point; PixelInfo pixel, pixel1, pixel2, pixel3, pixel4; point.x=QuantumScale*(level-1.0)*GetPixelRed(image,q); point.y=QuantumScale*(level-1.0)*GetPixelGreen(image,q); point.z=QuantumScale*(level-1.0)*GetPixelBlue(image,q); offset=point.x+level*floor(point.y)+cube_size*floor(point.z); point.x-=floor(point.x); point.y-=floor(point.y); point.z-=floor(point.z); pixel1=zero; status=InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate, fmod(offset,width),floor(offset/width),&pixel1,exception); if (status == MagickFalse) break; pixel2=zero; status=InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate, fmod(offset+level,width),floor((offset+level)/width),&pixel2,exception); if (status == MagickFalse) break; pixel3=zero; area=point.y; if (hald_image->interpolate == NearestInterpolatePixel) area=(point.y < 0.5) ? 
0.0 : 1.0; CompositePixelInfoAreaBlend(&pixel1,pixel1.alpha,&pixel2,pixel2.alpha, area,&pixel3); offset+=cube_size; status=InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate, fmod(offset,width),floor(offset/width),&pixel1,exception); if (status == MagickFalse) break; status=InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate, fmod(offset+level,width),floor((offset+level)/width),&pixel2,exception); if (status == MagickFalse) break; pixel4=zero; CompositePixelInfoAreaBlend(&pixel1,pixel1.alpha,&pixel2,pixel2.alpha, area,&pixel4); pixel=zero; area=point.z; if (hald_image->interpolate == NearestInterpolatePixel) area=(point.z < 0.5)? 0.0 : 1.0; CompositePixelInfoAreaBlend(&pixel3,pixel3.alpha,&pixel4,pixel4.alpha, area,&pixel); if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) SetPixelRed(image,ClampToQuantum(pixel.red),q); if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) SetPixelGreen(image,ClampToQuantum(pixel.green),q); if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) SetPixelBlue(image,ClampToQuantum(pixel.blue),q); if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) SetPixelBlack(image,ClampToQuantum(pixel.black),q); if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && (image->alpha_trait != UndefinedPixelTrait)) SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,HaldClutImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } hald_view=DestroyCacheView(hald_view); image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L e v e l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % LevelImage() adjusts the levels of a particular image channel by % scaling the colors falling between specified white and black points to % the full available quantum range. % % The parameters provided represent the black, and white points. The black % point specifies the darkest color in the image. Colors darker than the % black point are set to zero. White point specifies the lightest color in % the image. Colors brighter than the white point are set to the maximum % quantum value. % % If a '!' flag is given, map black and white colors to the given levels % rather than mapping those levels to black and white. See % LevelizeImage() below. % % Gamma specifies a gamma correction to apply to the image. % % The format of the LevelImage method is: % % MagickBooleanType LevelImage(Image *image,const double black_point, % const double white_point,const double gamma,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o black_point: The level to map zero (black) to. % % o white_point: The level to map QuantumRange (white) to. % % o exception: return any errors or warnings in this structure. 
% */ static inline double LevelPixel(const double black_point, const double white_point,const double gamma,const double pixel) { double level_pixel, scale; scale=PerceptibleReciprocal(white_point-black_point); level_pixel=QuantumRange*gamma_pow(scale*((double) pixel-black_point), PerceptibleReciprocal(gamma)); return(level_pixel); } MagickExport MagickBooleanType LevelImage(Image *image,const double black_point, const double white_point,const double gamma,ExceptionInfo *exception) { #define LevelImageTag "Level/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; /* Allocate and initialize levels map. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->storage_class == PseudoClass) for (i=0; i < (ssize_t) image->colors; i++) { /* Level colormap. */ if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].red=(double) ClampToQuantum(LevelPixel(black_point, white_point,gamma,image->colormap[i].red)); if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].green=(double) ClampToQuantum(LevelPixel(black_point, white_point,gamma,image->colormap[i].green)); if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].blue=(double) ClampToQuantum(LevelPixel(black_point, white_point,gamma,image->colormap[i].blue)); if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].alpha=(double) ClampToQuantum(LevelPixel(black_point, white_point,gamma,image->colormap[i].alpha)); } /* Level image. */ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t j; for (j=0; j < (ssize_t) GetPixelChannels(image); j++) { PixelChannel channel = GetPixelChannelChannel(image,j); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[j]=ClampToQuantum(LevelPixel(black_point,white_point,gamma, (double) q[j])); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,LevelImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); (void) ClampImage(image,exception); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L e v e l i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % LevelizeImage() applies the reversed LevelImage() operation to just % the specific channels specified. It compresses the full range of color % values, so that they lie between the given black and white points. 
Gamma is % applied before the values are mapped. % % LevelizeImage() can be called with by using a +level command line % API option, or using a '!' on a -level or LevelImage() geometry string. % % It can be used to de-contrast a greyscale image to the exact levels % specified. Or by using specific levels for each channel of an image you % can convert a gray-scale image to any linear color gradient, according to % those levels. % % The format of the LevelizeImage method is: % % MagickBooleanType LevelizeImage(Image *image,const double black_point, % const double white_point,const double gamma,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o black_point: The level to map zero (black) to. % % o white_point: The level to map QuantumRange (white) to. % % o gamma: adjust gamma by this factor before mapping values. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType LevelizeImage(Image *image, const double black_point,const double white_point,const double gamma, ExceptionInfo *exception) { #define LevelizeImageTag "Levelize/Image" #define LevelizeValue(x) ClampToQuantum(((MagickRealType) gamma_pow((double) \ (QuantumScale*(x)),gamma))*(white_point-black_point)+black_point) CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; /* Allocate and initialize levels map. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->storage_class == PseudoClass) for (i=0; i < (ssize_t) image->colors; i++) { /* Level colormap. */ if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].red=(double) LevelizeValue(image->colormap[i].red); if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].green=(double) LevelizeValue( image->colormap[i].green); if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].blue=(double) LevelizeValue(image->colormap[i].blue); if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].alpha=(double) LevelizeValue( image->colormap[i].alpha); } /* Level image. 
*/ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t j; for (j=0; j < (ssize_t) GetPixelChannels(image); j++) { PixelChannel channel = GetPixelChannelChannel(image,j); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[j]=LevelizeValue(q[j]); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,LevelizeImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L e v e l I m a g e C o l o r s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % LevelImageColors() maps the given color to "black" and "white" values, % linearly spreading out the colors, and level values on a channel by channel % bases, as per LevelImage(). The given colors allows you to specify % different level ranges for each of the color channels separately. % % If the boolean 'invert' is set true the image values will modifyed in the % reverse direction. That is any existing "black" and "white" colors in the % image will become the color values given, with all other values compressed % appropriately. This effectivally maps a greyscale gradient into the given % color gradient. % % The format of the LevelImageColors method is: % % MagickBooleanType LevelImageColors(Image *image, % const PixelInfo *black_color,const PixelInfo *white_color, % const MagickBooleanType invert,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o black_color: The color to map black to/from % % o white_point: The color to map white to/from % % o invert: if true map the colors (levelize), rather than from (level) % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType LevelImageColors(Image *image, const PixelInfo *black_color,const PixelInfo *white_color, const MagickBooleanType invert,ExceptionInfo *exception) { ChannelType channel_mask; MagickStatusType status; /* Allocate and initialize levels map. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if ((IsGrayColorspace(image->colorspace) != MagickFalse) && ((IsGrayColorspace(black_color->colorspace) == MagickFalse) || (IsGrayColorspace(white_color->colorspace) == MagickFalse))) (void) SetImageColorspace(image,sRGBColorspace,exception); status=MagickTrue; if (invert == MagickFalse) { if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) { channel_mask=SetImageChannelMask(image,RedChannel); status&=LevelImage(image,black_color->red,white_color->red,1.0, exception); (void) SetImageChannelMask(image,channel_mask); } if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) { channel_mask=SetImageChannelMask(image,GreenChannel); status&=LevelImage(image,black_color->green,white_color->green,1.0, exception); (void) SetImageChannelMask(image,channel_mask); } if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) { channel_mask=SetImageChannelMask(image,BlueChannel); status&=LevelImage(image,black_color->blue,white_color->blue,1.0, exception); (void) SetImageChannelMask(image,channel_mask); } if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) { channel_mask=SetImageChannelMask(image,BlackChannel); status&=LevelImage(image,black_color->black,white_color->black,1.0, exception); (void) SetImageChannelMask(image,channel_mask); } if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && (image->alpha_trait != UndefinedPixelTrait)) { channel_mask=SetImageChannelMask(image,AlphaChannel); status&=LevelImage(image,black_color->alpha,white_color->alpha,1.0, exception); (void) SetImageChannelMask(image,channel_mask); } } else { if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) { channel_mask=SetImageChannelMask(image,RedChannel); status&=LevelizeImage(image,black_color->red,white_color->red,1.0, exception); (void) SetImageChannelMask(image,channel_mask); } if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) { channel_mask=SetImageChannelMask(image,GreenChannel); status&=LevelizeImage(image,black_color->green,white_color->green,1.0, exception); (void) SetImageChannelMask(image,channel_mask); } if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) { channel_mask=SetImageChannelMask(image,BlueChannel); status&=LevelizeImage(image,black_color->blue,white_color->blue,1.0, exception); (void) SetImageChannelMask(image,channel_mask); } if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) { channel_mask=SetImageChannelMask(image,BlackChannel); status&=LevelizeImage(image,black_color->black,white_color->black,1.0, exception); (void) SetImageChannelMask(image,channel_mask); } if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && (image->alpha_trait != UndefinedPixelTrait)) { channel_mask=SetImageChannelMask(image,AlphaChannel); status&=LevelizeImage(image,black_color->alpha,white_color->alpha,1.0, exception); (void) SetImageChannelMask(image,channel_mask); } } return(status != 0 ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L i n e a r S t r e t c h I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % LinearStretchImage() discards any pixels below the black point and above % the white point and levels the remaining pixels. 
% % The format of the LinearStretchImage method is: % % MagickBooleanType LinearStretchImage(Image *image, % const double black_point,const double white_point, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o black_point: the black point. % % o white_point: the white point. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType LinearStretchImage(Image *image, const double black_point,const double white_point,ExceptionInfo *exception) { #define LinearStretchImageTag "LinearStretch/Image" CacheView *image_view; double *histogram, intensity; MagickBooleanType status; ssize_t black, white, y; /* Allocate histogram and linear map. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*histogram)); if (histogram == (double *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); /* Form histogram. */ (void) memset(histogram,0,(MaxMap+1)*sizeof(*histogram)); image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { intensity=GetPixelIntensity(image,p); histogram[ScaleQuantumToMap(ClampToQuantum(intensity))]++; p+=GetPixelChannels(image); } } image_view=DestroyCacheView(image_view); /* Find the histogram boundaries by locating the black and white point levels. */ intensity=0.0; for (black=0; black < (ssize_t) MaxMap; black++) { intensity+=histogram[black]; if (intensity >= black_point) break; } intensity=0.0; for (white=(ssize_t) MaxMap; white != 0; white--) { intensity+=histogram[white]; if (intensity >= white_point) break; } histogram=(double *) RelinquishMagickMemory(histogram); status=LevelImage(image,(double) ScaleMapToQuantum((MagickRealType) black), (double) ScaleMapToQuantum((MagickRealType) white),1.0,exception); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M o d u l a t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ModulateImage() lets you control the brightness, saturation, and hue % of an image. Modulate represents the brightness, saturation, and hue % as one parameter (e.g. 90,150,100). If the image colorspace is HSL, the % modulation is lightness, saturation, and hue. For HWB, use blackness, % whiteness, and hue. And for HCL, use chrome, luma, and hue. % % The format of the ModulateImage method is: % % MagickBooleanType ModulateImage(Image *image,const char *modulate, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o modulate: Define the percent change in brightness, saturation, and hue. % % o exception: return any errors or warnings in this structure. % */ static inline void ModulateHCL(const double percent_hue, const double percent_chroma,const double percent_luma,double *red, double *green,double *blue) { double hue, luma, chroma; /* Increase or decrease color luma, chroma, or hue. 
*/ ConvertRGBToHCL(*red,*green,*blue,&hue,&chroma,&luma); hue+=fmod((percent_hue-100.0),200.0)/200.0; chroma*=0.01*percent_chroma; luma*=0.01*percent_luma; ConvertHCLToRGB(hue,chroma,luma,red,green,blue); } static inline void ModulateHCLp(const double percent_hue, const double percent_chroma,const double percent_luma,double *red, double *green,double *blue) { double hue, luma, chroma; /* Increase or decrease color luma, chroma, or hue. */ ConvertRGBToHCLp(*red,*green,*blue,&hue,&chroma,&luma); hue+=fmod((percent_hue-100.0),200.0)/200.0; chroma*=0.01*percent_chroma; luma*=0.01*percent_luma; ConvertHCLpToRGB(hue,chroma,luma,red,green,blue); } static inline void ModulateHSB(const double percent_hue, const double percent_saturation,const double percent_brightness,double *red, double *green,double *blue) { double brightness, hue, saturation; /* Increase or decrease color brightness, saturation, or hue. */ ConvertRGBToHSB(*red,*green,*blue,&hue,&saturation,&brightness); hue+=fmod((percent_hue-100.0),200.0)/200.0; saturation*=0.01*percent_saturation; brightness*=0.01*percent_brightness; ConvertHSBToRGB(hue,saturation,brightness,red,green,blue); } static inline void ModulateHSI(const double percent_hue, const double percent_saturation,const double percent_intensity,double *red, double *green,double *blue) { double intensity, hue, saturation; /* Increase or decrease color intensity, saturation, or hue. */ ConvertRGBToHSI(*red,*green,*blue,&hue,&saturation,&intensity); hue+=fmod((percent_hue-100.0),200.0)/200.0; saturation*=0.01*percent_saturation; intensity*=0.01*percent_intensity; ConvertHSIToRGB(hue,saturation,intensity,red,green,blue); } static inline void ModulateHSL(const double percent_hue, const double percent_saturation,const double percent_lightness,double *red, double *green,double *blue) { double hue, lightness, saturation; /* Increase or decrease color lightness, saturation, or hue. */ ConvertRGBToHSL(*red,*green,*blue,&hue,&saturation,&lightness); hue+=fmod((percent_hue-100.0),200.0)/200.0; saturation*=0.01*percent_saturation; lightness*=0.01*percent_lightness; ConvertHSLToRGB(hue,saturation,lightness,red,green,blue); } static inline void ModulateHSV(const double percent_hue, const double percent_saturation,const double percent_value,double *red, double *green,double *blue) { double hue, saturation, value; /* Increase or decrease color value, saturation, or hue. */ ConvertRGBToHSV(*red,*green,*blue,&hue,&saturation,&value); hue+=fmod((percent_hue-100.0),200.0)/200.0; saturation*=0.01*percent_saturation; value*=0.01*percent_value; ConvertHSVToRGB(hue,saturation,value,red,green,blue); } static inline void ModulateHWB(const double percent_hue, const double percent_whiteness,const double percent_blackness,double *red, double *green,double *blue) { double blackness, hue, whiteness; /* Increase or decrease color blackness, whiteness, or hue. */ ConvertRGBToHWB(*red,*green,*blue,&hue,&whiteness,&blackness); hue+=fmod((percent_hue-100.0),200.0)/200.0; blackness*=0.01*percent_blackness; whiteness*=0.01*percent_whiteness; ConvertHWBToRGB(hue,whiteness,blackness,red,green,blue); } static inline void ModulateLCHab(const double percent_luma, const double percent_chroma,const double percent_hue,double *red, double *green,double *blue) { double hue, luma, chroma; /* Increase or decrease color luma, chroma, or hue. 
*/ ConvertRGBToLCHab(*red,*green,*blue,&luma,&chroma,&hue); luma*=0.01*percent_luma; chroma*=0.01*percent_chroma; hue+=fmod((percent_hue-100.0),200.0)/200.0; ConvertLCHabToRGB(luma,chroma,hue,red,green,blue); } static inline void ModulateLCHuv(const double percent_luma, const double percent_chroma,const double percent_hue,double *red, double *green,double *blue) { double hue, luma, chroma; /* Increase or decrease color luma, chroma, or hue. */ ConvertRGBToLCHuv(*red,*green,*blue,&luma,&chroma,&hue); luma*=0.01*percent_luma; chroma*=0.01*percent_chroma; hue+=fmod((percent_hue-100.0),200.0)/200.0; ConvertLCHuvToRGB(luma,chroma,hue,red,green,blue); } MagickExport MagickBooleanType ModulateImage(Image *image,const char *modulate, ExceptionInfo *exception) { #define ModulateImageTag "Modulate/Image" CacheView *image_view; ColorspaceType colorspace; const char *artifact; double percent_brightness, percent_hue, percent_saturation; GeometryInfo geometry_info; MagickBooleanType status; MagickOffsetType progress; MagickStatusType flags; register ssize_t i; ssize_t y; /* Initialize modulate table. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (modulate == (char *) NULL) return(MagickFalse); if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) (void) SetImageColorspace(image,sRGBColorspace,exception); flags=ParseGeometry(modulate,&geometry_info); percent_brightness=geometry_info.rho; percent_saturation=geometry_info.sigma; if ((flags & SigmaValue) == 0) percent_saturation=100.0; percent_hue=geometry_info.xi; if ((flags & XiValue) == 0) percent_hue=100.0; colorspace=UndefinedColorspace; artifact=GetImageArtifact(image,"modulate:colorspace"); if (artifact != (const char *) NULL) colorspace=(ColorspaceType) ParseCommandOption(MagickColorspaceOptions, MagickFalse,artifact); if (image->storage_class == PseudoClass) for (i=0; i < (ssize_t) image->colors; i++) { double blue, green, red; /* Modulate image colormap. */ red=(double) image->colormap[i].red; green=(double) image->colormap[i].green; blue=(double) image->colormap[i].blue; switch (colorspace) { case HCLColorspace: { ModulateHCL(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HCLpColorspace: { ModulateHCLp(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSBColorspace: { ModulateHSB(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSIColorspace: { ModulateHSI(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSLColorspace: default: { ModulateHSL(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSVColorspace: { ModulateHSV(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HWBColorspace: { ModulateHWB(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case LCHColorspace: case LCHabColorspace: { ModulateLCHab(percent_brightness,percent_saturation,percent_hue, &red,&green,&blue); break; } case LCHuvColorspace: { ModulateLCHuv(percent_brightness,percent_saturation,percent_hue, &red,&green,&blue); break; } } image->colormap[i].red=red; image->colormap[i].green=green; image->colormap[i].blue=blue; } /* Modulate image. 
*/ #if defined(MAGICKCORE_OPENCL_SUPPORT) if (AccelerateModulateImage(image,percent_brightness,percent_hue, percent_saturation,colorspace,exception) != MagickFalse) return(MagickTrue); #endif status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double blue, green, red; red=(double) GetPixelRed(image,q); green=(double) GetPixelGreen(image,q); blue=(double) GetPixelBlue(image,q); switch (colorspace) { case HCLColorspace: { ModulateHCL(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HCLpColorspace: { ModulateHCLp(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSBColorspace: { ModulateHSB(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSLColorspace: default: { ModulateHSL(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSVColorspace: { ModulateHSV(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HWBColorspace: { ModulateHWB(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case LCHabColorspace: { ModulateLCHab(percent_brightness,percent_saturation,percent_hue, &red,&green,&blue); break; } case LCHColorspace: case LCHuvColorspace: { ModulateLCHuv(percent_brightness,percent_saturation,percent_hue, &red,&green,&blue); break; } } SetPixelRed(image,ClampToQuantum(red),q); SetPixelGreen(image,ClampToQuantum(green),q); SetPixelBlue(image,ClampToQuantum(blue),q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ModulateImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % N e g a t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % NegateImage() negates the colors in the reference image. The grayscale % option means that only grayscale values within the image are negated. % % The format of the NegateImage method is: % % MagickBooleanType NegateImage(Image *image, % const MagickBooleanType grayscale,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o grayscale: If MagickTrue, only negate grayscale pixels within the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType NegateImage(Image *image, const MagickBooleanType grayscale,ExceptionInfo *exception) { #define NegateImageTag "Negate/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->storage_class == PseudoClass) for (i=0; i < (ssize_t) image->colors; i++) { /* Negate colormap. */ if (grayscale != MagickFalse) if ((image->colormap[i].red != image->colormap[i].green) || (image->colormap[i].green != image->colormap[i].blue)) continue; if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].red=QuantumRange-image->colormap[i].red; if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].green=QuantumRange-image->colormap[i].green; if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].blue=QuantumRange-image->colormap[i].blue; } /* Negate image. */ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); if( grayscale != MagickFalse ) { for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t j; if (IsPixelGray(image,q) == MagickFalse) { q+=GetPixelChannels(image); continue; } for (j=0; j < (ssize_t) GetPixelChannels(image); j++) { PixelChannel channel = GetPixelChannelChannel(image,j); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[j]=QuantumRange-q[j]; } q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; progress++; proceed=SetImageProgress(image,NegateImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(MagickTrue); } /* Negate image. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t j; for (j=0; j < (ssize_t) GetPixelChannels(image); j++) { PixelChannel channel = GetPixelChannelChannel(image,j); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[j]=QuantumRange-q[j]; } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,NegateImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % N o r m a l i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % The NormalizeImage() method enhances the contrast of a color image by % mapping the darkest 2 percent of all pixel to black and the brightest % 1 percent to white. % % The format of the NormalizeImage method is: % % MagickBooleanType NormalizeImage(Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType NormalizeImage(Image *image, ExceptionInfo *exception) { double black_point, white_point; black_point=(double) image->columns*image->rows*0.0015; white_point=(double) image->columns*image->rows*0.9995; return(ContrastStretchImage(image,black_point,white_point,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S i g m o i d a l C o n t r a s t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SigmoidalContrastImage() adjusts the contrast of an image with a non-linear % sigmoidal contrast algorithm. Increase the contrast of the image using a % sigmoidal transfer function without saturating highlights or shadows. % Contrast indicates how much to increase the contrast (0 is none; 3 is % typical; 20 is pushing it); mid-point indicates where midtones fall in the % resultant image (0 is white; 50% is middle-gray; 100% is black). Set % sharpen to MagickTrue to increase the image contrast otherwise the contrast % is reduced. % % The format of the SigmoidalContrastImage method is: % % MagickBooleanType SigmoidalContrastImage(Image *image, % const MagickBooleanType sharpen,const char *levels, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o sharpen: Increase or decrease image contrast. % % o contrast: strength of the contrast, the larger the number the more % 'threshold-like' it becomes. % % o midpoint: midpoint of the function as a color value 0 to QuantumRange. % % o exception: return any errors or warnings in this structure. 
% */ /* ImageMagick 6 has a version of this function which uses LUTs. */ /* Sigmoidal function Sigmoidal with inflexion point moved to b and "slope constant" set to a. The first version, based on the hyperbolic tangent tanh, when combined with the scaling step, is an exact arithmetic clone of the sigmoid function based on the logistic curve. The equivalence is based on the identity 1/(1+exp(-t)) = (1+tanh(t/2))/2 (http://de.wikipedia.org/wiki/Sigmoidfunktion) and the fact that the scaled sigmoidal derivation is invariant under affine transformations of the ordinate. The tanh version is almost certainly more accurate and cheaper. The 0.5 factor in the argument is to clone the legacy ImageMagick behavior. The reason for making the define depend on atanh even though it only uses tanh has to do with the construction of the inverse of the scaled sigmoidal. */ #if defined(MAGICKCORE_HAVE_ATANH) #define Sigmoidal(a,b,x) ( tanh((0.5*(a))*((x)-(b))) ) #else #define Sigmoidal(a,b,x) ( 1.0/(1.0+exp((a)*((b)-(x)))) ) #endif /* Scaled sigmoidal function: ( Sigmoidal(a,b,x) - Sigmoidal(a,b,0) ) / ( Sigmoidal(a,b,1) - Sigmoidal(a,b,0) ) See http://osdir.com/ml/video.image-magick.devel/2005-04/msg00006.html and http://www.cs.dartmouth.edu/farid/downloads/tutorials/fip.pdf. The limit of ScaledSigmoidal as a->0 is the identity, but a=0 gives a division by zero. This is fixed below by exiting immediately when contrast is small, leaving the image (or colormap) unmodified. This appears to be safe because the series expansion of the logistic sigmoidal function around x=b is 1/2-a*(b-x)/4+... so that the key denominator s(1)-s(0) is about a/4 (a/2 with tanh). */ #define ScaledSigmoidal(a,b,x) ( \ (Sigmoidal((a),(b),(x))-Sigmoidal((a),(b),0.0)) / \ (Sigmoidal((a),(b),1.0)-Sigmoidal((a),(b),0.0)) ) /* Inverse of ScaledSigmoidal, used for +sigmoidal-contrast. Because b may be 0 or 1, the argument of the hyperbolic tangent (resp. logistic sigmoidal) may be outside of the interval (-1,1) (resp. (0,1)), even when creating a LUT from in gamut values, hence the branching. In addition, HDRI may have out of gamut values. InverseScaledSigmoidal is not a two-sided inverse of ScaledSigmoidal: It is only a right inverse. This is unavoidable. */ static inline double InverseScaledSigmoidal(const double a,const double b, const double x) { const double sig0=Sigmoidal(a,b,0.0); const double sig1=Sigmoidal(a,b,1.0); const double argument=(sig1-sig0)*x+sig0; const double clamped= ( #if defined(MAGICKCORE_HAVE_ATANH) argument < -1+MagickEpsilon ? -1+MagickEpsilon : ( argument > 1-MagickEpsilon ? 1-MagickEpsilon : argument ) ); return(b+(2.0/a)*atanh(clamped)); #else argument < MagickEpsilon ? MagickEpsilon : ( argument > 1-MagickEpsilon ? 1-MagickEpsilon : argument ) ); return(b-log(1.0/clamped-1.0)/a); #endif } MagickExport MagickBooleanType SigmoidalContrastImage(Image *image, const MagickBooleanType sharpen,const double contrast,const double midpoint, ExceptionInfo *exception) { #define SigmoidalContrastImageTag "SigmoidalContrast/Image" #define ScaledSig(x) ( ClampToQuantum(QuantumRange* \ ScaledSigmoidal(contrast,QuantumScale*midpoint,QuantumScale*(x))) ) #define InverseScaledSig(x) ( ClampToQuantum(QuantumRange* \ InverseScaledSigmoidal(contrast,QuantumScale*midpoint,QuantumScale*(x))) ) CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; ssize_t y; /* Convenience macros. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); /* Side effect: may clamp values unless contrast<MagickEpsilon, in which case nothing is done. */ if (contrast < MagickEpsilon) return(MagickTrue); /* Sigmoidal-contrast enhance colormap. */ if (image->storage_class == PseudoClass) { register ssize_t i; if( sharpen != MagickFalse ) for (i=0; i < (ssize_t) image->colors; i++) { if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].red=(MagickRealType) ScaledSig( image->colormap[i].red); if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].green=(MagickRealType) ScaledSig( image->colormap[i].green); if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].blue=(MagickRealType) ScaledSig( image->colormap[i].blue); if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].alpha=(MagickRealType) ScaledSig( image->colormap[i].alpha); } else for (i=0; i < (ssize_t) image->colors; i++) { if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].red=(MagickRealType) InverseScaledSig( image->colormap[i].red); if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].green=(MagickRealType) InverseScaledSig( image->colormap[i].green); if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].blue=(MagickRealType) InverseScaledSig( image->colormap[i].blue); if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].alpha=(MagickRealType) InverseScaledSig( image->colormap[i].alpha); } } /* Sigmoidal-contrast enhance image. */ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if( sharpen != MagickFalse ) q[i]=ScaledSig(q[i]); else q[i]=InverseScaledSig(q[i]); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,SigmoidalContrastImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W h i t e B a l a n c e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WhiteBalanceImage() applies white balancing to an image according to a % grayworld assumption in the LAB colorspace. 
% % The format of the WhiteBalanceImage method is: % % MagickBooleanType WhiteBalanceImage(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: The image to auto-level % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType WhiteBalanceImage(Image *image, ExceptionInfo *exception) { #define WhiteBalanceImageTag "WhiteBalance/Image" CacheView *image_view; const char *artifact; double a_mean, b_mean; MagickOffsetType progress; MagickStatusType status; ssize_t y; /* White balance image. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); status=TransformImageColorspace(image,LabColorspace,exception); a_mean=0.0; b_mean=0.0; image_view=AcquireAuthenticCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { a_mean+=QuantumScale*GetPixela(image,p)-0.5; b_mean+=QuantumScale*GetPixelb(image,p)-0.5; p+=GetPixelChannels(image); } } a_mean/=((double) image->columns*image->rows); b_mean/=((double) image->columns*image->rows); progress=0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double a, b; /* Scale the chroma distance shifted according to amount of luminance. */ a=(double) GetPixela(image,q)-1.1*GetPixelL(image,q)*a_mean; b=(double) GetPixelb(image,q)-1.1*GetPixelL(image,q)*b_mean; SetPixela(image,ClampToQuantum(a),q); SetPixelb(image,ClampToQuantum(b),q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,WhiteBalanceImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); artifact=GetImageArtifact(image,"white-balance:vibrance"); if (artifact != (const char *) NULL) { ChannelType channel_mask; double black_point; GeometryInfo geometry_info; MagickStatusType flags; /* Level the a & b channels. */ flags=ParseGeometry(artifact,&geometry_info); black_point=geometry_info.rho; if ((flags & PercentValue) != 0) black_point*=(double) (QuantumRange/100.0); channel_mask=SetImageChannelMask(image,(ChannelType) (aChannel | bChannel)); status&=LevelImage(image,black_point,(double) QuantumRange-black_point, 1.0,exception); (void) SetImageChannelMask(image,channel_mask); } status&=TransformImageColorspace(image,sRGBColorspace,exception); return(status != 0 ? MagickTrue : MagickFalse); }
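The sigmoidal-contrast math described in the comments above can be exercised outside of MagickCore. The sketch below re-derives ScaledSigmoidal and its right inverse with plain doubles and tanh/atanh and round-trips a few points in [0,1]; it only illustrates the formulas, it is not ImageMagick code, and it omits the MagickEpsilon clamping and Quantum scaling.

/* Minimal sketch of the scaled sigmoidal mapping described above:
   s(x) = (sig(a,b,x) - sig(a,b,0)) / (sig(a,b,1) - sig(a,b,0)),
   with sig(a,b,x) = tanh(0.5*a*(x-b)).  Illustrative only. */
#include <math.h>
#include <stdio.h>

static double sig(double a, double b, double x) { return tanh(0.5*a*(x - b)); }

static double scaled_sig(double a, double b, double x)
{
  return (sig(a,b,x) - sig(a,b,0.0)) / (sig(a,b,1.0) - sig(a,b,0.0));
}

static double inverse_scaled_sig(double a, double b, double y)
{
  /* right inverse: undo the scaling, then invert tanh */
  double t = (sig(a,b,1.0) - sig(a,b,0.0))*y + sig(a,b,0.0);
  return b + (2.0/a)*atanh(t);
}

int main(void)
{
  const double a = 6.0, b = 0.5;   /* contrast and midpoint, both in [0,1] terms */
  for (double x = 0.0; x <= 1.0; x += 0.25)
    printf("x=%.2f  s(x)=%.4f  s^-1(s(x))=%.4f\n",
           x, scaled_sig(a,b,x), inverse_scaled_sig(a,b,scaled_sig(a,b,x)));
  return 0;
}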
GB_binop__pow_uint8.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):        GB (_AaddB__pow_uint8)
// A.*B function (eWiseMult):      GB (_AemultB_01__pow_uint8)
// A.*B function (eWiseMult):      GB (_AemultB_02__pow_uint8)
// A.*B function (eWiseMult):      GB (_AemultB_03__pow_uint8)
// A.*B function (eWiseMult):      GB (_AemultB_bitmap__pow_uint8)
// A*D function (colscale):        GB ((none))
// D*A function (rowscale):        GB ((none))
// C+=B function (dense accum):    GB (_Cdense_accumB__pow_uint8)
// C+=b function (dense accum):    GB (_Cdense_accumb__pow_uint8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3):  GB (_Cdense_ewise3_noaccum__pow_uint8)
// C=scalar+B                      GB (_bind1st__pow_uint8)
// C=scalar+B'                     GB (_bind1st_tran__pow_uint8)
// C=A+scalar                      GB (_bind2nd__pow_uint8)
// C=A'+scalar                     GB (_bind2nd_tran__pow_uint8)

// C type:   uint8_t
// A type:   uint8_t
// B,b type: uint8_t

// BinaryOp: cij = GB_pow_uint8 (aij, bij)

#define GB_ATYPE \
    uint8_t

#define GB_BTYPE \
    uint8_t

#define GB_CTYPE \
    uint8_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint8_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    uint8_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint8_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y, i, j) \
    z = GB_pow_uint8 (x, y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    1

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_POW || GxB_NO_UINT8 || GxB_NO_POW_UINT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__pow_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__pow_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__pow_uint8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint8_t
        uint8_t bwork = (*((uint8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__pow_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
} //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__pow_uint8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__pow_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__pow_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__pow_uint8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__pow_uint8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) 
; #else uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t x = (*((uint8_t *) x_input)) ; uint8_t *Bx = (uint8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; uint8_t bij = Bx [p] ; Cx [p] = GB_pow_uint8 (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__pow_uint8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t *Ax = (uint8_t *) Ax_input ; uint8_t y = (*((uint8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint8_t aij = Ax [p] ; Cx [p] = GB_pow_uint8 (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = Ax [pA] ; \ Cx [pC] = GB_pow_uint8 (x, aij) ; \ } GrB_Info GB (_bind1st_tran__pow_uint8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t x = (*((const uint8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = Ax [pA] ; \ Cx [pC] = GB_pow_uint8 (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__pow_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t y = (*((const uint8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
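The bind1st/bind2nd kernels above all share one shape: walk a dense value array, skip entries absent from an optional bitmap, and apply the binary operator with one argument bound to a scalar. A stripped-down sketch of that pattern follows; it is plain C rather than GraphBLAS code, the operator is a made-up saturating multiply (not GB_pow_uint8), and the names apply_bind1st and f_example are invented for the example.

/* Sketch of the bind1st apply pattern: Cx [p] = f (x, Bx [p]) for every entry
   present in the optional bitmap Bb.  Illustrative only. */
#include <stdint.h>
#include <stddef.h>

static inline uint8_t f_example (uint8_t x, uint8_t y)
{
    unsigned t = (unsigned) x * (unsigned) y ;
    return (uint8_t) (t > 255 ? 255 : t) ;      /* saturate instead of wrapping */
}

void apply_bind1st (uint8_t *Cx, uint8_t x, const uint8_t *Bx,
                    const int8_t *Bb, int64_t n, int nthreads)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < n ; p++)
    {
        if (Bb != NULL && !Bb [p]) continue ;   /* entry not in the bitmap */
        Cx [p] = f_example (x, Bx [p]) ;
    }
}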
a.24.1.c
/* { dg-do compile } */ /* { dg-require-effective-target tls } */ extern int omp_get_num_threads (void); int x, y, t, z[1000]; #pragma omp threadprivate(x) void a24 (int a) { const int c = 1; int i = 0; int l = 0; #pragma omp parallel default(none) private(a) shared(z) { int j = omp_get_num_threads (); /* O.K. - j is declared within parallel region */ /* O.K. - a is listed in private clause */ /* - z is listed in shared clause */ x = c; /* O.K. - x is threadprivate */ /* - c has const-qualified type */ z[i] = y; /* { dg-error "'i' not specified" "" { target *-*-* } 21 } */ /* { dg-error "enclosing parallel" "" { target *-*-* } 13 } */ /* { dg-error "'y' not specified" "" { target *-*-* } 21 } */ #pragma omp for firstprivate(y) for (i = 0; i < 10; i++) { z[i] = y; /* O.K. - i is the loop iteration variable */ /* - y is listed in firstprivate clause */ } z[l] = t; /* { dg-error "'l' not specified" "" { target *-*-* } 31 } */ /* { dg-error "'t' not specified" "" { target *-*-* } 31 } */ } }
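The testcase above exists to provoke default(none) diagnostics: i, y, l, and t are referenced inside the parallel region without an explicit data-sharing attribute. For contrast, a variant that compiles cleanly is sketched below; it is not part of the testsuite, the globals are renamed (x2, y2, t2, z2) only so the snippet is self-contained, and i and l are made firstprivate so their initial values reach the region.

/* Illustrative fixed variant: every referenced file-scope variable gets an
   explicit data-sharing attribute; threadprivate and const variables need none. */
extern int omp_get_num_threads (void);

int x2, y2, t2, z2[1000];
#pragma omp threadprivate(x2)

void a24_fixed (int a)
{
  const int c = 1;
  int i = 0;
  int l = 0;

  #pragma omp parallel default(none) private(a) firstprivate(i, l) shared(z2, y2, t2)
  {
    int j = omp_get_num_threads ();
    x2 = c;          /* x2 is threadprivate, c is const-qualified: neither is listed */
    z2[i] = y2;      /* i is firstprivate, y2 and z2 are shared */
    #pragma omp for firstprivate(y2)
    for (i = 0; i < 10; i++)
      z2[i] = y2;    /* i is the loop iteration variable of the worksharing loop */
    z2[l] = t2;      /* l is firstprivate, t2 is shared */
    (void) j;
  }
}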
GB_unop__lnot_int8_int8.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__lnot_int8_int8) // op(A') function: GB (_unop_tran__lnot_int8_int8) // C type: int8_t // A type: int8_t // cast: int8_t cij = aij // unaryop: cij = !(aij != 0) #define GB_ATYPE \ int8_t #define GB_CTYPE \ int8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !(x != 0) ; // casting #define GB_CAST(z, aij) \ int8_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ int8_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ int8_t z = aij ; \ Cx [pC] = !(z != 0) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_INT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__lnot_int8_int8) ( int8_t *Cx, // Cx and Ax may be aliased const int8_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int8_t aij = Ax [p] ; int8_t z = aij ; Cx [p] = !(z != 0) ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; int8_t aij = Ax [p] ; int8_t z = aij ; Cx [p] = !(z != 0) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__lnot_int8_int8) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
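The unary kernel above reduces to a single idiom: z = !(aij != 0), applied entrywise while skipping bitmap holes. A tiny standalone version is shown below for reference; it is plain C with invented names (lnot_apply), not GraphBLAS, and it prints 1 exactly where the input is zero.

/* Entrywise logical-not over an int8_t array with an optional bitmap. */
#include <stdint.h>
#include <stdio.h>

static void lnot_apply (int8_t *Cx, const int8_t *Ax, const int8_t *Ab, int64_t n)
{
    for (int64_t p = 0 ; p < n ; p++)
    {
        if (Ab != NULL && !Ab [p]) continue ;   /* entry not present in the bitmap */
        Cx [p] = !(Ax [p] != 0) ;               /* 1 where Ax is zero, 0 elsewhere */
    }
}

int main (void)
{
    int8_t Ax [5] = { 0, 3, -2, 0, 7 } ;
    int8_t Cx [5] = { 0 } ;
    lnot_apply (Cx, Ax, NULL, 5) ;
    for (int p = 0 ; p < 5 ; p++) printf ("%d ", Cx [p]) ;   /* prints: 1 0 0 1 0 */
    printf ("\n") ;
    return 0 ;
}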
syrk.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "syrk.h" /* Array initialization. */ static void init_array(int ni, int nj, DATA_TYPE *alpha, DATA_TYPE *beta, DATA_TYPE POLYBENCH_2D(C,NI,NI,ni,ni), DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj)) { int i __attribute__((annotate("scalar(range(0, " PB_XSTR(NI) ") final)"))); int j __attribute__((annotate("scalar(range(0, " PB_XSTR(NJ) ") final)"))); *alpha = 32412; *beta = 2123; for (i = 0; i < ni; i++) for (j = 0; j < nj; j++) A[i][j] = ((DATA_TYPE) i*j) / ni; for (i = 0; i < ni; i++) for (j = 0; j < ni; j++) C[i][j] = ((DATA_TYPE) i*j) / ni; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int ni, DATA_TYPE POLYBENCH_2D(C,NI,NI,ni,ni)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < ni; j++) { fprintf (stderr, DATA_PRINTF_MODIFIER, C[i][j]); if ((i * ni + j) % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_syrk(int ni, int nj, DATA_TYPE alpha, DATA_TYPE beta, DATA_TYPE POLYBENCH_2D(C,NI,NI,ni,ni), DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj)) { int i, j, k; #pragma scop #pragma omp parallel { /* C := alpha*A*A' + beta*C */ #pragma omp for private(j) for (i = 0; i < _PB_NI; i++) for (j = 0; j < _PB_NI; j++) C[i][j] *= beta; #pragma omp for private(j,k) for (i = 0; i < _PB_NI; i++) for (j = 0; j < _PB_NI; j++) for (k = 0; k < _PB_NJ; k++) C[i][j] += alpha * A[i][k] * A[j][k]; } #pragma endscop } int main(int argc, char** argv) { /* Retrieve problem size. */ int ni = NI; int nj = NJ; /* Variable declaration/allocation. */ DATA_TYPE alpha __attribute__((annotate("target('alpha') scalar()"))); DATA_TYPE beta __attribute__((annotate("target('beta') scalar()"))); POLYBENCH_2D_ARRAY_DECL(C,DATA_TYPE __attribute__((annotate("target('C') scalar(range(0, 12000000000000) final)"))),NI,NI,ni,ni); POLYBENCH_2D_ARRAY_DECL(A,DATA_TYPE __attribute__((annotate("target('A') scalar()"))),NI,NJ,ni,nj); /* Initialize array(s). */ init_array (ni, nj, &alpha, &beta, POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(A)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_syrk (ni, nj, alpha, beta, POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(A)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(ni, POLYBENCH_ARRAY(C))); /* Be clean. */ POLYBENCH_FREE_ARRAY(C); POLYBENCH_FREE_ARRAY(A); return 0; }
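As a plain-C reference for the kernel above: each output element is C[i][j] = beta*C[i][j] + alpha * sum_k A[i][k]*A[j][k], which the two OpenMP loops compute as a scale pass followed by an accumulate pass. The routine below is only an illustrative sequential check, not part of PolyBench, and it uses flat row-major double arrays instead of the POLYBENCH_2D macros.

/* Sequential syrk reference: C (ni x ni) := alpha*A*A^T + beta*C, A is ni x nj. */
static void syrk_reference(int ni, int nj, double alpha, double beta,
                           double *C, const double *A)
{
  for (int i = 0; i < ni; i++)
    for (int j = 0; j < ni; j++) {
      double acc = beta * C[i*ni + j];          /* scale the existing value */
      for (int k = 0; k < nj; k++)
        acc += alpha * A[i*nj + k] * A[j*nj + k];
      C[i*ni + j] = acc;
    }
}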
mbpush3.c
/* C Library for Skeleton 3D Electromagnetic OpenMP PIC Code */ /* written by Viktor K. Decyk, UCLA */ #include <stdlib.h> #include <stdio.h> #include <complex.h> #include <math.h> #include "mbpush3.h" /*--------------------------------------------------------------------*/ double ranorm() { /* this program calculates a random number y from a gaussian distribution with zero mean and unit variance, according to the method of mueller and box: y(k) = (-2*ln(x(k)))**1/2*sin(2*pi*x(k+1)) y(k+1) = (-2*ln(x(k)))**1/2*cos(2*pi*x(k+1)), where x is a random number uniformly distributed on (0,1). written for the ibm by viktor k. decyk, ucla local data */ static int r1 = 885098780, r2 = 1824280461; static int r4 = 1396483093, r5 = 55318673; static int iflg = 0; static double h1l = 65531.0, h1u = 32767.0, h2l = 65525.0; static double r0 = 0.0; int isc, i1; double ranorm, r3, asc, bsc, temp; if (iflg==1) { ranorm = r0; r0 = 0.0; iflg = 0; return ranorm; } isc = 65536; asc = (double) isc; bsc = asc*asc; i1 = r1 - (r1/isc)*isc; r3 = h1l*(double) r1 + asc*h1u*(double) i1; i1 = r3/bsc; r3 -= ((double) i1)*bsc; bsc = 0.5*bsc; i1 = r2/isc; isc = r2 - i1*isc; r0 = h1l*(double) r2 + asc*h1u*(double) isc; asc = 1.0/bsc; isc = r0*asc; r2 = r0 - ((double) isc)*bsc; r3 += (double) isc + 2.0*h1u*(double) i1; isc = r3*asc; r1 = r3 - ((double) isc)*bsc; temp = sqrt(-2.0*log((((double) r1) + ((double) r2)*asc)*asc)); isc = 65536; asc = (double) isc; bsc = asc*asc; i1 = r4 - (r4/isc)*isc; r3 = h2l*(double) r4 + asc*h1u*(double) i1; i1 = r3/bsc; r3 -= ((double) i1)*bsc; bsc = 0.5*bsc; i1 = r5/isc; isc = r5 - i1*isc; r0 = h2l*(double) r5 + asc*h1u*(double) isc; asc = 1.0/bsc; isc = r0*asc; r5 = r0 - ((double) isc)*bsc; r3 += (double) isc + 2.0*h1u*(double) i1; isc = r3*asc; r4 = r3 - ((double) isc)*bsc; r0 = 6.28318530717959*((((double) r4) + ((double) r5)*asc)*asc); ranorm = temp*sin(r0); r0 = temp*cos(r0); iflg = 1; return ranorm; } /*--------------------------------------------------------------------*/ void cdistr3(float part[], float vtx, float vty, float vtz, float vdx, float vdy, float vdz, int npx, int npy, int npz, int idimp, int nop, int nx, int ny, int nz, int ipbc) { /* for 3d code, this subroutine calculates initial particle co-ordinates and velocities with uniform density and maxwellian velocity with drift part[n][0] = position x of particle n part[n][1] = position y of particle n part[n][2] = position z of particle n part[n][3] = velocity vx of particle n part[n][4] = velocity vy of particle n part[n][5] = velocity vz of particle n vtx/vty/vtz = thermal velocity of electrons in x/y/z direction vdx/vdy/vdz = drift velocity of beam electrons in x/y/z direction npx/npy/npz = initial number of particles distributed in x/y/z direction idimp = size of phase space = 6 nop = number of particles nx/ny/nz = system length in x/y/z direction ipbc = particle boundary condition = (0,1,2,3) = (none,3d periodic,3d reflecting,mixed 2d reflecting/1d periodic) ranorm = gaussian random number with zero mean and unit variance local data */ int j, k, l, k1, l1, npxy, npxyz; float edgelx, edgely, edgelz, at1, at2, at3, at4, at5; float sum1, sum2, sum3; double dsum1, dsum2, dsum3; npxy = npx*npy; npxyz = npxy*npz; /* set boundary values */ edgelx = 0.0; edgely = 0.0; edgelz = 0.0; at1 = (float) nx/(float) npx; at2 = (float) ny/(float) npy; at3 = (float) nz/(float) npz; if (ipbc==2) { edgelx = 1.0; edgely = 1.0; edgelz = 1.0; at1 = (float) (nx-2)/(float) npx; at2 = (float) (ny-2)/(float) npy; at3 = (float) (nz-2)/(float) npz; } 
else if (ipbc==3) { edgelx = 1.0; edgely = 1.0; edgelz = 0.0; at1 = (float) (nx-2)/(float) npx; at2 = (float) (ny-2)/(float) npy; } /* uniform density profile */ for (l = 0; l < npz; l++) { l1 = idimp*npxy*l; at5 = edgelz + at3*(((float) l) + 0.5); for (k = 0; k < npy; k++) { k1 = idimp*npx*k + l1; at4 = edgely + at2*(((float) k) + 0.5); for (j = 0; j < npx; j++) { part[idimp*j+k1] = edgelx + at1*(((float) j) + 0.5); part[1+idimp*j+k1] = at4; part[2+idimp*j+k1] = at5; } } } /* maxwellian velocity distribution */ for (j = 0; j < npxyz; j++) { part[3+idimp*j] = vtx*ranorm(); part[4+idimp*j] = vty*ranorm(); part[5+idimp*j] = vtz*ranorm(); } /* add correct drift */ dsum1 = 0.0; dsum2 = 0.0; dsum3 = 0.0; for (j = 0; j < npxyz; j++) { dsum1 += part[3+idimp*j]; dsum2 += part[4+idimp*j]; dsum3 += part[5+idimp*j]; } sum1 = dsum1; sum2 = dsum2; sum3 = dsum3; at1 = 1.0/(float) npxyz; sum1 = at1*sum1 - vdx; sum2 = at1*sum2 - vdy; sum3 = at1*sum3 - vdz; for (j = 0; j < npxyz; j++) { part[3+idimp*j] -= sum1; part[4+idimp*j] -= sum2; part[5+idimp*j] -= sum3; } return; } /*--------------------------------------------------------------------*/ void cdblkp3l(float part[], int kpic[], int *nppmx, int idimp, int nop, int mx, int my, int mz, int mx1, int my1, int mxyz1, int *irc) { /* this subroutine finds the maximum number of particles in each tile of mx, my, mz to calculate size of segmented particle array ppart linear interpolation input: all except kpic, nppmx, output: kpic, nppmx part = input particle array part[n][0] = position x of particle n part[n][1] = position y of particle n part[n][2] = position z of particle n kpic = output number of particles per tile nppmx = return maximum number of particles in tile idimp = size of phase space = 6 nop = number of particles mx/my/mz = number of grids in sorting cell in x, y and z mx1 = (system length in x direction - 1)/mx + 1 my1 = (system length in y direction - 1)/my + 1 mxyz1 = mx1*my1*mz1, where mz1 = (system length in z direction - 1)/mz + 1 irc = maximum overflow, returned only if error occurs, when irc > 0 local data */ int j, k, n, m, l, mxy1, isum, ist, npx, ierr; ierr = 0; mxy1 = mx1*my1; /* clear counter array */ for (k = 0; k < mxyz1; k++) { kpic[k] = 0; } /* find how many particles in each tile */ for (j = 0; j < nop; j++) { n = part[idimp*j]; n = n/mx; m = part[1+idimp*j]; m = m/my; l = part[2+idimp*j]; l = l/mz; m = n + mx1*m + mxy1*l; if (m < mxyz1) { kpic[m] += 1; } else { ierr = ierr > (m - mxyz1 + 1) ? ierr : (m - mxyz1 + 1); } } /* find maximum */ isum = 0; npx = 0; for (k = 0; k < mxyz1; k++) { ist = kpic[k]; npx = npx > ist ? 
npx : ist; isum += ist; } *nppmx = npx; /* check for errors */ if (ierr > 0) { *irc = ierr; } else if (isum != nop) { *irc = -1; } return; } /*--------------------------------------------------------------------*/ void cppmovin3l(float part[], float ppart[], int kpic[], int nppmx, int idimp, int nop, int mx, int my, int mz, int mx1, int my1, int mxyz1, int *irc) { /* this subroutine sorts particles by x,y,z grid in tiles of mx, my, mz and copies to segmented array ppart linear interpolation input: all except ppart, kpic, output: ppart, kpic part/ppart = input/output particle arrays part[n][0] = position x of particle n part[n][1] = position y of particle n part[n][2] = position z of particle n ppart[m][n][0] = position x of particle n in tile m ppart[m][n][1] = position y of particle n in tile m ppart[m][n][2] = position z of particle n in tile m ppart[m][n][3] = velocity vx of particle n in tile m ppart[m][n][4] = velocity vy of particle n in tile m ppart[m][n][5] = velocity vz of particle n in tile m kpic = output number of particles per tile nppmx = maximum number of particles in tile idimp = size of phase space = 6 nop = number of particles mx/my/mz = number of grids in sorting cell in x, y and z mx1 = (system length in x direction - 1)/mx + 1 my1 = (system length in y direction - 1)/my + 1 mxyz1 = mx1*my1*mz1, where mz1 = (system length in z direction - 1)/mz + 1 irc = maximum overflow, returned only if error occurs, when irc > 0 local data */ int i, j, k, n, m, l, mxy1, ip, ierr; ierr = 0; mxy1 = mx1*my1; /* clear counter array */ for (k = 0; k < mxyz1; k++) { kpic[k] = 0; } /* find addresses of particles at each tile and reorder particles */ for (j = 0; j < nop; j++) { n = part[idimp*j]; n = n/mx; m = part[1+idimp*j]; m = m/my; l = part[2+idimp*j]; l = l/mz; m = n + mx1*m + mxy1*l; ip = kpic[m]; if (ip < nppmx) { for (i = 0; i < idimp; i++) { ppart[i+idimp*(ip+nppmx*m)] = part[i+idimp*j]; } } else { ierr = ierr > ip-nppmx+1 ? ierr : ip-nppmx+1; } kpic[m] = ip + 1; } if (ierr > 0) *irc = ierr; return; } /*--------------------------------------------------------------------*/ void cppcheck3l(float ppart[], int kpic[], int idimp, int nppmx, int nx, int ny, int nz, int mx, int my, int mz, int mx1, int my1, int mz1, int *irc) { /* this subroutine performs a sanity check to make sure particles sorted by x,y,z grid in tiles of mx, my, mz, are all within bounds. tiles are assumed to be arranged in 3D linear memory input: all except irc output: irc ppart[l][n][0] = position x of particle n in tile l ppart[l][n][1] = position y of particle n in tile l ppart[l][n][2] = position a of particle n in tile l kpic(l) = number of reordered output particles in tile l idimp = size of phase space = 6 nppmx = maximum number of particles in tile nx/ny/nz = number of grids in sorting cell in x/y/z mx1 = (system length in x direction - 1)/mx + 1 my1 = (system length in y direction - 1)/my + 1 mz1 = (system length in z direction - 1)/mz + 1 irc = particle error, returned only if error occurs, when irc > 0 local data */ int mxy1, mxyz1, noff, moff, loff, npp, j, k, l, nn, mm, ll, ist; float edgelx, edgely, edgelz, edgerx, edgery, edgerz, dx, dy, dz; mxy1 = mx1*my1; mxyz1 = mxy1*mz1; /* loop over tiles */ #pragma omp parallel for \ private(j,k,l,noff,moff,loff,npp,nn,mm,ll,ist,edgelx,edgely,edgelz, \ edgerx,edgery,edgerz,dx,dy,dz) for (l = 0; l < mxyz1; l++) { loff = l/mxy1; k = l - mxy1*loff; loff = mz*loff; noff = k/mx1; moff = my*noff; noff = mx*(k - mx1*noff); npp = kpic[l]; nn = nx - noff; nn = mx < nn ? 
mx : nn; mm = ny - moff; mm = my < mm ? my : mm; ll = nz - loff; ll = mz < ll ? mz : ll; edgelx = noff; edgerx = noff + nn; edgely = moff; edgery = moff + mm; edgelz = loff; edgerz = loff + ll; /* loop over particles in tile */ for (j = 0; j < npp; j++) { dx = ppart[idimp*(j+nppmx*l)]; dy = ppart[1+idimp*(j+nppmx*l)]; dz = ppart[2+idimp*(j+nppmx*l)]; /* find particles going out of bounds */ ist = 0; if (dx < edgelx) ist = 1; if (dx >= edgerx) ist = 2; if (dy < edgely) ist += 3; if (dy >= edgery) ist += 6; if (dz < edgelz) ist += 9; if (dz >= edgerz) ist += 18; if (ist > 0) *irc = l + 1; } } return; } /*--------------------------------------------------------------------*/ void cgbppush3l(float ppart[], float fxyz[], float bxyz[], int kpic[], float qbm, float dt, float dtc, float *ek, int idimp, int nppmx, int nx, int ny, int nz, int mx, int my, int mz, int nxv, int nyv, int nzv, int mx1, int my1, int mxyz1, int ipbc) { /* for 3d code, this subroutine updates particle co-ordinates and velocities using leap-frog scheme in time and first-order linear interpolation in space, with magnetic field. Using the Boris Mover. OpenMP version using guard cells data read in tiles particles stored segmented array 190 flops/particle, 1 divide, 54 loads, 6 stores input: all, output: ppart, ek velocity equations used are: vx(t+dt/2) = rot(1)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) + rot(2)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) + rot(3)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) vy(t+dt/2) = rot(4)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) + rot(5)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) + rot(6)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) vz(t+dt/2) = rot(7)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) + rot(8)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) + rot(9)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) where q/m is charge/mass, and the rotation matrix is given by: rot(1) = (1 - (om*dt/2)**2 + 2*(omx*dt/2)**2)/(1 + (om*dt/2)**2) rot(2) = 2*(omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2) rot(3) = 2*(-omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2) rot(4) = 2*(-omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2) rot(5) = (1 - (om*dt/2)**2 + 2*(omy*dt/2)**2)/(1 + (om*dt/2)**2) rot(6) = 2*(omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2) rot(7) = 2*(omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2) rot(8) = 2*(-omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2) rot(9) = (1 - (om*dt/2)**2 + 2*(omz*dt/2)**2)/(1 + (om*dt/2)**2) and om**2 = omx**2 + omy**2 + omz**2 the rotation matrix is determined by: omx = (q/m)*bx(x(t),y(t),z(t)), omy = (q/m)*by(x(t),y(t),z(t)), and omz = (q/m)*bz(x(t),y(t),z(t)). 
position equations used are: x(t+dt)=x(t) + vx(t+dt/2)*dt y(t+dt)=y(t) + vy(t+dt/2)*dt z(t+dt)=z(t) + vz(t+dt/2)*dt fx(x(t),y(t),z(t)), fy(x(t),y(t),z(t)), and fz(x(t),y(t),z(t)), bx(x(t),y(t),z(t)), by(x(t),y(t),z(t)), and bz(x(t),y(t),z(t)) are approximated by interpolation from the nearest grid points: fx(x,y,z) = (1-dz)*((1-dy)*((1-dx)*fx(n,m,l)+dx*fx(n+1,m,l)) + dy*((1-dx)*fx(n,m+1,l) + dx*fx(n+1,m+1,l))) + dz*((1-dy)*((1-dx)*fx(n,m,l+1)+dx*fx(n+1,m,l+1)) + dy*((1-dx)*fx(n,m+1,l+1) + dx*fx(n+1,m+1,l+1))) where n,m,l = leftmost grid points and dx = x-n, dy = y-m, dz = z-l similarly for fy(x,y,z), fz(x,y,z), bx(x,y,z), by(x,y,z), bz(x,y,z) ppart[m][n][0] = position x of particle n in tile m ppart[m][n][1] = position y of particle n in tile m ppart[m][n][2] = position z of particle n in tile m ppart[m][n][3] = velocity vx of particle n in tile m ppart[m][n][4] = velocity vy of particle n in tile m ppart[m][n][5] = velocity vz of particle n in tile m fxyz[l][k][j][0] = x component of force/charge at grid (j,k,l) fxyz[l][k][j][1] = y component of force/charge at grid (j,k,l) fxyz[l][k][j][2] = z component of force/charge at grid (j,k,l) that is, convolution of electric field over particle shape bxyz[l][k][j][0] = x component of magnetic field at grid (j,k,l) bxyz[l][k][j][1] = y component of magnetic field at grid (j,k,l) bxyz[l][k][j][2] = z component of magnetic field at grid (j,k,l) that is, the convolution of magnetic field over particle shape kpic = number of particles per tile qbm = particle charge/mass ratio dt = time interval between successive force calculations dtc = time interval between successive co-ordinate calculations kinetic energy/mass at time t is also calculated, using ek = .5*sum((vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt)**2 + (vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt)**2 + .25*(vz(t+dt/2) + vz(t-dt/2))**2) idimp = size of phase space = 6 nppmx = maximum number of particles in tile nx/ny/nz = system length in x/y/z direction mx/my/mz = number of grids in sorting cell in x/y/z nxv = second dimension of field arrays, must be >= nx+1 nyv = third dimension of field arrays, must be >= ny+1 nzv = fourth dimension of field array, must be >= nz+1 mx1 = (system length in x direction - 1)/mx + 1 my1 = (system length in y direction - 1)/my + 1 mxyz1 = mx1*my1*mz1, where mz1 = (system length in z direction - 1)/mz + 1 ipbc = particle boundary condition = (0,1,2,3) = (none,3d periodic,3d reflecting,mixed 2d reflecting/1d periodic) local data */ #define MXV 17 #define MYV 17 #define MZV 17 int mxy1, noff, moff, loff, npoff, npp; int i, j, k, l, nn, mm, ll, nm, mxv, myv, mxyv, nxyv; float qtmh, edgelx, edgely, edgelz, edgerx, edgery, edgerz; float dxp, dyp, dzp, amx, amy, amz, dx, dy, dz, ox, oy, oz, dx1; float acx, acy, acz, omxt, omyt, omzt, omt, anorm; float rot1, rot2, rot3, rot4, rot5, rot6, rot7, rot8, rot9; float x, y, z; float sfxyz[3*MXV*MYV*MZV], sbxyz[3*MXV*MYV*MZV]; /* float sfxyz[3*(mx+1)*(my+1)*(mz+1)]; */ /* float sbxyz[3*(mx+1)*(my+1)*(mz+1)]; */ double sum1, sum2; /* mxv = MXV; */ /* myv = MYV; */ mxv = mx+1; myv = my+1; mxyv = mxv*myv; nxyv = nxv*nyv; mxy1 = mx1*my1; qtmh = 0.5f*qbm*dt; sum2 = 0.0; /* set boundary values */ edgelx = 0.0f; edgely = 0.0f; edgelz = 0.0f; edgerx = (float) nx; edgery = (float) ny; edgerz = (float) nz; if (ipbc==2) { edgelx = 1.0f; edgely = 1.0f; edgelz = 1.0f; edgerx = (float) (nx-1); edgery = (float) (ny-1); edgerz = (float) (nz-1); } else if (ipbc==3) { edgelx = 1.0f; edgely = 1.0f; edgerx = (float) (nx-1); edgery = (float) (ny-1); } /* 
error if local array is too small */ /* if ((mx >= MXV) || (my >= MYV) || (mz >= MZV)) */ /* return; */ /* loop over tiles */ #pragma omp parallel for \ private(i,j,k,l,noff,moff,loff,npp,npoff,nn,mm,ll,nm,x,y,z,dxp,dyp,dzp, \ amx,amy,amz,dx1,dx,dy,dz,ox,oy,oz,acx,acy,acz,omxt,omyt,omzt,omt,anorm, \ rot1,rot2,rot3,rot4,rot5,rot6,rot7,rot8,rot9,sum1,sfxyz,sbxyz) \ reduction(+:sum2) for (l = 0; l < mxyz1; l++) { loff = l/mxy1; k = l - mxy1*loff; loff = mz*loff; noff = k/mx1; moff = my*noff; noff = mx*(k - mx1*noff); npp = kpic[l]; npoff = nppmx*l; /* load local fields from global array */ nn = (mx < nx-noff ? mx : nx-noff) + 1; mm = (my < ny-moff ? my : ny-moff) + 1; ll = (mz < nz-loff ? mz : nz-loff) + 1; for (k = 0; k < ll; k++) { for (j = 0; j < mm; j++) { for (i = 0; i < nn; i++) { sfxyz[3*(i+mxv*j+mxyv*k)] = fxyz[3*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sfxyz[1+3*(i+mxv*j+mxyv*k)] = fxyz[1+3*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sfxyz[2+3*(i+mxv*j+mxyv*k)] = fxyz[2+3*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; } } } for (k = 0; k < ll; k++) { for (j = 0; j < mm; j++) { for (i = 0; i < nn; i++) { sbxyz[3*(i+mxv*j+mxyv*k)] = bxyz[3*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sbxyz[1+3*(i+mxv*j+mxyv*k)] = bxyz[1+3*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sbxyz[2+3*(i+mxv*j+mxyv*k)] = bxyz[2+3*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; } } } sum1 = 0.0; /* loop over particles in tile */ for (j = 0; j < npp; j++) { /* find interpolation weights */ x = ppart[idimp*(j+npoff)]; y = ppart[1+idimp*(j+npoff)]; z = ppart[2+idimp*(j+npoff)]; nn = x; mm = y; ll = z; dxp = x - (float) nn; dyp = y - (float) mm; dzp = z - (float) ll; nm = 3*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff)); amx = 1.0f - dxp; amy = 1.0f - dyp; dx1 = dxp*dyp; dyp = amx*dyp; amx = amx*amy; amz = 1.0f - dzp; amy = dxp*amy; /* find electric field */ nn = nm; dx = amx*sfxyz[nn] + amy*sfxyz[nn+3]; dy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+3]; dz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+3]; mm = nn + 3*mxv; dx = amz*(dx + dyp*sfxyz[mm] + dx1*sfxyz[mm+3]); dy = amz*(dy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+3]); dz = amz*(dz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+3]); nn += 3*mxyv; acx = amx*sfxyz[nn] + amy*sfxyz[nn+3]; acy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+3]; acz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+3]; mm = nn + 3*mxv; dx = dx + dzp*(acx + dyp*sfxyz[mm] + dx1*sfxyz[mm+3]); dy = dy + dzp*(acy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+3]); dz = dz + dzp*(acz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+3]); /* find magnetic field */ nn = nm; ox = amx*sbxyz[nn] + amy*sbxyz[nn+3]; oy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+3]; oz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+3]; mm = nn + 3*mxv; ox = amz*(ox + dyp*sbxyz[mm] + dx1*sbxyz[mm+3]); oy = amz*(oy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+3]); oz = amz*(oz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+3]); nn += 3*mxyv; acx = amx*sbxyz[nn] + amy*sbxyz[nn+3]; acy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+3]; acz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+3]; mm = nn + 3*mxv; ox = ox + dzp*(acx + dyp*sbxyz[mm] + dx1*sbxyz[mm+3]); oy = oy + dzp*(acy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+3]); oz = oz + dzp*(acz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+3]); /* calculate half impulse */ dx *= qtmh; dy *= qtmh; dz *= qtmh; /* half acceleration */ acx = ppart[3+idimp*(j+npoff)] + dx; acy = ppart[4+idimp*(j+npoff)] + dy; acz = ppart[5+idimp*(j+npoff)] + dz; /* time-centered kinetic energy */ sum1 += (acx*acx + acy*acy + acz*acz); /* calculate cyclotron frequency */ omxt = qtmh*ox; omyt = qtmh*oy; omzt = qtmh*oz; /* calculate rotation matrix */ omt = omxt*omxt + omyt*omyt + omzt*omzt; anorm = 
2.0f/(1.0f + omt); omt = 0.5f*(1.0f - omt); rot4 = omxt*omyt; rot7 = omxt*omzt; rot8 = omyt*omzt; rot1 = omt + omxt*omxt; rot5 = omt + omyt*omyt; rot9 = omt + omzt*omzt; rot2 = omzt + rot4; rot4 -= omzt; rot3 = -omyt + rot7; rot7 += omyt; rot6 = omxt + rot8; rot8 -= omxt; /* new velocity */ dx += (rot1*acx + rot2*acy + rot3*acz)*anorm; dy += (rot4*acx + rot5*acy + rot6*acz)*anorm; dz += (rot7*acx + rot8*acy + rot9*acz)*anorm; ppart[3+idimp*(j+npoff)] = dx; ppart[4+idimp*(j+npoff)] = dy; ppart[5+idimp*(j+npoff)] = dz; /* new position */ dx = x + dx*dtc; dy = y + dy*dtc; dz = z + dz*dtc; /* reflecting boundary conditions */ if (ipbc==2) { if ((dx < edgelx) || (dx >= edgerx)) { dx = x; ppart[3+idimp*(j+npoff)] = -ppart[3+idimp*(j+npoff)]; } if ((dy < edgely) || (dy >= edgery)) { dy = y; ppart[4+idimp*(j+npoff)] = -ppart[4+idimp*(j+npoff)]; } if ((dz < edgelz) || (dz >= edgerz)) { dz = z; ppart[5+idimp*(j+npoff)] = -ppart[5+idimp*(j+npoff)]; } } /* mixed reflecting/periodic boundary conditions */ else if (ipbc==3) { if ((dx < edgelx) || (dx >= edgerx)) { dx = x; ppart[3+idimp*(j+npoff)] = -ppart[3+idimp*(j+npoff)]; } if ((dy < edgely) || (dy >= edgery)) { dy = y; ppart[4+idimp*(j+npoff)] = -ppart[4+idimp*(j+npoff)]; } } /* set new position */ ppart[idimp*(j+npoff)] = dx; ppart[1+idimp*(j+npoff)] = dy; ppart[2+idimp*(j+npoff)] = dz; } sum2 += sum1; } /* normalize kinetic energy */ *ek += 0.5f*sum2; return; #undef MXV #undef MYV #undef MZV } /*--------------------------------------------------------------------*/ void cgbppushf3l(float ppart[], float fxyz[], float bxyz[], int kpic[], int ncl[], int ihole[], float qbm, float dt, float dtc, float *ek, int idimp, int nppmx, int nx, int ny, int nz, int mx, int my, int mz, int nxv, int nyv, int nzv, int mx1, int my1, int mxyz1, int ntmax, int *irc) { /* for 3d code, this subroutine updates particle co-ordinates and velocities using leap-frog scheme in time and first-order linear interpolation in space, with magnetic field. Using the Boris Mover. 
also determines list of particles which are leaving this tile OpenMP version using guard cells data read in tiles particles stored segmented array 190 flops/particle, 1 divide, 54 loads, 6 stores input: all except ncl, ihole, irc, output: ppart, ncl, ihole, ek, irc velocity equations used are: vx(t+dt/2) = rot(1)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) + rot(2)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) + rot(3)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) vy(t+dt/2) = rot(4)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) + rot(5)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) + rot(6)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) vz(t+dt/2) = rot(7)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) + rot(8)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) + rot(9)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) where q/m is charge/mass, and the rotation matrix is given by: rot(1) = (1 - (om*dt/2)**2 + 2*(omx*dt/2)**2)/(1 + (om*dt/2)**2) rot(2) = 2*(omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2) rot(3) = 2*(-omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2) rot(4) = 2*(-omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2) rot(5) = (1 - (om*dt/2)**2 + 2*(omy*dt/2)**2)/(1 + (om*dt/2)**2) rot(6) = 2*(omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2) rot(7) = 2*(omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2) rot(8) = 2*(-omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2) rot(9) = (1 - (om*dt/2)**2 + 2*(omz*dt/2)**2)/(1 + (om*dt/2)**2) and om**2 = omx**2 + omy**2 + omz**2 the rotation matrix is determined by: omx = (q/m)*bx(x(t),y(t),z(t)), omy = (q/m)*by(x(t),y(t),z(t)), and omz = (q/m)*bz(x(t),y(t),z(t)). position equations used are: x(t+dt)=x(t) + vx(t+dt/2)*dt y(t+dt)=y(t) + vy(t+dt/2)*dt z(t+dt)=z(t) + vz(t+dt/2)*dt fx(x(t),y(t),z(t)), fy(x(t),y(t),z(t)), and fz(x(t),y(t),z(t)), bx(x(t),y(t),z(t)), by(x(t),y(t),z(t)), and bz(x(t),y(t),z(t)) are approximated by interpolation from the nearest grid points: fx(x,y,z) = (1-dz)*((1-dy)*((1-dx)*fx(n,m,l)+dx*fx(n+1,m,l)) + dy*((1-dx)*fx(n,m+1,l) + dx*fx(n+1,m+1,l))) + dz*((1-dy)*((1-dx)*fx(n,m,l+1)+dx*fx(n+1,m,l+1)) + dy*((1-dx)*fx(n,m+1,l+1) + dx*fx(n+1,m+1,l+1))) where n,m,l = leftmost grid points and dx = x-n, dy = y-m, dz = z-l similarly for fy(x,y,z), fz(x,y,z), bx(x,y,z), by(x,y,z), bz(x,y,z) ppart[m][n][0] = position x of particle n in tile m ppart[m][n][1] = position y of particle n in tile m ppart[m][n][2] = position z of particle n in tile m ppart[m][n][3] = velocity vx of particle n in tile m ppart[m][n][4] = velocity vy of particle n in tile m ppart[m][n][5] = velocity vz of particle n in tile m fxyz[l][k][j][0] = x component of force/charge at grid (j,k,l) fxyz[l][k][j][1] = y component of force/charge at grid (j,k,l) fxyz[l][k][j][2] = z component of force/charge at grid (j,k,l) that is, convolution of electric field over particle shape bxyz[l][k][j][0] = x component of magnetic field at grid (j,k,l) bxyz[l][k][j][1] = y component of magnetic field at grid (j,k,l) bxyz[l][k][j][2] = z component of magnetic field at grid (j,k,l) that is, the convolution of magnetic field over particle shape kpic[l] = number of particles in tile l ncl[l][i] = number of particles going to destination i, tile l ihole[l][:][0] = location of hole in array left by departing particle ihole[l][:][1] = direction destination of particle leaving hole all for tile l ihole[l][0][0] = ih, number of holes left (error, if negative) qbm = particle 
charge/mass ratio dt = time interval between successive force calculations dtc = time interval between successive co-ordinate calculations kinetic energy/mass at time t is also calculated, using ek = .5*sum((vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt)**2 + (vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt)**2 + .25*(vz(t+dt/2) + vz(t-dt/2))**2) idimp = size of phase space = 6 nppmx = maximum number of particles in tile nx/ny/nz = system length in x/y/z direction mx/my/mz = number of grids in sorting cell in x/y/z nxv = second dimension of field arrays, must be >= nx+1 nyv = third dimension of field arrays, must be >= ny+1 nzv = fourth dimension of field array, must be >= nz+1 mx1 = (system length in x direction - 1)/mx + 1 my1 = (system length in y direction - 1)/my + 1 mxyz1 = mx1*my1*mz1, where mz1 = (system length in z direction - 1)/mz + 1 ntmax = size of hole array for particles leaving tiles irc = maximum overflow, returned only if error occurs, when irc > 0 optimized version local data */ #define MXV 17 #define MYV 17 #define MZV 17 int mxy1, noff, moff, loff, npoff, npp; int i, j, k, l, ih, nh, nn, mm, ll, nm, mxv, myv, mxyv, nxyv; float anx, any, anz, edgelx, edgely, edgelz, edgerx, edgery, edgerz; float dxp, dyp, dzp, amx, amy, amz, dx, dy, dz, ox, oy, oz, dx1; float qtmh, acx, acy, acz, omxt, omyt, omzt, omt, anorm; float rot1, rot2, rot3, rot4, rot5, rot6, rot7, rot8, rot9; float x, y, z; float sfxyz[3*MXV*MYV*MZV], sbxyz[3*MXV*MYV*MZV]; /* float sfxyz[3*(mx+1)*(my+1)*(mz+1)]; */ /* float sbxyz[3*(mx+1)*(my+1)*(mz+1)]; */ double sum1, sum2; /* mxv = MXV; */ /* myv = MYV; */ mxv = mx+1; myv = my+1; mxyv = mxv*myv; nxyv = nxv*nyv; mxy1 = mx1*my1; qtmh = 0.5f*qbm*dt; anx = (float) nx; any = (float) ny; anz = (float) nz; sum2 = 0.0; /* error if local array is too small */ /* if ((mx >= MXV) || (my >= MYV) || (mz >= MZV)) */ /* return; */ /* loop over tiles */ #pragma omp parallel for \ private(i,j,k,l,noff,moff,loff,npp,npoff,nn,mm,ll,nm,ih,nh,x,y,z,dxp, \ dyp,dzp,amx,amy,amz,dx1,dx,dy,dz,ox,oy,oz,acx,acy,acz,omxt,omyt,omzt, \ omt,anorm,rot1,rot2,rot3,rot4,rot5,rot6,rot7,rot8,rot9,edgelx,edgely, \ edgelz,edgerx,edgery,edgerz,sum1,sfxyz,sbxyz) \ reduction(+:sum2) for (l = 0; l < mxyz1; l++) { loff = l/mxy1; k = l - mxy1*loff; loff = mz*loff; noff = k/mx1; moff = my*noff; noff = mx*(k - mx1*noff); npp = kpic[l]; npoff = nppmx*l; nn = nx - noff; nn = mx < nn ? mx : nn; mm = ny - moff; mm = my < mm ? my : mm; ll = nz - loff; ll = mz < ll ? 
mz : ll; edgelx = noff; edgerx = noff + nn; edgely = moff; edgery = moff + mm; edgelz = loff; edgerz = loff + ll; ih = 0; nh = 0; nn += 1; mm += 1; ll += 1; /* load local fields from global array */ for (k = 0; k < ll; k++) { for (j = 0; j < mm; j++) { for (i = 0; i < nn; i++) { sfxyz[3*(i+mxv*j+mxyv*k)] = fxyz[3*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sfxyz[1+3*(i+mxv*j+mxyv*k)] = fxyz[1+3*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sfxyz[2+3*(i+mxv*j+mxyv*k)] = fxyz[2+3*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; } } } for (k = 0; k < ll; k++) { for (j = 0; j < mm; j++) { for (i = 0; i < nn; i++) { sbxyz[3*(i+mxv*j+mxyv*k)] = bxyz[3*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sbxyz[1+3*(i+mxv*j+mxyv*k)] = bxyz[1+3*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sbxyz[2+3*(i+mxv*j+mxyv*k)] = bxyz[2+3*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; } } } /* clear counters */ for (j = 0; j < 26; j++) { ncl[j+26*l] = 0; } sum1 = 0.0; /* loop over particles in tile */ for (j = 0; j < npp; j++) { /* find interpolation weights */ x = ppart[idimp*(j+npoff)]; y = ppart[1+idimp*(j+npoff)]; z = ppart[2+idimp*(j+npoff)]; nn = x; mm = y; ll = z; dxp = x - (float) nn; dyp = y - (float) mm; dzp = z - (float) ll; nm = 3*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff)); amx = 1.0f - dxp; amy = 1.0f - dyp; dx1 = dxp*dyp; dyp = amx*dyp; amx = amx*amy; amz = 1.0f - dzp; amy = dxp*amy; /* find electric field */ nn = nm; dx = amx*sfxyz[nn] + amy*sfxyz[nn+3]; dy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+3]; dz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+3]; mm = nn + 3*mxv; dx = amz*(dx + dyp*sfxyz[mm] + dx1*sfxyz[mm+3]); dy = amz*(dy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+3]); dz = amz*(dz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+3]); nn += 3*mxyv; acx = amx*sfxyz[nn] + amy*sfxyz[nn+3]; acy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+3]; acz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+3]; mm = nn + 3*mxv; dx = dx + dzp*(acx + dyp*sfxyz[mm] + dx1*sfxyz[mm+3]); dy = dy + dzp*(acy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+3]); dz = dz + dzp*(acz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+3]); /* find magnetic field */ nn = nm; ox = amx*sbxyz[nn] + amy*sbxyz[nn+3]; oy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+3]; oz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+3]; mm = nn + 3*mxv; ox = amz*(ox + dyp*sbxyz[mm] + dx1*sbxyz[mm+3]); oy = amz*(oy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+3]); oz = amz*(oz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+3]); nn += 3*mxyv; acx = amx*sbxyz[nn] + amy*sbxyz[nn+3]; acy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+3]; acz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+3]; mm = nn + 3*mxv; ox = ox + dzp*(acx + dyp*sbxyz[mm] + dx1*sbxyz[mm+3]); oy = oy + dzp*(acy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+3]); oz = oz + dzp*(acz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+3]); /* calculate half impulse */ dx *= qtmh; dy *= qtmh; dz *= qtmh; /* half acceleration */ acx = ppart[3+idimp*(j+npoff)] + dx; acy = ppart[4+idimp*(j+npoff)] + dy; acz = ppart[5+idimp*(j+npoff)] + dz; /* time-centered kinetic energy */ sum1 += (acx*acx + acy*acy + acz*acz); /* calculate cyclotron frequency */ omxt = qtmh*ox; omyt = qtmh*oy; omzt = qtmh*oz; /* calculate rotation matrix */ omt = omxt*omxt + omyt*omyt + omzt*omzt; anorm = 2.0f/(1.0f + omt); omt = 0.5f*(1.0f - omt); rot4 = omxt*omyt; rot7 = omxt*omzt; rot8 = omyt*omzt; rot1 = omt + omxt*omxt; rot5 = omt + omyt*omyt; rot9 = omt + omzt*omzt; rot2 = omzt + rot4; rot4 -= omzt; rot3 = -omyt + rot7; rot7 += omyt; rot6 = omxt + rot8; rot8 -= omxt; /* new velocity */ dx += (rot1*acx + rot2*acy + rot3*acz)*anorm; dy += (rot4*acx + rot5*acy + rot6*acz)*anorm; dz += (rot7*acx + rot8*acy + rot9*acz)*anorm; 
ppart[3+idimp*(j+npoff)] = dx; ppart[4+idimp*(j+npoff)] = dy; ppart[5+idimp*(j+npoff)] = dz; /* new position */ dx = x + dx*dtc; dy = y + dy*dtc; dz = z + dz*dtc; /* find particles going out of bounds */ mm = 0; /* count how many particles are going in each direction in ncl */ /* save their address and destination in ihole */ /* use periodic boundary conditions and check for roundoff error */ /* mm = direction particle is going */ if (dx >= edgerx) { if (dx >= anx) dx = dx - anx; mm = 2; } else if (dx < edgelx) { if (dx < 0.0f) { dx += anx; if (dx < anx) mm = 1; else dx = 0.0f; } else { mm = 1; } } if (dy >= edgery) { if (dy >= any) dy = dy - any; mm += 6; } else if (dy < edgely) { if (dy < 0.0f) { dy += any; if (dy < any) mm += 3; else dy = 0.0f; } else { mm += 3; } } if (dz >= edgerz) { if (dz >= anz) dz = dz - anz; mm += 18; } else if (dz < edgelz) { if (dz < 0.0f) { dz += anz; if (dz < anz) mm += 9; else dz = 0.0f; } else { mm += 9; } } /* set new position */ ppart[idimp*(j+npoff)] = dx; ppart[1+idimp*(j+npoff)] = dy; ppart[2+idimp*(j+npoff)] = dz; /* increment counters */ if (mm > 0) { ncl[mm+26*l-1] += 1; ih += 1; if (ih <= ntmax) { ihole[2*(ih+(ntmax+1)*l)] = j + 1; ihole[1+2*(ih+(ntmax+1)*l)] = mm; } else { nh = 1; } } } sum2 += sum1; /* set error and end of file flag */ if (nh > 0) { *irc = ih; ih = -ih; } ihole[2*(ntmax+1)*l] = ih; } /* normalize kinetic energy */ *ek += 0.5f*sum2; return; #undef MXV #undef MYV #undef MZV } /*--------------------------------------------------------------------*/ void cgrbppush3l(float ppart[], float fxyz[], float bxyz[], int kpic[], float qbm, float dt, float dtc, float ci, float *ek, int idimp, int nppmx, int nx, int ny, int nz, int mx, int my, int mz, int nxv, int nyv, int nzv, int mx1, int my1, int mxyz1, int ipbc) { /* for 3d code, this subroutine updates particle co-ordinates and velocities using leap-frog scheme in time and first-order linear interpolation in space, for relativistic particles with magnetic field Using the Boris Mover. 
OpenMP version using guard cells data read in tiles particles stored segmented array 202 flops/particle, 4 divides, 2 sqrts, 54 loads, 6 stores input: all, output: ppart, ek momentum equations used are: px(t+dt/2) = rot(1)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) + rot(2)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) + rot(3)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) py(t+dt/2) = rot(4)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) + rot(5)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) + rot(6)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) pz(t+dt/2) = rot(7)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) + rot(8)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) + rot(9)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) where q/m is charge/mass, and the rotation matrix is given by: rot(1) = (1 - (om*dt/2)**2 + 2*(omx*dt/2)**2)/(1 + (om*dt/2)**2) rot(2) = 2*(omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2) rot(3) = 2*(-omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2) rot(4) = 2*(-omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2) rot(5) = (1 - (om*dt/2)**2 + 2*(omy*dt/2)**2)/(1 + (om*dt/2)**2) rot(6) = 2*(omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2) rot(7) = 2*(omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2) rot(8) = 2*(-omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2) rot(9) = (1 - (om*dt/2)**2 + 2*(omz*dt/2)**2)/(1 + (om*dt/2)**2) and om**2 = omx**2 + omy**2 + omz**2 the rotation matrix is determined by: omx = (q/m)*bx(x(t),y(t),z(t))*gami, omy = (q/m)*by(x(t),y(t),z(t))*gami, omz = (q/m)*bz(x(t),y(t),z(t))*gami, where gami = 1./sqrt(1.+(px(t)*px(t)+py(t)*py(t)+pz(t)*pz(t))*ci*ci) position equations used are: x(t+dt) = x(t) + px(t+dt/2)*dtg y(t+dt) = y(t) + py(t+dt/2)*dtg z(t+dt) = z(t) + pz(t+dt/2)*dtg where dtg = dtc/sqrt(1.+(px(t+dt/2)*px(t+dt/2)+py(t+dt/2)*py(t+dt/2)+ pz(t+dt/2)*pz(t+dt/2))*ci*ci) fx(x(t),y(t),z(t)), fy(x(t),y(t),z(t)), and fz(x(t),y(t),z(t)), bx(x(t),y(t),z(t)), by(x(t),y(t),z(t)), and bz(x(t),y(t),z(t)) are approximated by interpolation from the nearest grid points: fx(x,y,z) = (1-dz)*((1-dy)*((1-dx)*fx(n,m,l)+dx*fx(n+1,m,l)) + dy*((1-dx)*fx(n,m+1,l) + dx*fx(n+1,m+1,l))) + dz*((1-dy)*((1-dx)*fx(n,m,l+1)+dx*fx(n+1,m,l+1)) + dy*((1-dx)*fx(n,m+1,l+1) + dx*fx(n+1,m+1,l+1))) where n,m,l = leftmost grid points and dx = x-n, dy = y-m, dz = z-l similarly for fy(x,y,z), fz(x,y,z), bx(x,y,z), by(x,y,z), bz(x,y,z) ppart[m][n][0] = position x of particle n in tile m ppart[m][n][1] = position y of particle n in tile m ppart[m][n][2] = position z of particle n in tile m ppart[m][n][3] = momentum px of particle n in tile m ppart[m][n][4] = momentum py of particle n in tile m ppart[m][n][5] = momentum pz of particle n in tile m fxyz[l][k][j][0] = x component of force/charge at grid (j,k,l) fxyz[l][k][j][1] = y component of force/charge at grid (j,k,l) fxyz[l][k][j][2] = z component of force/charge at grid (j,k,l) that is, convolution of electric field over particle shape bxyz[l][k][j][0] = x component of magnetic field at grid (j,k,l) bxyz[l][k][j][1] = y component of magnetic field at grid (j,k,l) bxyz[l][k][j][2] = z component of magnetic field at grid (j,k,l) that is, the convolution of magnetic field over particle shape kpic = number of particles per tile qbm = particle charge/mass ratio dt = time interval between successive force calculations dtc = time interval between successive co-ordinate calculations ci = reciprocal of velocity of light kinetic 
energy/mass at time t is also calculated, using ek = gami*sum((px(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt)**2 + (py(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt)**2 + (pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt)**2)/(1. + gami) idimp = size of phase space = 6 nppmx = maximum number of particles in tile nx/ny/nz = system length in x/y/z direction mx/my/mz = number of grids in sorting cell in x/y/z nxv = second dimension of field arrays, must be >= nx+1 nyv = third dimension of field arrays, must be >= ny+1 nzv = fourth dimension of field array, must be >= nz+1 mx1 = (system length in x direction - 1)/mx + 1 my1 = (system length in y direction - 1)/my + 1 mxyz1 = mx1*my1*mz1, where mz1 = (system length in z direction - 1)/mz + 1 ipbc = particle boundary condition = (0,1,2,3) = (none,3d periodic,3d reflecting,mixed 2d reflecting/1d periodic) local data */ #define MXV 17 #define MYV 17 #define MZV 17 int mxy1, noff, moff, loff, npoff, npp; int i, j, k, l, nn, mm, ll, nm, mxv, myv, mxyv, nxyv; float qtmh, ci2, edgelx, edgely, edgelz, edgerx, edgery, edgerz; float dxp, dyp, dzp, amx, amy, amz, dx, dy, dz, ox, oy, oz, dx1; float acx, acy, acz, p2, gami, qtmg, omxt, omyt, omzt, omt, anorm; float rot1, rot2, rot3, rot4, rot5, rot6, rot7, rot8, rot9, dtg; float x, y, z; float sfxyz[3*MXV*MYV*MZV], sbxyz[3*MXV*MYV*MZV]; /* float sfxyz[3*(mx+1)*(my+1)*(mz+1)]; */ /* float sbxyz[3*(mx+1)*(my+1)*(mz+1)]; */ double sum1, sum2; /* mxv = MXV; */ /* myv = MYV; */ mxv = mx+1; myv = my+1; mxyv = mxv*myv; nxyv = nxv*nyv; mxy1 = mx1*my1; qtmh = 0.5f*qbm*dt; ci2 = ci*ci; sum2 = 0.0; /* set boundary values */ edgelx = 0.0f; edgely = 0.0f; edgelz = 0.0f; edgerx = (float) nx; edgery = (float) ny; edgerz = (float) nz; if (ipbc==2) { edgelx = 1.0f; edgely = 1.0f; edgelz = 1.0f; edgerx = (float) (nx-1); edgery = (float) (ny-1); edgerz = (float) (nz-1); } else if (ipbc==3) { edgelx = 1.0f; edgely = 1.0f; edgerx = (float) (nx-1); edgery = (float) (ny-1); } /* error if local array is too small */ /* if ((mx >= MXV) || (my >= MYV) || (mz >= MZV)) */ /* return; */ /* loop over tiles */ #pragma omp parallel for \ private(i,j,k,l,noff,moff,loff,npp,npoff,nn,mm,ll,nm,x,y,z,dxp,dyp,dzp, \ amx,amy,amz,dx1,dx,dy,dz,ox,oy,oz,acx,acy,acz,omxt,omyt,omzt,omt,anorm, \ rot1,rot2,rot3,rot4,rot5,rot6,rot7,rot8,rot9,p2,gami,qtmg,dtg,sum1, \ sfxyz,sbxyz) \ reduction(+:sum2) for (l = 0; l < mxyz1; l++) { loff = l/mxy1; k = l - mxy1*loff; loff = mz*loff; noff = k/mx1; moff = my*noff; noff = mx*(k - mx1*noff); npp = kpic[l]; npoff = nppmx*l; /* load local fields from global array */ nn = (mx < nx-noff ? mx : nx-noff) + 1; mm = (my < ny-moff ? my : ny-moff) + 1; ll = (mz < nz-loff ? 
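/* Illustrative note, not part of the original code: the index arithmetic at
   the top of this tile loop decomposes the flat tile index l into 3d tile
   coordinates and grid offsets, reusing noff and loff as temporaries.
   Conceptually (hypothetical names lx, ly, lz):
      lz = l/(mx1*my1);  k  = l - (mx1*my1)*lz;
      ly = k/mx1;        lx = k - mx1*ly;
      noff = mx*lx;  moff = my*ly;  loff = mz*lz;
   For example, with mx1 = my1 = 4 and mx = my = mz = 8, tile l = 37 gives
   lz = 2, ly = 1, lx = 1, so noff = 8, moff = 8, loff = 16. */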
mz : nz-loff) + 1; for (k = 0; k < ll; k++) { for (j = 0; j < mm; j++) { for (i = 0; i < nn; i++) { sfxyz[3*(i+mxv*j+mxyv*k)] = fxyz[3*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sfxyz[1+3*(i+mxv*j+mxyv*k)] = fxyz[1+3*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sfxyz[2+3*(i+mxv*j+mxyv*k)] = fxyz[2+3*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; } } } for (k = 0; k < ll; k++) { for (j = 0; j < mm; j++) { for (i = 0; i < nn; i++) { sbxyz[3*(i+mxv*j+mxyv*k)] = bxyz[3*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sbxyz[1+3*(i+mxv*j+mxyv*k)] = bxyz[1+3*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sbxyz[2+3*(i+mxv*j+mxyv*k)] = bxyz[2+3*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; } } } sum1 = 0.0; /* loop over particles in tile */ for (j = 0; j < npp; j++) { /* find interpolation weights */ x = ppart[idimp*(j+npoff)]; y = ppart[1+idimp*(j+npoff)]; z = ppart[2+idimp*(j+npoff)]; nn = x; mm = y; ll = z; dxp = x - (float) nn; dyp = y - (float) mm; dzp = z - (float) ll; nm = 3*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff)); amx = 1.0f - dxp; amy = 1.0f - dyp; dx1 = dxp*dyp; dyp = amx*dyp; amx = amx*amy; amz = 1.0f - dzp; amy = dxp*amy; /* find electric field */ nn = nm; dx = amx*sfxyz[nn] + amy*sfxyz[nn+3]; dy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+3]; dz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+3]; mm = nn + 3*mxv; dx = amz*(dx + dyp*sfxyz[mm] + dx1*sfxyz[mm+3]); dy = amz*(dy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+3]); dz = amz*(dz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+3]); nn += 3*mxyv; acx = amx*sfxyz[nn] + amy*sfxyz[nn+3]; acy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+3]; acz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+3]; mm = nn + 3*mxv; dx = dx + dzp*(acx + dyp*sfxyz[mm] + dx1*sfxyz[mm+3]); dy = dy + dzp*(acy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+3]); dz = dz + dzp*(acz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+3]); /* find magnetic field */ nn = nm; ox = amx*sbxyz[nn] + amy*sbxyz[nn+3]; oy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+3]; oz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+3]; mm = nn + 3*mxv; ox = amz*(ox + dyp*sbxyz[mm] + dx1*sbxyz[mm+3]); oy = amz*(oy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+3]); oz = amz*(oz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+3]); nn += 3*mxyv; acx = amx*sbxyz[nn] + amy*sbxyz[nn+3]; acy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+3]; acz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+3]; mm = nn + 3*mxv; ox = ox + dzp*(acx + dyp*sbxyz[mm] + dx1*sbxyz[mm+3]); oy = oy + dzp*(acy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+3]); oz = oz + dzp*(acz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+3]); /* calculate half impulse */ dx *= qtmh; dy *= qtmh; dz *= qtmh; /* half acceleration */ acx = ppart[3+idimp*(j+npoff)] + dx; acy = ppart[4+idimp*(j+npoff)] + dy; acz = ppart[5+idimp*(j+npoff)] + dz; /* find inverse gamma */ p2 = acx*acx + acy*acy + acz*acz; gami = 1.0f/sqrtf(1.0f + p2*ci2); /* renormalize magnetic field */ qtmg = qtmh*gami; /* time-centered kinetic energy */ sum1 += gami*p2/(1.0f + gami); /* calculate cyclotron frequency */ omxt = qtmg*ox; omyt = qtmg*oy; omzt = qtmg*oz; /* calculate rotation matrix */ omt = omxt*omxt + omyt*omyt + omzt*omzt; anorm = 2.0f/(1.0f + omt); omt = 0.5f*(1.0f - omt); rot4 = omxt*omyt; rot7 = omxt*omzt; rot8 = omyt*omzt; rot1 = omt + omxt*omxt; rot5 = omt + omyt*omyt; rot9 = omt + omzt*omzt; rot2 = omzt + rot4; rot4 -= omzt; rot3 = -omyt + rot7; rot7 += omyt; rot6 = omxt + rot8; rot8 -= omxt; /* new momentum */ dx += (rot1*acx + rot2*acy + rot3*acz)*anorm; dy += (rot4*acx + rot5*acy + rot6*acz)*anorm; dz += (rot7*acx + rot8*acy + rot9*acz)*anorm; ppart[3+idimp*(j+npoff)] = dx; ppart[4+idimp*(j+npoff)] = dy; ppart[5+idimp*(j+npoff)] = dz; /* update inverse gamma */ p2 = dx*dx 
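/* Illustrative note, not part of the original code: the scalars rot1..rot9
   computed above, each multiplied by anorm = 2/(1 + (om*dt/2*gami)**2),
   reproduce the rotation matrix rot(1)..rot(9) listed in the comment at the
   top of this routine.  The update is the usual Boris sequence: add half the
   electric impulse (acx,acy,acz), rotate about the magnetic field, then add
   the second half impulse.  For the x component this reads
      px_new = (rot1*acx + rot2*acy + rot3*acz)*anorm + qtmh*fx
   which is exactly what the dx += (...)*anorm line computes, since dx
   already holds the half electric impulse qtmh*fx at that point. */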
+ dy*dy + dz*dz; dtg = dtc/sqrtf(1.0f + p2*ci2); /* new position */ dx = x + dx*dtg; dy = y + dy*dtg; dz = z + dz*dtg; /* reflecting boundary conditions */ if (ipbc==2) { if ((dx < edgelx) || (dx >= edgerx)) { dx = x; ppart[3+idimp*(j+npoff)] = -ppart[3+idimp*(j+npoff)]; } if ((dy < edgely) || (dy >= edgery)) { dy = y; ppart[4+idimp*(j+npoff)] = -ppart[4+idimp*(j+npoff)]; } if ((dz < edgelz) || (dz >= edgerz)) { dz = z; ppart[5+idimp*(j+npoff)] = -ppart[5+idimp*(j+npoff)]; } } /* mixed reflecting/periodic boundary conditions */ else if (ipbc==3) { if ((dx < edgelx) || (dx >= edgerx)) { dx = x; ppart[3+idimp*(j+npoff)] = -ppart[3+idimp*(j+npoff)]; } if ((dy < edgely) || (dy >= edgery)) { dy = y; ppart[4+idimp*(j+npoff)] = -ppart[4+idimp*(j+npoff)]; } } /* set new position */ ppart[idimp*(j+npoff)] = dx; ppart[1+idimp*(j+npoff)] = dy; ppart[2+idimp*(j+npoff)] = dz; } sum2 += sum1; } /* normalize kinetic energy */ *ek += sum2; return; #undef MXV #undef MYV #undef MZV } /*--------------------------------------------------------------------*/ void cgrbppushf3l(float ppart[], float fxyz[], float bxyz[], int kpic[], int ncl[], int ihole[], float qbm, float dt, float dtc, float ci, float *ek, int idimp, int nppmx, int nx, int ny, int nz, int mx, int my, int mz, int nxv, int nyv, int nzv, int mx1, int my1, int mxyz1, int ntmax, int *irc) { /* for 3d code, this subroutine updates particle co-ordinates and velocities using leap-frog scheme in time and first-order linear interpolation in space, for relativistic particles with magnetic field Using the Boris Mover. also determines list of particles which are leaving this tile OpenMP version using guard cells data read in tiles particles stored segmented array 202 flops/particle, 4 divides, 2 sqrts, 54 loads, 6 stores input: all except ncl, ihole, irc, output: ppart, ncl, ihole, ek, irc momentum equations used are: px(t+dt/2) = rot(1)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) + rot(2)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) + rot(3)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) py(t+dt/2) = rot(4)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) + rot(5)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) + rot(6)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) pz(t+dt/2) = rot(7)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) + rot(8)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) + rot(9)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) where q/m is charge/mass, and the rotation matrix is given by: rot(1) = (1 - (om*dt/2)**2 + 2*(omx*dt/2)**2)/(1 + (om*dt/2)**2) rot(2) = 2*(omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2) rot(3) = 2*(-omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2) rot(4) = 2*(-omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2) rot(5) = (1 - (om*dt/2)**2 + 2*(omy*dt/2)**2)/(1 + (om*dt/2)**2) rot(6) = 2*(omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2) rot(7) = 2*(omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2) rot(8) = 2*(-omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2) rot(9) = (1 - (om*dt/2)**2 + 2*(omz*dt/2)**2)/(1 + (om*dt/2)**2) and om**2 = omx**2 + omy**2 + omz**2 the rotation matrix is determined by: omx = (q/m)*bx(x(t),y(t),z(t))*gami, omy = (q/m)*by(x(t),y(t),z(t))*gami, omz = (q/m)*bz(x(t),y(t),z(t))*gami, where gami = 1./sqrt(1.+(px(t)*px(t)+py(t)*py(t)+pz(t)*pz(t))*ci*ci) position equations used are: x(t+dt) = x(t) + px(t+dt/2)*dtg y(t+dt) = y(t) + py(t+dt/2)*dtg z(t+dt) = z(t) + pz(t+dt/2)*dtg where dtg 
= dtc/sqrt(1.+(px(t+dt/2)*px(t+dt/2)+py(t+dt/2)*py(t+dt/2)+ pz(t+dt/2)*pz(t+dt/2))*ci*ci) fx(x(t),y(t),z(t)), fy(x(t),y(t),z(t)), and fz(x(t),y(t),z(t)), bx(x(t),y(t),z(t)), by(x(t),y(t),z(t)), and bz(x(t),y(t),z(t)) are approximated by interpolation from the nearest grid points: fx(x,y,z) = (1-dz)*((1-dy)*((1-dx)*fx(n,m,l)+dx*fx(n+1,m,l)) + dy*((1-dx)*fx(n,m+1,l) + dx*fx(n+1,m+1,l))) + dz*((1-dy)*((1-dx)*fx(n,m,l+1)+dx*fx(n+1,m,l+1)) + dy*((1-dx)*fx(n,m+1,l+1) + dx*fx(n+1,m+1,l+1))) where n,m,l = leftmost grid points and dx = x-n, dy = y-m, dz = z-l similarly for fy(x,y,z), fz(x,y,z), bx(x,y,z), by(x,y,z), bz(x,y,z) ppart[m][n][0] = position x of particle n in tile m ppart[m][n][1] = position y of particle n in tile m ppart[m][n][2] = position z of particle n in tile m ppart[m][n][3] = momentum px of particle n in tile m ppart[m][n][4] = momentum py of particle n in tile m ppart[m][n][5] = momentum pz of particle n in tile m fxyz[l][k][j][0] = x component of force/charge at grid (j,k,l) fxyz[l][k][j][1] = y component of force/charge at grid (j,k,l) fxyz[l][k][j][2] = z component of force/charge at grid (j,k,l) that is, convolution of electric field over particle shape bxyz[l][k][j][0] = x component of magnetic field at grid (j,k,l) bxyz[l][k][j][1] = y component of magnetic field at grid (j,k,l) bxyz[l][k][j][2] = z component of magnetic field at grid (j,k,l) that is, the convolution of magnetic field over particle shape kpic[l] = number of particles in tile l ncl[l][i] = number of particles going to destination i, tile l ihole[l][:][0] = location of hole in array left by departing particle ihole[l][:][1] = direction destination of particle leaving hole all for tile l ihole[l][0][0] = ih, number of holes left (error, if negative) qbm = particle charge/mass ratio dt = time interval between successive force calculations dtc = time interval between successive co-ordinate calculations ci = reciprocal of velocity of light kinetic energy/mass at time t is also calculated, using ek = gami*sum((px(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt)**2 + (py(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt)**2 + (pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt)**2)/(1. 
+ gami) idimp = size of phase space = 6 nppmx = maximum number of particles in tile nx/ny/nz = system length in x/y/z direction mx/my/mz = number of grids in sorting cell in x/y/z nxv = second dimension of field arrays, must be >= nx+1 nyv = third dimension of field arrays, must be >= ny+1 nzv = fourth dimension of field array, must be >= nz+1 mx1 = (system length in x direction - 1)/mx + 1 my1 = (system length in y direction - 1)/my + 1 mxyz1 = mx1*my1*mz1, where mz1 = (system length in z direction - 1)/mz + 1 ntmax = size of hole array for particles leaving tiles irc = maximum overflow, returned only if error occurs, when irc > 0 optimized version local data */ #define MXV 17 #define MYV 17 #define MZV 17 int mxy1, noff, moff, loff, npoff, npp; int i, j, k, l, ih, nh, nn, mm, ll, nm, mxv, myv, mxyv, nxyv; float anx, any, anz, edgelx, edgely, edgelz, edgerx, edgery, edgerz; float dxp, dyp, dzp, amx, amy, amz, dx, dy, dz, ox, oy, oz, dx1; float acx, acy, acz, p2, gami, qtmg, omxt, omyt, omzt, omt, anorm; float rot1, rot2, rot3, rot4, rot5, rot6, rot7, rot8, rot9, dtg; float qtmh, ci2, x, y, z; float sfxyz[3*MXV*MYV*MZV], sbxyz[3*MXV*MYV*MZV]; /* float sfxyz[3*(mx+1)*(my+1)*(mz+1)]; */ /* float sbxyz[3*(mx+1)*(my+1)*(mz+1)]; */ double sum1, sum2; /* mxv = MXV; */ /* myv = MYV; */ mxv = mx+1; myv = my+1; mxyv = mxv*myv; nxyv = nxv*nyv; mxy1 = mx1*my1; qtmh = 0.5f*qbm*dt; ci2 = ci*ci; anx = (float) nx; any = (float) ny; anz = (float) nz; sum2 = 0.0; /* error if local array is too small */ /* if ((mx >= MXV) || (my >= MYV) || (mz >= MZV)) */ /* return; */ /* loop over tiles */ #pragma omp parallel for \ private(i,j,k,l,noff,moff,loff,npp,npoff,nn,mm,ll,nm,ih,nh,x,y,z,dxp, \ dyp,dzp,amx,amy,amz,dx1,dx,dy,dz,ox,oy,oz,acx,acy,acz,omxt,omyt,omzt, \ omt,anorm,rot1,rot2,rot3,rot4,rot5,rot6,rot7,rot8,rot9,p2,gami,qtmg, \ dtg,edgelx,edgely,edgelz,edgerx,edgery,edgerz,sum1,sfxyz,sbxyz) \ reduction(+:sum2) for (l = 0; l < mxyz1; l++) { loff = l/mxy1; k = l - mxy1*loff; loff = mz*loff; noff = k/mx1; moff = my*noff; noff = mx*(k - mx1*noff); npp = kpic[l]; npoff = nppmx*l; nn = nx - noff; nn = mx < nn ? mx : nn; mm = ny - moff; mm = my < mm ? my : mm; ll = nz - loff; ll = mz < ll ? 
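/* Illustrative note, not part of the original code: compared with
   cgrbppush3l above, this "f" variant does not apply ipbc boundary
   conditions; each tile instead computes its own edges (edgelx..edgerz, set
   just below from noff/moff/loff) and records every particle that crosses
   them in ncl and ihole for the later sorting step.  A minimal sketch of how
   a caller might use the overflow flag (hypothetical variable names):
      int irc = 0;
      cgrbppushf3l(ppart,fxyz,bxyz,kpic,ncl,ihole,qbm,dt,dtc,ci,&ek,
                   idimp,nppmx,nx,ny,nz,mx,my,mz,nxv,nyv,nzv,
                   mx1,my1,mxyz1,ntmax,&irc);
      if (irc > 0) {
         // the ihole buffer was too small for some tile: irc reports how
         // many holes that tile needed, so ntmax should be increased to at
         // least irc before retrying
      }
*/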
mz : ll; edgelx = noff; edgerx = noff + nn; edgely = moff; edgery = moff + mm; edgelz = loff; edgerz = loff + ll; ih = 0; nh = 0; nn += 1; mm += 1; ll += 1; /* load local fields from global array */ for (k = 0; k < ll; k++) { for (j = 0; j < mm; j++) { for (i = 0; i < nn; i++) { sfxyz[3*(i+mxv*j+mxyv*k)] = fxyz[3*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sfxyz[1+3*(i+mxv*j+mxyv*k)] = fxyz[1+3*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sfxyz[2+3*(i+mxv*j+mxyv*k)] = fxyz[2+3*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; } } } for (k = 0; k < ll; k++) { for (j = 0; j < mm; j++) { for (i = 0; i < nn; i++) { sbxyz[3*(i+mxv*j+mxyv*k)] = bxyz[3*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sbxyz[1+3*(i+mxv*j+mxyv*k)] = bxyz[1+3*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sbxyz[2+3*(i+mxv*j+mxyv*k)] = bxyz[2+3*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; } } } /* clear counters */ for (j = 0; j < 26; j++) { ncl[j+26*l] = 0; } sum1 = 0.0; /* loop over particles in tile */ for (j = 0; j < npp; j++) { /* find interpolation weights */ x = ppart[idimp*(j+npoff)]; y = ppart[1+idimp*(j+npoff)]; z = ppart[2+idimp*(j+npoff)]; nn = x; mm = y; ll = z; dxp = x - (float) nn; dyp = y - (float) mm; dzp = z - (float) ll; nm = 3*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff)); amx = 1.0f - dxp; amy = 1.0f - dyp; dx1 = dxp*dyp; dyp = amx*dyp; amx = amx*amy; amz = 1.0f - dzp; amy = dxp*amy; /* find electric field */ nn = nm; dx = amx*sfxyz[nn] + amy*sfxyz[nn+3]; dy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+3]; dz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+3]; mm = nn + 3*mxv; dx = amz*(dx + dyp*sfxyz[mm] + dx1*sfxyz[mm+3]); dy = amz*(dy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+3]); dz = amz*(dz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+3]); nn += 3*mxyv; acx = amx*sfxyz[nn] + amy*sfxyz[nn+3]; acy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+3]; acz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+3]; mm = nn + 3*mxv; dx = dx + dzp*(acx + dyp*sfxyz[mm] + dx1*sfxyz[mm+3]); dy = dy + dzp*(acy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+3]); dz = dz + dzp*(acz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+3]); /* find magnetic field */ nn = nm; ox = amx*sbxyz[nn] + amy*sbxyz[nn+3]; oy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+3]; oz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+3]; mm = nn + 3*mxv; ox = amz*(ox + dyp*sbxyz[mm] + dx1*sbxyz[mm+3]); oy = amz*(oy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+3]); oz = amz*(oz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+3]); nn += 3*mxyv; acx = amx*sbxyz[nn] + amy*sbxyz[nn+3]; acy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+3]; acz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+3]; mm = nn + 3*mxv; ox = ox + dzp*(acx + dyp*sbxyz[mm] + dx1*sbxyz[mm+3]); oy = oy + dzp*(acy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+3]); oz = oz + dzp*(acz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+3]); /* calculate half impulse */ dx *= qtmh; dy *= qtmh; dz *= qtmh; /* half acceleration */ acx = ppart[3+idimp*(j+npoff)] + dx; acy = ppart[4+idimp*(j+npoff)] + dy; acz = ppart[5+idimp*(j+npoff)] + dz; /* find inverse gamma */ p2 = acx*acx + acy*acy + acz*acz; gami = 1.0f/sqrtf(1.0f + p2*ci2); /* renormalize magnetic field */ qtmg = qtmh*gami; /* time-centered kinetic energy */ sum1 += gami*p2/(1.0f + gami); /* calculate cyclotron frequency */ omxt = qtmg*ox; omyt = qtmg*oy; omzt = qtmg*oz; /* calculate rotation matrix */ omt = omxt*omxt + omyt*omyt + omzt*omzt; anorm = 2.0f/(1.0f + omt); omt = 0.5f*(1.0f - omt); rot4 = omxt*omyt; rot7 = omxt*omzt; rot8 = omyt*omzt; rot1 = omt + omxt*omxt; rot5 = omt + omyt*omyt; rot9 = omt + omzt*omzt; rot2 = omzt + rot4; rot4 -= omzt; rot3 = -omyt + rot7; rot7 += omyt; rot6 = omxt + rot8; rot8 -= omxt; /* new momentum */ dx += (rot1*acx + 
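/* Illustrative note, not part of the original code: the accumulation
      sum1 += gami*p2/(1.0f + gami);
   above is algebraically equal to (gamma - 1)/(ci*ci) for each particle,
   since p2*ci*ci = gamma*gamma - 1 and gami = 1/gamma.  Summed over all
   particles and tiles it is the total relativistic kinetic energy per unit
   mass, which is the quantity finally added to *ek. */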
rot2*acy + rot3*acz)*anorm; dy += (rot4*acx + rot5*acy + rot6*acz)*anorm; dz += (rot7*acx + rot8*acy + rot9*acz)*anorm; ppart[3+idimp*(j+npoff)] = dx; ppart[4+idimp*(j+npoff)] = dy; ppart[5+idimp*(j+npoff)] = dz; /* update inverse gamma */ p2 = dx*dx + dy*dy + dz*dz; dtg = dtc/sqrtf(1.0 + p2*ci2); /* new position */ dx = x + dx*dtg; dy = y + dy*dtg; dz = z + dz*dtg; /* find particles going out of bounds */ mm = 0; /* count how many particles are going in each direction in ncl */ /* save their address and destination in ihole */ /* use periodic boundary conditions and check for roundoff error */ /* mm = direction particle is going */ if (dx >= edgerx) { if (dx >= anx) dx = dx - anx; mm = 2; } else if (dx < edgelx) { if (dx < 0.0f) { dx += anx; if (dx < anx) mm = 1; else dx = 0.0f; } else { mm = 1; } } if (dy >= edgery) { if (dy >= any) dy = dy - any; mm += 6; } else if (dy < edgely) { if (dy < 0.0f) { dy += any; if (dy < any) mm += 3; else dy = 0.0f; } else { mm += 3; } } if (dz >= edgerz) { if (dz >= anz) dz = dz - anz; mm += 18; } else if (dz < edgelz) { if (dz < 0.0f) { dz += anz; if (dz < anz) mm += 9; else dz = 0.0f; } else { mm += 9; } } /* set new position */ ppart[idimp*(j+npoff)] = dx; ppart[1+idimp*(j+npoff)] = dy; ppart[2+idimp*(j+npoff)] = dz; /* increment counters */ if (mm > 0) { ncl[mm+26*l-1] += 1; ih += 1; if (ih <= ntmax) { ihole[2*(ih+(ntmax+1)*l)] = j + 1; ihole[1+2*(ih+(ntmax+1)*l)] = mm; } else { nh = 1; } } } sum2 += sum1; /* set error and end of file flag */ if (nh > 0) { *irc = ih; ih = -ih; } ihole[2*(ntmax+1)*l] = ih; } /* normalize kinetic energy */ *ek += sum2; return; #undef MXV #undef MYV #undef MZV } /*--------------------------------------------------------------------*/ void cgppost3l(float ppart[], float q[], int kpic[], float qm, int nppmx, int idimp, int mx, int my, int mz, int nxv, int nyv, int nzv, int mx1, int my1, int mxyz1) { /* for 3d code, this subroutine calculates particle charge density using first-order linear interpolation, periodic boundaries OpenMP version using guard cells data deposited in tiles particles stored segmented array 33 flops/particle, 11 loads, 8 stores input: all, output: q charge density is approximated by values at the nearest grid points q(n,m,l)=qm*(1.-dx)*(1.-dy)*(1.-dz) q(n+1,m,l)=qm*dx*(1.-dy)*(1.-dz) q(n,m+1,l)=qm*(1.-dx)*dy*(1.-dz) q(n+1,m+1,l)=qm*dx*dy*(1.-dz) q(n,m,l+1)=qm*(1.-dx)*(1.-dy)*dz q(n+1,m,l+1)=qm*dx*(1.-dy)*dz q(n,m+1,l+1)=qm*(1.-dx)*dy*dz q(n+1,m+1,l+1)=qm*dx*dy*dz where n,m,l = leftmost grid points and dx = x-n, dy = y-m, dz = z-l ppart[m][n][0] = position x of particle n in tile m ppart[m][n][1] = position y of particle n in tile m ppart[m][n][2] = position z of particle n in tile m q[l][k][j] = charge density at grid point j,k,l kpic = number of particles per tile qm = charge on particle, in units of e nppmx = maximum number of particles in tile idimp = size of phase space = 6 mx/my/mz = number of grids in sorting cell in x/y/z nxv = first dimension of charge array, must be >= nx+1 nyv = second dimension of charge array, must be >= ny+1 nzv = third dimension of charge array, must be >= nz+1 mx1 = (system length in x direction - 1)/mx + 1 my1 = (system length in y direction - 1)/my + 1 mxyz1 = mx1*my1*mz1, where mz1 = (system length in z direction - 1)/mz + 1 local data */ #define MXV 17 #define MYV 17 #define MZV 17 int mxy1, noff, moff, loff, npoff, npp; int i, j, k, l, nn, mm, ll, nm, lm, mxv, myv, mxyv, nxyv; float x, y, z, dxp, dyp, dzp, amx, amy, amz, dx1; float sq[MXV*MYV*MZV]; /* float 
sq[(mx+1)*(my+1)*(mz+1)]; */ /* mxv = MXV; */ /* myv = MYV; */ mxv = mx+1; myv = my+1; mxyv = mxv*myv; nxyv = nxv*nyv; mxy1 = mx1*my1; /* error if local array is too small */ /* if ((mx >= MXV) || (my >= MYV) || (mz >= MZV)) */ /* return; */ #pragma omp parallel for \ private(i,j,k,l,noff,moff,loff,npp,npoff,nn,mm,ll,nm,lm,x,y,z,dxp,dyp, \ dzp,amx,amy,amz,dx1,sq) for (l = 0; l < mxyz1; l++) { loff = l/mxy1; k = l - mxy1*loff; loff = mz*loff; noff = k/mx1; moff = my*noff; noff = mx*(k - mx1*noff); npp = kpic[l]; npoff = nppmx*l; /* zero out local accumulator */ for (j = 0; j < mxyv*(mz+1); j++) { sq[j] = 0.0f; } /* loop over particles in tile */ for (j = 0; j < npp; j++) { /* find interpolation weights */ x = ppart[idimp*(j+npoff)]; y = ppart[1+idimp*(j+npoff)]; z = ppart[2+idimp*(j+npoff)]; nn = x; mm = y; ll = z; dxp = qm*(x - (float) nn); dyp = y - (float) mm; dzp = z - (float) ll; nn = nn - noff + mxv*(mm - moff) + mxyv*(ll - loff); amx = qm - dxp; amy = 1.0f - dyp; dx1 = dxp*dyp; dyp = amx*dyp; amx = amx*amy; amz = 1.0f - dzp; amy = dxp*amy; /* deposit charge within tile to local accumulator */ x = sq[nn] + amx*amz; y = sq[nn+1] + amy*amz; sq[nn] = x; sq[nn+1] = y; mm = nn + mxv; x = sq[mm] + dyp*amz; y = sq[mm+1] + dx1*amz; sq[mm] = x; sq[mm+1] = y; nn += mxyv; x = sq[nn] + amx*dzp; y = sq[nn+1] + amy*dzp; sq[nn] = x; sq[nn+1] = y; mm = nn + mxv; x = sq[mm] + dyp*dzp; y = sq[mm+1] + dx1*dzp; sq[mm] = x; sq[mm+1] = y; } /* deposit charge to interior points in global array */ nn = nxv - noff; nn = mx < nn ? mx : nn; mm = nyv - moff; mm = my < mm ? my : mm; ll = nzv - loff; ll = mz < ll ? mz : ll; for (k = 1; k < ll; k++) { for (j = 1; j < mm; j++) { for (i = 1; i < nn; i++) { q[i+noff+nxv*(j+moff)+nxyv*(k+loff)] += sq[i+mxv*j+mxyv*k]; } } } /* deposit charge to edge points in global array */ lm = nzv - loff; lm = mz+1 < lm ? mz+1 : lm; for (j = 1; j < mm; j++) { for (i = 1; i < nn; i++) { #pragma omp atomic q[i+noff+nxv*(j+moff)+nxyv*loff] += sq[i+mxv*j]; if (lm > mz) { #pragma omp atomic q[i+noff+nxv*(j+moff)+nxyv*(lm+loff-1)] += sq[i+mxv*j+mxyv*(lm-1)]; } } } nm = nxv - noff; nm = mx+1 < nm ? mx+1 : nm; mm = nyv - moff; mm = my+1 < mm ? 
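/* Illustrative note, not part of the original code: the deposit is done in
   two passes.  Interior points of the local accumulator sq (local indices
   1..mx-1, 1..my-1, 1..mz-1) receive contributions only from particles of
   this tile, so they are added to the global array q without
   synchronization.  The remaining faces of sq (index 0 and the guard index
   mx, my or mz) overlap cells that neighboring tiles also update, so those
   contributions are added with #pragma omp atomic, as in the loops
   surrounding this note. */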
my+1 : mm; for (k = 0; k < ll; k++) { for (i = 1; i < nn; i++) { #pragma omp atomic q[i+noff+nxv*moff+nxyv*(k+loff)] += sq[i+mxyv*k]; if (mm > my) { #pragma omp atomic q[i+noff+nxv*(mm+moff-1)+nxyv*(k+loff)] += sq[i+mxv*(mm-1)+mxyv*k]; } } for (j = 0; j < mm; j++) { #pragma omp atomic q[noff+nxv*(j+moff)+nxyv*(k+loff)] += sq[mxv*j+mxyv*k]; if (nm > mx) { #pragma omp atomic q[nm+noff-1+nxv*(j+moff)+nxyv*(k+loff)] += sq[nm-1+mxv*j+mxyv*k]; } } } if (lm > mz) { for (i = 1; i < nn; i++) { #pragma omp atomic q[i+noff+nxv*moff+nxyv*(lm+loff-1)] += sq[i+mxyv*(lm-1)]; if (mm > my) { #pragma omp atomic q[i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1)] += sq[i+mxv*(mm-1)+mxyv*(lm-1)]; } } for (j = 0; j < mm; j++) { #pragma omp atomic q[noff+nxv*(j+moff)+nxyv*(lm+loff-1)] += sq[mxv*j+mxyv*(lm-1)]; if (nm > mx) { #pragma omp atomic q[nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1)] += sq[nm-1+mxv*j+mxyv*(lm-1)]; } } } } return; #undef MXV #undef MYV #undef MZV } /*--------------------------------------------------------------------*/ void cgjppost3l(float ppart[], float cu[], int kpic[], float qm, float dt, int nppmx, int idimp, int nx, int ny, int nz, int mx, int my, int mz, int nxv, int nyv, int nzv, int mx1, int my1, int mxyz1, int ipbc) { /* for 3d code, this subroutine calculates particle current density using first-order linear interpolation in addition, particle positions are advanced a half time-step OpenMP version using guard cells data deposited in tiles particles stored segmented array 69 flops/particle, 30 loads, 27 stores input: all, output: ppart, cu current density is approximated by values at the nearest grid points cu(i,n,m,l)=qci*(1.-dx)*(1.-dy)*(1.-dz) cu(i,n+1,m,l)=qci*dx*(1.-dy)*(1.-dz) cu(i,n,m+1,l)=qci*(1.-dx)*dy*(1.-dz) cu(i,n+1,m+1,l)=qci*dx*dy*(1.-dz) cu(i,n,m,l+1)=qci*(1.-dx)*(1.-dy)*dz cu(i,n+1,m,l+1)=qci*dx*(1.-dy)*dz cu(i,n,m+1,l+1)=qci*(1.-dx)*dy*dz cu(i,n+1,m+1,l+1)=qci*dx*dy*dz where n,m,l = leftmost grid points and dx = x-n, dy = y-m, dz = z-l and qci = qm*vi, where i = x,y,z ppart[m][n][0] = position x of particle n in tile m ppart[m][n][1] = position y of particle n in tile m ppart[m][n][2] = position z of particle n in tile m ppart[m][n][3] = velocity vx of particle n in tile m ppart[m][n][4] = velocity vy of particle n in tile m ppart[m][n][5] = velocity vz of particle n in tile m cu[l][k][j][i] = ith component of current density at grid point j,k,l kpic = number of particles per tile qm = charge on particle, in units of e dt = time interval between successive calculations nppmx = maximum number of particles in tile idimp = size of phase space = 6 nx/ny/nz = system length in x/y/z direction mx/my/mz = number of grids in sorting cell in x/y/z nxv = second dimension of current array, must be >= nx+1 nyv = third dimension of current array, must be >= ny+1 nzv = fourth dimension of current array, must be >= nz+1 mx1 = (system length in x direction - 1)/mx + 1 my1 = (system length in y direction - 1)/my + 1 mxyz1 = mx1*my1*mz1, where mz1 = (system length in z direction - 1)/mz + 1 ipbc = particle boundary condition = (0,1,2,3) = (none,3d periodic,3d reflecting,mixed 2d reflecting/1d periodic) local data */ #define MXV 17 #define MYV 17 #define MZV 17 int mxy1, noff, moff, loff, npoff, npp; int i, j, k, l, nn, mm, ll, nm, lm, mxv, myv, mxyv, nxyv; float edgelx, edgely, edgelz, edgerx, edgery, edgerz; float dxp, dyp, dzp, amx, amy, amz, dx1, dx, dy, dz, vx, vy, vz; float x, y, z; float scu[3*MXV*MYV*MZV]; /* float scu[3*(mx+1)*(my+1)*(mz+1)]; */ /* mxv = MXV; */ /* myv = MYV; */ mxv = 
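/* Illustrative note, not part of the original code: the local accumulator
   scu uses the same interleaved layout as the global array cu, i.e. the
   c-th component (c = 0,1,2 for x,y,z) of the current at local grid point
   (i,j,k) lives at
      scu[c + 3*(i + mxv*j + mxyv*k)]
   which mirrors
      cu[c + 3*(i+noff + nxv*(j+moff) + nxyv*(k+loff))]
   in the global grid; mxv = mx+1 and mxyv = mxv*(my+1) include one guard
   cell in x and in y. */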
mx+1; myv = my+1; mxyv = mxv*myv; nxyv = nxv*nyv; mxy1 = mx1*my1; /* set boundary values */ edgelx = 0.0f; edgely = 0.0f; edgelz = 0.0f; edgerx = (float) nx; edgery = (float) ny; edgerz = (float) nz; if (ipbc==2) { edgelx = 1.0f; edgely = 1.0f; edgelz = 1.0f; edgerx = (float) (nx-1); edgery = (float) (ny-1); edgerz = (float) (nz-1); } else if (ipbc==3) { edgelx = 1.0f; edgely = 1.0f; edgerx = (float) (nx-1); edgery = (float) (ny-1); } /* error if local array is too small */ /* if ((mx >= MXV) || (my >= MYV) || (mz >= MZV)) */ /* return; */ /* loop over tiles */ #pragma omp parallel for \ private(i,j,k,l,noff,moff,loff,npp,npoff,nn,mm,ll,nm,lm,x,y,z,dxp,dyp, \ dzp,amx,amy,amz,dx1,dx,dy,dz,vx,vy,vz,scu) for (l = 0; l < mxyz1; l++) { loff = l/mxy1; k = l - mxy1*loff; loff = mz*loff; noff = k/mx1; moff = my*noff; noff = mx*(k - mx1*noff); npp = kpic[l]; npoff = nppmx*l; /* zero out local accumulator */ for (j = 0; j < 3*mxyv*(mz+1); j++) { scu[j] = 0.0f; } /* loop over particles in tile */ for (j = 0; j < npp; j++) { /* find interpolation weights */ x = ppart[idimp*(j+npoff)]; y = ppart[1+idimp*(j+npoff)]; z = ppart[2+idimp*(j+npoff)]; nn = x; mm = y; ll = z; dxp = qm*(x - (float) nn); dyp = y - (float) mm; dzp = z - (float) ll; nn = 3*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff)); amx = qm - dxp; amy = 1.0f - dyp; dx1 = dxp*dyp; dyp = amx*dyp; amx = amx*amy; amz = 1.0f - dzp; amy = dxp*amy; /* deposit current within tile to local accumulator */ dx = amx*amz; dy = amy*amz; vx = ppart[3+idimp*(j+npoff)]; vy = ppart[4+idimp*(j+npoff)]; vz = ppart[5+idimp*(j+npoff)]; scu[nn] += vx*dx; scu[nn+1] += vy*dx; scu[nn+2] += vz*dx; dx = dyp*amz; scu[nn+3] += vx*dy; scu[nn+1+3] += vy*dy; scu[nn+2+3] += vz*dy; dy = dx1*amz; mm = nn + 3*mxv; scu[mm] += vx*dx; scu[mm+1] += vy*dx; scu[mm+2] += vz*dx; dx = amx*dzp; scu[mm+3] += vx*dy; scu[mm+1+3] += vy*dy; scu[mm+2+3] += vz*dy; dy = amy*dzp; nn += 3*mxyv; scu[nn] += vx*dx; scu[nn+1] += vy*dx; scu[nn+2] += vz*dx; dx = dyp*dzp; scu[nn+3] += vx*dy; scu[nn+1+3] += vy*dy; scu[nn+2+3] += vz*dy; dy = dx1*dzp; mm = nn + 3*mxv; scu[mm] += vx*dx; scu[mm+1] += vy*dx; scu[mm+2] += vz*dx; scu[mm+3] += vx*dy; scu[mm+1+3] += vy*dy; scu[mm+2+3] += vz*dy; /* advance position half a time-step */ dx = x + vx*dt; dy = y + vy*dt; dz = z + vz*dt; /* reflecting boundary conditions */ if (ipbc==2) { if ((dx < edgelx) || (dx >= edgerx)) { dx = x; ppart[3+idimp*(j+npoff)] = -vx; } if ((dy < edgely) || (dy >= edgery)) { dy = y; ppart[4+idimp*(j+npoff)] = -vy; } if ((dz < edgelz) || (dz >= edgerz)) { dz = z; ppart[5+idimp*(j+npoff)] = -vz; } } /* mixed reflecting/periodic boundary conditions */ else if (ipbc==3) { if ((dx < edgelx) || (dx >= edgerx)) { dx = x; ppart[3+idimp*(j+npoff)] = -vx; } if ((dy < edgely) || (dy >= edgery)) { dy = y; ppart[4+idimp*(j+npoff)] = -vy; } } /* set new position */ ppart[idimp*(j+npoff)] = dx; ppart[1+idimp*(j+npoff)] = dy; ppart[2+idimp*(j+npoff)] = dz; } /* deposit current to interior points in global array */ nn = nxv - noff; nn = mx < nn ? mx : nn; mm = nyv - moff; mm = my < mm ? my : mm; ll = nzv - loff; ll = mz < ll ? mz : ll; for (k = 1; k < ll; k++) { for (j = 1; j < mm; j++) { for (i = 1; i < nn; i++) { cu[3*(i+noff+nxv*(j+moff)+nxyv*(k+loff))] += scu[3*(i+mxv*j+mxyv*k)]; cu[1+3*(i+noff+nxv*(j+moff)+nxyv*(k+loff))] += scu[1+3*(i+mxv*j+mxyv*k)]; cu[2+3*(i+noff+nxv*(j+moff)+nxyv*(k+loff))] += scu[2+3*(i+mxv*j+mxyv*k)]; } } } /* deposit current to edge points in global array */ lm = nzv - loff; lm = mz+1 < lm ? 
mz+1 : lm; for (j = 1; j < mm; j++) { for (i = 1; i < nn; i++) { #pragma omp atomic cu[3*(i+noff+nxv*(j+moff)+nxyv*loff)] += scu[3*(i+mxv*j)]; #pragma omp atomic cu[1+3*(i+noff+nxv*(j+moff)+nxyv*loff)] += scu[1+3*(i+mxv*j)]; #pragma omp atomic cu[2+3*(i+noff+nxv*(j+moff)+nxyv*loff)] += scu[2+3*(i+mxv*j)]; if (lm > mz) { #pragma omp atomic cu[3*(i+noff+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[3*(i+mxv*j+mxyv*(lm-1))]; #pragma omp atomic cu[1+3*(i+noff+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[1+3*(i+mxv*j+mxyv*(lm-1))]; #pragma omp atomic cu[2+3*(i+noff+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[2+3*(i+mxv*j+mxyv*(lm-1))]; } } } nm = nxv - noff; nm = mx+1 < nm ? mx+1 : nm; mm = nyv - moff; mm = my+1 < mm ? my+1 : mm; for (k = 0; k < ll; k++) { for (i = 1; i < nn; i++) { #pragma omp atomic cu[3*(i+noff+nxv*moff+nxyv*(k+loff))] += scu[3*(i+mxyv*k)]; #pragma omp atomic cu[1+3*(i+noff+nxv*moff+nxyv*(k+loff))] += scu[1+3*(i+mxyv*k)]; #pragma omp atomic cu[2+3*(i+noff+nxv*moff+nxyv*(k+loff))] += scu[2+3*(i+mxyv*k)]; if (mm > my) { #pragma omp atomic cu[3*(i+noff+nxv*(mm+moff-1)+nxyv*(k+loff))] += scu[3*(i+mxv*(mm-1)+mxyv*k)]; #pragma omp atomic cu[1+3*(i+noff+nxv*(mm+moff-1)+nxyv*(k+loff))] += scu[1+3*(i+mxv*(mm-1)+mxyv*k)]; #pragma omp atomic cu[2+3*(i+noff+nxv*(mm+moff-1)+nxyv*(k+loff))] += scu[2+3*(i+mxv*(mm-1)+mxyv*k)]; } } for (j = 0; j < mm; j++) { #pragma omp atomic cu[3*(noff+nxv*(j+moff)+nxyv*(k+loff))] += scu[3*(mxv*j+mxyv*k)]; #pragma omp atomic cu[1+3*(noff+nxv*(j+moff)+nxyv*(k+loff))] += scu[1+3*(mxv*j+mxyv*k)]; #pragma omp atomic cu[2+3*(noff+nxv*(j+moff)+nxyv*(k+loff))] += scu[2+3*(mxv*j+mxyv*k)]; if (nm > mx) { #pragma omp atomic cu[3*(nm+noff-1+nxv*(j+moff)+nxyv*(k+loff))] += scu[3*(nm-1+mxv*j+mxyv*k)]; #pragma omp atomic cu[1+3*(nm+noff-1+nxv*(j+moff)+nxyv*(k+loff))] += scu[1+3*(nm-1+mxv*j+mxyv*k)]; #pragma omp atomic cu[2+3*(nm+noff-1+nxv*(j+moff)+nxyv*(k+loff))] += scu[2+3*(nm-1+mxv*j+mxyv*k)]; } } } if (lm > mz) { for (i = 1; i < nn; i++) { #pragma omp atomic cu[3*(i+noff+nxv*moff+nxyv*(lm+loff-1))] += scu[3*(i+mxyv*(lm-1))]; #pragma omp atomic cu[1+3*(i+noff+nxv*moff+nxyv*(lm+loff-1))] += scu[1+3*(i+mxyv*(lm-1))]; #pragma omp atomic cu[2+3*(i+noff+nxv*moff+nxyv*(lm+loff-1))] += scu[2+3*(i+mxyv*(lm-1))]; if (mm > my) { #pragma omp atomic cu[3*(i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1))] += scu[3*(i+mxv*(mm-1)+mxyv*(lm-1))]; #pragma omp atomic cu[1+3*(i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1))] += scu[1+3*(i+mxv*(mm-1)+mxyv*(lm-1))]; #pragma omp atomic cu[2+3*(i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1))] += scu[2+3*(i+mxv*(mm-1)+mxyv*(lm-1))]; } } for (j = 0; j < mm; j++) { #pragma omp atomic cu[3*(noff+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[3*(mxv*j+mxyv*(lm-1))]; #pragma omp atomic cu[1+3*(noff+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[1+3*(mxv*j+mxyv*(lm-1))]; #pragma omp atomic cu[2+3*(noff+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[2+3*(mxv*j+mxyv*(lm-1))]; if (nm > mx) { #pragma omp atomic cu[3*(nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[3*(nm-1+mxv*j+mxyv*(lm-1))]; #pragma omp atomic cu[1+3*(nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[1+3*(nm-1+mxv*j+mxyv*(lm-1))]; #pragma omp atomic cu[2+3*(nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[2+3*(nm-1+mxv*j+mxyv*(lm-1))]; } } } } return; #undef MXV #undef MYV #undef MZV } /*--------------------------------------------------------------------*/ void cgjppostf3l(float ppart[], float cu[], int kpic[], int ncl[], int ihole[], float qm, float dt, int nppmx, int idimp, int nx, int ny, int nz, int mx, int my, int mz, int nxv, int nyv, int 
nzv, int mx1, int my1, int mxyz1, int ntmax, int *irc) { /* for 3d code, this subroutine calculates particle current density using first-order linear interpolation in addition, particle positions are advanced a half time-step with periodic boundary conditions. also determines list of particles which are leaving this tile OpenMP version using guard cells data deposited in tiles particles stored segmented array 69 flops/particle, 30 loads, 27 stores input: all except ncl, ihole, irc, output: ppart, cu, ncl, ihole, irc current density is approximated by values at the nearest grid points cu(i,n,m,l)=qci*(1.-dx)*(1.-dy)*(1.-dz) cu(i,n+1,m,l)=qci*dx*(1.-dy)*(1.-dz) cu(i,n,m+1,l)=qci*(1.-dx)*dy*(1.-dz) cu(i,n+1,m+1,l)=qci*dx*dy*(1.-dz) cu(i,n,m,l+1)=qci*(1.-dx)*(1.-dy)*dz cu(i,n+1,m,l+1)=qci*dx*(1.-dy)*dz cu(i,n,m+1,l+1)=qci*(1.-dx)*dy*dz cu(i,n+1,m+1,l+1)=qci*dx*dy*dz where n,m,l = leftmost grid points and dx = x-n, dy = y-m, dz = z-l and qci = qm*vi, where i = x,y,z ppart[m][n][0] = position x of particle n in tile m ppart[m][n][1] = position y of particle n in tile m ppart[m][n][2] = position z of particle n in tile m ppart[m][n][3] = velocity vx of particle n in tile m ppart[m][n][4] = velocity vy of particle n in tile m ppart[m][n][5] = velocity vz of particle n in tile m cu[l][k][j][i] = ith component of current density at grid point j,k,l kpic[l] = number of particles in tile l ncl[l][i] = number of particles going to destination i, tile l ihole[l][:][0] = location of hole in array left by departing particle ihole[l][:][1] = direction destination of particle leaving hole all for tile l ihole[l][0][0] = ih, number of holes left (error, if negative) qm = charge on particle, in units of e dt = time interval between successive calculations nppmx = maximum number of particles in tile idimp = size of phase space = 6 nx/ny/nz = system length in x/y/z direction mx/my/mz = number of grids in sorting cell in x/y/z nxv = second dimension of current array, must be >= nx+1 nyv = third dimension of current array, must be >= ny+1 nzv = fourth dimension of current array, must be >= nz+1 mx1 = (system length in x direction - 1)/mx + 1 my1 = (system length in y direction - 1)/my + 1 mxyz1 = mx1*my1*mz1, where mz1 = (system length in z direction - 1)/mz + 1 ntmax = size of hole array for particles leaving tiles irc = maximum overflow, returned only if error occurs, when irc > 0 optimized version local data */ #define MXV 17 #define MYV 17 #define MZV 17 int mxy1, noff, moff, loff, npoff, npp; int i, j, k, l, ih, nh, nn, mm, ll, nm, lm, mxv, myv, mxyv, nxyv; float anx, any, anz, edgelx, edgely, edgelz, edgerx, edgery, edgerz; float dxp, dyp, dzp, amx, amy, amz, dx1, dx, dy, dz, vx, vy, vz; float x, y, z; float scu[3*MXV*MYV*MZV]; /* float scu[3*(mx+1)*(my+1)*(mz+1)]; */ /* mxv = MXV; */ /* myv = MYV; */ mxv = mx+1; myv = my+1; mxyv = mxv*myv; nxyv = nxv*nyv; mxy1 = mx1*my1; anx = (float) nx; any = (float) ny; anz = (float) nz; /* error if local array is too small */ /* if ((mx >= MXV) || (my >= MYV) || (mz >= MZV)) */ /* return; */ #pragma omp parallel for \ private(i,j,k,l,noff,moff,loff,npp,npoff,nn,mm,ll,nm,lm,ih,nh,x,y,z, \ dxp,dyp,dzp,amx,amy,amz,dx1,dx,dy,dz,vx,vy,vz,edgelx,edgely,edgelz, \ edgerx,edgery,edgerz,scu) for (l = 0; l < mxyz1; l++) { loff = l/mxy1; k = l - mxy1*loff; loff = mz*loff; noff = k/mx1; moff = my*noff; noff = mx*(k - mx1*noff); npp = kpic[l]; npoff = nppmx*l; nn = nx - noff; nn = mx < nn ? mx : nn; mm = ny - moff; mm = my < mm ? my : mm; ll = nz - loff; ll = mz < ll ? 
mz : ll; edgelx = noff; edgerx = noff + nn; edgely = moff; edgery = moff + mm; edgelz = loff; edgerz = loff + ll; ih = 0; nh = 0; nn += 1; mm += 1; ll += 1; /* zero out local accumulator */ for (j = 0; j < 3*mxyv*(mz+1); j++) { scu[j] = 0.0f; } /* clear counters */ for (j = 0; j < 26; j++) { ncl[j+26*l] = 0; } /* loop over particles in tile */ for (j = 0; j < npp; j++) { /* find interpolation weights */ x = ppart[idimp*(j+npoff)]; y = ppart[1+idimp*(j+npoff)]; z = ppart[2+idimp*(j+npoff)]; nn = x; mm = y; ll = z; dxp = qm*(x - (float) nn); dyp = y - (float) mm; dzp = z - (float) ll; nn = 3*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff)); amx = qm - dxp; amy = 1.0f - dyp; dx1 = dxp*dyp; dyp = amx*dyp; amx = amx*amy; amz = 1.0f - dzp; amy = dxp*amy; /* deposit current within tile to local accumulator */ dx = amx*amz; dy = amy*amz; vx = ppart[3+idimp*(j+npoff)]; vy = ppart[4+idimp*(j+npoff)]; vz = ppart[5+idimp*(j+npoff)]; scu[nn] += vx*dx; scu[nn+1] += vy*dx; scu[nn+2] += vz*dx; dx = dyp*amz; scu[nn+3] += vx*dy; scu[nn+1+3] += vy*dy; scu[nn+2+3] += vz*dy; dy = dx1*amz; mm = nn + 3*mxv; scu[mm] += vx*dx; scu[mm+1] += vy*dx; scu[mm+2] += vz*dx; dx = amx*dzp; scu[mm+3] += vx*dy; scu[mm+1+3] += vy*dy; scu[mm+2+3] += vz*dy; dy = amy*dzp; nn += 3*mxyv; scu[nn] += vx*dx; scu[nn+1] += vy*dx; scu[nn+2] += vz*dx; dx = dyp*dzp; scu[nn+3] += vx*dy; scu[nn+1+3] += vy*dy; scu[nn+2+3] += vz*dy; dy = dx1*dzp; mm = nn + 3*mxv; scu[mm] += vx*dx; scu[mm+1] += vy*dx; scu[mm+2] += vz*dx; scu[mm+3] += vx*dy; scu[mm+1+3] += vy*dy; scu[mm+2+3] += vz*dy; /* advance position half a time-step */ dx = x + vx*dt; dy = y + vy*dt; dz = z + vz*dt; /* find particles going out of bounds */ mm = 0; /* count how many particles are going in each direction in ncl */ /* save their address and destination in ihole */ /* use periodic boundary conditions and check for roundoff error */ /* mm = direction particle is going */ if (dx >= edgerx) { if (dx >= anx) dx = dx - anx; mm = 2; } else if (dx < edgelx) { if (dx < 0.0f) { dx += anx; if (dx < anx) mm = 1; else dx = 0.0f; } else { mm = 1; } } if (dy >= edgery) { if (dy >= any) dy = dy - any; mm += 6; } else if (dy < edgely) { if (dy < 0.0f) { dy += any; if (dy < any) mm += 3; else dy = 0.0f; } else { mm += 3; } } if (dz >= edgerz) { if (dz >= anz) dz = dz - anz; mm += 18; } else if (dz < edgelz) { if (dz < 0.0f) { dz += anz; if (dz < anz) mm += 9; else dz = 0.0f; } else { mm += 9; } } /* set new position */ ppart[idimp*(j+npoff)] = dx; ppart[1+idimp*(j+npoff)] = dy; ppart[2+idimp*(j+npoff)] = dz; /* increment counters */ if (mm > 0) { ncl[mm+26*l-1] += 1; ih += 1; if (ih <= ntmax) { ihole[2*(ih+(ntmax+1)*l)] = j + 1; ihole[1+2*(ih+(ntmax+1)*l)] = mm; } else { nh = 1; } } } /* deposit current to interior points in global array */ nn = nxv - noff; nn = mx < nn ? mx : nn; mm = nyv - moff; mm = my < mm ? my : mm; ll = nzv - loff; ll = mz < ll ? mz : ll; for (k = 1; k < ll; k++) { for (j = 1; j < mm; j++) { for (i = 1; i < nn; i++) { cu[3*(i+noff+nxv*(j+moff)+nxyv*(k+loff))] += scu[3*(i+mxv*j+mxyv*k)]; cu[1+3*(i+noff+nxv*(j+moff)+nxyv*(k+loff))] += scu[1+3*(i+mxv*j+mxyv*k)]; cu[2+3*(i+noff+nxv*(j+moff)+nxyv*(k+loff))] += scu[2+3*(i+mxv*j+mxyv*k)]; } } } /* deposit current to edge points in global array */ lm = nzv - loff; lm = mz+1 < lm ? 
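/* Illustrative note, not part of the original code: the direction code mm
   built above packs a departing particle's exit direction into one of 26
   destinations: 1 or 2 for the low or high x face, plus 3 or 6 for the low
   or high y face, plus 9 or 18 for the low or high z face.  For example a
   particle crossing the high-x and low-z faces gets mm = 2 + 9 = 11;
   ncl[mm-1 + 26*l] is incremented and the pair (j+1, mm) is stored in ihole
   for the sorting routine. */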
mz+1 : lm; for (j = 1; j < mm; j++) { for (i = 1; i < nn; i++) { #pragma omp atomic cu[3*(i+noff+nxv*(j+moff)+nxyv*loff)] += scu[3*(i+mxv*j)]; #pragma omp atomic cu[1+3*(i+noff+nxv*(j+moff)+nxyv*loff)] += scu[1+3*(i+mxv*j)]; #pragma omp atomic cu[2+3*(i+noff+nxv*(j+moff)+nxyv*loff)] += scu[2+3*(i+mxv*j)]; if (lm > mz) { #pragma omp atomic cu[3*(i+noff+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[3*(i+mxv*j+mxyv*(lm-1))]; #pragma omp atomic cu[1+3*(i+noff+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[1+3*(i+mxv*j+mxyv*(lm-1))]; #pragma omp atomic cu[2+3*(i+noff+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[2+3*(i+mxv*j+mxyv*(lm-1))]; } } } nm = nxv - noff; nm = mx+1 < nm ? mx+1 : nm; mm = nyv - moff; mm = my+1 < mm ? my+1 : mm; for (k = 0; k < ll; k++) { for (i = 1; i < nn; i++) { #pragma omp atomic cu[3*(i+noff+nxv*moff+nxyv*(k+loff))] += scu[3*(i+mxyv*k)]; #pragma omp atomic cu[1+3*(i+noff+nxv*moff+nxyv*(k+loff))] += scu[1+3*(i+mxyv*k)]; #pragma omp atomic cu[2+3*(i+noff+nxv*moff+nxyv*(k+loff))] += scu[2+3*(i+mxyv*k)]; if (mm > my) { #pragma omp atomic cu[3*(i+noff+nxv*(mm+moff-1)+nxyv*(k+loff))] += scu[3*(i+mxv*(mm-1)+mxyv*k)]; #pragma omp atomic cu[1+3*(i+noff+nxv*(mm+moff-1)+nxyv*(k+loff))] += scu[1+3*(i+mxv*(mm-1)+mxyv*k)]; #pragma omp atomic cu[2+3*(i+noff+nxv*(mm+moff-1)+nxyv*(k+loff))] += scu[2+3*(i+mxv*(mm-1)+mxyv*k)]; } } for (j = 0; j < mm; j++) { #pragma omp atomic cu[3*(noff+nxv*(j+moff)+nxyv*(k+loff))] += scu[3*(mxv*j+mxyv*k)]; #pragma omp atomic cu[1+3*(noff+nxv*(j+moff)+nxyv*(k+loff))] += scu[1+3*(mxv*j+mxyv*k)]; #pragma omp atomic cu[2+3*(noff+nxv*(j+moff)+nxyv*(k+loff))] += scu[2+3*(mxv*j+mxyv*k)]; if (nm > mx) { #pragma omp atomic cu[3*(nm+noff-1+nxv*(j+moff)+nxyv*(k+loff))] += scu[3*(nm-1+mxv*j+mxyv*k)]; #pragma omp atomic cu[1+3*(nm+noff-1+nxv*(j+moff)+nxyv*(k+loff))] += scu[1+3*(nm-1+mxv*j+mxyv*k)]; #pragma omp atomic cu[2+3*(nm+noff-1+nxv*(j+moff)+nxyv*(k+loff))] += scu[2+3*(nm-1+mxv*j+mxyv*k)]; } } } if (lm > mz) { for (i = 1; i < nn; i++) { #pragma omp atomic cu[3*(i+noff+nxv*moff+nxyv*(lm+loff-1))] += scu[3*(i+mxyv*(lm-1))]; #pragma omp atomic cu[1+3*(i+noff+nxv*moff+nxyv*(lm+loff-1))] += scu[1+3*(i+mxyv*(lm-1))]; #pragma omp atomic cu[2+3*(i+noff+nxv*moff+nxyv*(lm+loff-1))] += scu[2+3*(i+mxyv*(lm-1))]; if (mm > my) { #pragma omp atomic cu[3*(i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1))] += scu[3*(i+mxv*(mm-1)+mxyv*(lm-1))]; #pragma omp atomic cu[1+3*(i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1))] += scu[1+3*(i+mxv*(mm-1)+mxyv*(lm-1))]; #pragma omp atomic cu[2+3*(i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1))] += scu[2+3*(i+mxv*(mm-1)+mxyv*(lm-1))]; } } for (j = 0; j < mm; j++) { #pragma omp atomic cu[3*(noff+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[3*(mxv*j+mxyv*(lm-1))]; #pragma omp atomic cu[1+3*(noff+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[1+3*(mxv*j+mxyv*(lm-1))]; #pragma omp atomic cu[2+3*(noff+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[2+3*(mxv*j+mxyv*(lm-1))]; if (nm > mx) { #pragma omp atomic cu[3*(nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[3*(nm-1+mxv*j+mxyv*(lm-1))]; #pragma omp atomic cu[1+3*(nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[1+3*(nm-1+mxv*j+mxyv*(lm-1))]; #pragma omp atomic cu[2+3*(nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[2+3*(nm-1+mxv*j+mxyv*(lm-1))]; } } } /* set error and end of file flag */ if (nh > 0) { *irc = ih; ih = -ih; } ihole[2*(ntmax+1)*l] = ih; } return; #undef MXV #undef MYV #undef MZV } /*--------------------------------------------------------------------*/ void cgrjppost3l(float ppart[], float cu[], int kpic[], float qm, float dt, float ci, int 
nppmx, int idimp, int nx, int ny, int nz, int mx, int my, int mz, int nxv, int nyv, int nzv, int mx1, int my1, int mxyz1, int ipbc) { /* for 3d code, this subroutine calculates particle current density using first-order linear interpolation for relativistic particles in addition, particle positions are advanced a half time-step OpenMP version using guard cells data deposited in tiles particles stored segmented array 79 flops/particle, 1 divide, 1 sqrt, 30 loads, 27 stores input: all, output: ppart, cu current density is approximated by values at the nearest grid points cu(i,n,m,l)=qci*(1.-dx)*(1.-dy)*(1.-dz) cu(i,n+1,m,l)=qci*dx*(1.-dy)*(1.-dz) cu(i,n,m+1,l)=qci*(1.-dx)*dy*(1.-dz) cu(i,n+1,m+1,l)=qci*dx*dy*(1.-dz) cu(i,n,m,l+1)=qci*(1.-dx)*(1.-dy)*dz cu(i,n+1,m,l+1)=qci*dx*(1.-dy)*dz cu(i,n,m+1,l+1)=qci*(1.-dx)*dy*dz cu(i,n+1,m+1,l+1)=qci*dx*dy*dz where n,m,l = leftmost grid points and dx = x-n, dy = y-m, dz = z-l and qci = qm*pi*gami, where i = x,y,z where gami = 1./sqrt(1.+sum(pi**2)*ci*ci) ppart[m][n][0] = position x of particle n in tile m ppart[m][n][1] = position y of particle n in tile m ppart[m][n][2] = position z of particle n in tile m ppart[m][n][3] = x momentum of particle n in tile m ppart[m][n][4] = y momentum of particle n in tile m ppart[m][n][5] = z momentum of particle n in tile m cu[l][k][j][i] = ith component of current density at grid point j,k,l kpic = number of particles per tile qm = charge on particle, in units of e dt = time interval between successive calculations ci = reciprocal of velocity of light nppmx = maximum number of particles in tile idimp = size of phase space = 6 nx/ny/nz = system length in x/y/z direction mx/my/mz = number of grids in sorting cell in x/y/z nxv = second dimension of current array, must be >= nx+1 nyv = third dimension of current array, must be >= ny+1 nzv = fourth dimension of current array, must be >= nz+1 mx1 = (system length in x direction - 1)/mx + 1 my1 = (system length in y direction - 1)/my + 1 mxyz1 = mx1*my1*mz1, where mz1 = (system length in z direction - 1)/mz + 1 ipbc = particle boundary condition = (0,1,2,3) = (none,3d periodic,3d reflecting,mixed 2d reflecting/1d periodic) local data */ #define MXV 17 #define MYV 17 #define MZV 17 int mxy1, noff, moff, loff, npoff, npp; int i, j, k, l, nn, mm, ll, nm, lm, mxv, myv, mxyv, nxyv; float ci2, edgelx, edgely, edgelz, edgerx, edgery, edgerz; float dxp, dyp, dzp, amx, amy, amz, dx1, dx, dy, dz, vx, vy, vz; float x, y, z, p2, gami; float scu[3*MXV*MYV*MZV]; /* float scu[3*(mx+1)*(my+1)*(mz+1)]; */ /* mxv = MXV; */ /* myv = MYV; */ mxv = mx+1; myv = my+1; mxyv = mxv*myv; nxyv = nxv*nyv; mxy1 = mx1*my1; ci2 = ci*ci; /* set boundary values */ edgelx = 0.0f; edgely = 0.0f; edgelz = 0.0f; edgerx = (float) nx; edgery = (float) ny; edgerz = (float) nz; if (ipbc==2) { edgelx = 1.0f; edgely = 1.0f; edgelz = 1.0f; edgerx = (float) (nx-1); edgery = (float) (ny-1); edgerz = (float) (nz-1); } else if (ipbc==3) { edgelx = 1.0f; edgely = 1.0f; edgerx = (float) (nx-1); edgery = (float) (ny-1); } /* error if local array is too small */ /* if ((mx >= MXV) || (my >= MYV) || (mz >= MZV)) */ /* return; */ /* loop over tiles */ #pragma omp parallel for \ private(i,j,k,l,noff,moff,loff,npp,npoff,nn,mm,ll,nm,lm,x,y,z,dxp,dyp, \ dzp,amx,amy,amz,dx1,dx,dy,dz,vx,vy,vz,p2,gami,scu) for (l = 0; l < mxyz1; l++) { loff = l/mxy1; k = l - mxy1*loff; loff = mz*loff; noff = k/mx1; moff = my*noff; noff = mx*(k - mx1*noff); npp = kpic[l]; npoff = nppmx*l; /* zero out local accumulator */ for (j = 0; j < 
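/* Illustrative note, not part of the original code: in this relativistic
   variant the deposited quantities are velocities rather than momenta.
   Inside the particle loop the stored momenta are rescaled as
      vx *= gami;  vy *= gami;  vz *= gami;
   with gami = 1/sqrt(1 + p2*ci*ci), so each grid point receives
   qci = qm*pi*gami exactly as stated in the routine's comment, and the half
   time-step position advance uses these same velocities. */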
3*mxyv*(mz+1); j++) { scu[j] = 0.0f; } /* loop over particles in tile */ for (j = 0; j < npp; j++) { /* find interpolation weights */ x = ppart[idimp*(j+npoff)]; y = ppart[1+idimp*(j+npoff)]; z = ppart[2+idimp*(j+npoff)]; nn = x; mm = y; ll = z; dxp = qm*(x - (float) nn); dyp = y - (float) mm; dzp = z - (float) ll; /* find inverse gamma */ vx = ppart[3+idimp*(j+npoff)]; vy = ppart[4+idimp*(j+npoff)]; vz = ppart[5+idimp*(j+npoff)]; p2 = vx*vx + vy*vy + vz*vz; gami = 1.0f/sqrtf(1.0f + p2*ci2); /* calculate weights */ nn = 3*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff)); amx = qm - dxp; amy = 1.0f - dyp; dx1 = dxp*dyp; dyp = amx*dyp; amx = amx*amy; amz = 1.0f - dzp; amy = dxp*amy; /* deposit current within tile to local accumulator */ dx = amx*amz; dy = amy*amz; vx *= gami; vy *= gami; vz *= gami; scu[nn] += vx*dx; scu[nn+1] += vy*dx; scu[nn+2] += vz*dx; dx = dyp*amz; scu[nn+3] += vx*dy; scu[nn+1+3] += vy*dy; scu[nn+2+3] += vz*dy; dy = dx1*amz; mm = nn + 3*mxv; scu[mm] += vx*dx; scu[mm+1] += vy*dx; scu[mm+2] += vz*dx; dx = amx*dzp; scu[mm+3] += vx*dy; scu[mm+1+3] += vy*dy; scu[mm+2+3] += vz*dy; dy = amy*dzp; nn += 3*mxyv; scu[nn] += vx*dx; scu[nn+1] += vy*dx; scu[nn+2] += vz*dx; dx = dyp*dzp; scu[nn+3] += vx*dy; scu[nn+1+3] += vy*dy; scu[nn+2+3] += vz*dy; dy = dx1*dzp; mm = nn + 3*mxv; scu[mm] += vx*dx; scu[mm+1] += vy*dx; scu[mm+2] += vz*dx; scu[mm+3] += vx*dy; scu[mm+1+3] += vy*dy; scu[mm+2+3] += vz*dy; /* advance position half a time-step */ dx = x + vx*dt; dy = y + vy*dt; dz = z + vz*dt; /* reflecting boundary conditions */ if (ipbc==2) { if ((dx < edgelx) || (dx >= edgerx)) { dx = x; ppart[3+idimp*(j+npoff)] = -ppart[3+idimp*(j+npoff)]; } if ((dy < edgely) || (dy >= edgery)) { dy = y; ppart[4+idimp*(j+npoff)] = -ppart[4+idimp*(j+npoff)]; } if ((dz < edgelz) || (dz >= edgerz)) { dz = z; ppart[5+idimp*(j+npoff)] = -ppart[5+idimp*(j+npoff)]; } } /* mixed reflecting/periodic boundary conditions */ else if (ipbc==3) { if ((dx < edgelx) || (dx >= edgerx)) { dx = x; ppart[3+idimp*(j+npoff)] = -ppart[3+idimp*(j+npoff)]; } if ((dy < edgely) || (dy >= edgery)) { dy = y; ppart[4+idimp*(j+npoff)] = -ppart[4+idimp*(j+npoff)]; } } /* set new position */ ppart[idimp*(j+npoff)] = dx; ppart[1+idimp*(j+npoff)] = dy; ppart[2+idimp*(j+npoff)] = dz; } /* deposit current to interior points in global array */ nn = nxv - noff; nn = mx < nn ? mx : nn; mm = nyv - moff; mm = my < mm ? my : mm; ll = nzv - loff; ll = mz < ll ? mz : ll; for (k = 1; k < ll; k++) { for (j = 1; j < mm; j++) { for (i = 1; i < nn; i++) { cu[3*(i+noff+nxv*(j+moff)+nxyv*(k+loff))] += scu[3*(i+mxv*j+mxyv*k)]; cu[1+3*(i+noff+nxv*(j+moff)+nxyv*(k+loff))] += scu[1+3*(i+mxv*j+mxyv*k)]; cu[2+3*(i+noff+nxv*(j+moff)+nxyv*(k+loff))] += scu[2+3*(i+mxv*j+mxyv*k)]; } } } /* deposit current to edge points in global array */ lm = nzv - loff; lm = mz+1 < lm ? mz+1 : lm; for (j = 1; j < mm; j++) { for (i = 1; i < nn; i++) { #pragma omp atomic cu[3*(i+noff+nxv*(j+moff)+nxyv*loff)] += scu[3*(i+mxv*j)]; #pragma omp atomic cu[1+3*(i+noff+nxv*(j+moff)+nxyv*loff)] += scu[1+3*(i+mxv*j)]; #pragma omp atomic cu[2+3*(i+noff+nxv*(j+moff)+nxyv*loff)] += scu[2+3*(i+mxv*j)]; if (lm > mz) { #pragma omp atomic cu[3*(i+noff+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[3*(i+mxv*j+mxyv*(lm-1))]; #pragma omp atomic cu[1+3*(i+noff+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[1+3*(i+mxv*j+mxyv*(lm-1))]; #pragma omp atomic cu[2+3*(i+noff+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[2+3*(i+mxv*j+mxyv*(lm-1))]; } } } nm = nxv - noff; nm = mx+1 < nm ? mx+1 : nm; mm = nyv - moff; mm = my+1 < mm ? 
my+1 : mm; for (k = 0; k < ll; k++) { for (i = 1; i < nn; i++) { #pragma omp atomic cu[3*(i+noff+nxv*moff+nxyv*(k+loff))] += scu[3*(i+mxyv*k)]; #pragma omp atomic cu[1+3*(i+noff+nxv*moff+nxyv*(k+loff))] += scu[1+3*(i+mxyv*k)]; #pragma omp atomic cu[2+3*(i+noff+nxv*moff+nxyv*(k+loff))] += scu[2+3*(i+mxyv*k)]; if (mm > my) { #pragma omp atomic cu[3*(i+noff+nxv*(mm+moff-1)+nxyv*(k+loff))] += scu[3*(i+mxv*(mm-1)+mxyv*k)]; #pragma omp atomic cu[1+3*(i+noff+nxv*(mm+moff-1)+nxyv*(k+loff))] += scu[1+3*(i+mxv*(mm-1)+mxyv*k)]; #pragma omp atomic cu[2+3*(i+noff+nxv*(mm+moff-1)+nxyv*(k+loff))] += scu[2+3*(i+mxv*(mm-1)+mxyv*k)]; } } for (j = 0; j < mm; j++) { #pragma omp atomic cu[3*(noff+nxv*(j+moff)+nxyv*(k+loff))] += scu[3*(mxv*j+mxyv*k)]; #pragma omp atomic cu[1+3*(noff+nxv*(j+moff)+nxyv*(k+loff))] += scu[1+3*(mxv*j+mxyv*k)]; #pragma omp atomic cu[2+3*(noff+nxv*(j+moff)+nxyv*(k+loff))] += scu[2+3*(mxv*j+mxyv*k)]; if (nm > mx) { #pragma omp atomic cu[3*(nm+noff-1+nxv*(j+moff)+nxyv*(k+loff))] += scu[3*(nm-1+mxv*j+mxyv*k)]; #pragma omp atomic cu[1+3*(nm+noff-1+nxv*(j+moff)+nxyv*(k+loff))] += scu[1+3*(nm-1+mxv*j+mxyv*k)]; #pragma omp atomic cu[2+3*(nm+noff-1+nxv*(j+moff)+nxyv*(k+loff))] += scu[2+3*(nm-1+mxv*j+mxyv*k)]; } } } if (lm > mz) { for (i = 1; i < nn; i++) { #pragma omp atomic cu[3*(i+noff+nxv*moff+nxyv*(lm+loff-1))] += scu[3*(i+mxyv*(lm-1))]; #pragma omp atomic cu[1+3*(i+noff+nxv*moff+nxyv*(lm+loff-1))] += scu[1+3*(i+mxyv*(lm-1))]; #pragma omp atomic cu[2+3*(i+noff+nxv*moff+nxyv*(lm+loff-1))] += scu[2+3*(i+mxyv*(lm-1))]; if (mm > my) { #pragma omp atomic cu[3*(i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1))] += scu[3*(i+mxv*(mm-1)+mxyv*(lm-1))]; #pragma omp atomic cu[1+3*(i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1))] += scu[1+3*(i+mxv*(mm-1)+mxyv*(lm-1))]; #pragma omp atomic cu[2+3*(i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1))] += scu[2+3*(i+mxv*(mm-1)+mxyv*(lm-1))]; } } for (j = 0; j < mm; j++) { #pragma omp atomic cu[3*(noff+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[3*(mxv*j+mxyv*(lm-1))]; #pragma omp atomic cu[1+3*(noff+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[1+3*(mxv*j+mxyv*(lm-1))]; #pragma omp atomic cu[2+3*(noff+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[2+3*(mxv*j+mxyv*(lm-1))]; if (nm > mx) { #pragma omp atomic cu[3*(nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[3*(nm-1+mxv*j+mxyv*(lm-1))]; #pragma omp atomic cu[1+3*(nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[1+3*(nm-1+mxv*j+mxyv*(lm-1))]; #pragma omp atomic cu[2+3*(nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[2+3*(nm-1+mxv*j+mxyv*(lm-1))]; } } } } return; #undef MXV #undef MYV #undef MZV } /*--------------------------------------------------------------------*/ void cgrjppostf3l(float ppart[], float cu[], int kpic[], int ncl[], int ihole[], float qm, float dt, float ci, int nppmx, int idimp, int nx, int ny, int nz, int mx, int my, int mz, int nxv, int nyv, int nzv, int mx1, int my1, int mxyz1, int ntmax, int *irc) { /* for 3d code, this subroutine calculates particle current density using first-order linear interpolation for relativistic particles in addition, particle positions are advanced a half time-step with periodic boundary conditions. 
also determines list of particles which are leaving this tile OpenMP version using guard cells data deposited in tiles particles stored segmented array 79 flops/particle, 1 divide, 1 sqrt, 30 loads, 27 stores input: all except ncl, ihole, irc, output: ppart, cu, ncl, ihole, irc current density is approximated by values at the nearest grid points cu(i,n,m,l)=qci*(1.-dx)*(1.-dy)*(1.-dz) cu(i,n+1,m,l)=qci*dx*(1.-dy)*(1.-dz) cu(i,n,m+1,l)=qci*(1.-dx)*dy*(1.-dz) cu(i,n+1,m+1,l)=qci*dx*dy*(1.-dz) cu(i,n,m,l+1)=qci*(1.-dx)*(1.-dy)*dz cu(i,n+1,m,l+1)=qci*dx*(1.-dy)*dz cu(i,n,m+1,l+1)=qci*(1.-dx)*dy*dz cu(i,n+1,m+1,l+1)=qci*dx*dy*dz where n,m,l = leftmost grid points and dx = x-n, dy = y-m, dz = z-l and qci = qm*pi*gami, where i = x,y,z where gami = 1./sqrt(1.+sum(pi**2)*ci*ci) ppart[m][n][0] = position x of particle n in tile m ppart[m][n][1] = position y of particle n in tile m ppart[m][n][2] = position z of particle n in tile m ppart[m][n][3] = x momentum of particle n in tile m ppart[m][n][4] = y momentum of particle n in tile m ppart[m][n][5] = z momentum of particle n in tile m cu[l][k][j][i] = ith component of current density at grid point j,k,l kpic[l] = number of particles in tile l ncl[l][i] = number of particles going to destination i, tile l ihole[l][:][0] = location of hole in array left by departing particle ihole[l][:][1] = direction destination of particle leaving hole all for tile l ihole[l][0][0] = ih, number of holes left (error, if negative) qm = charge on particle, in units of e dt = time interval between successive calculations ci = reciprocal of velocity of light nppmx = maximum number of particles in tile idimp = size of phase space = 6 nx/ny/nz = system length in x/y/z direction mx/my/mz = number of grids in sorting cell in x/y/z nxv = second dimension of current array, must be >= nx+1 nyv = third dimension of current array, must be >= ny+1 nzv = fourth dimension of current array, must be >= nz+1 mx1 = (system length in x direction - 1)/mx + 1 my1 = (system length in y direction - 1)/my + 1 mxyz1 = mx1*my1*mz1, where mz1 = (system length in z direction - 1)/mz + 1 ntmax = size of hole array for particles leaving tiles irc = maximum overflow, returned only if error occurs, when irc > 0 optimized version local data */ #define MXV 17 #define MYV 17 #define MZV 17 int mxy1, noff, moff, loff, npoff, npp; int i, j, k, l, ih, nh, nn, mm, ll, nm, lm, mxv, myv, mxyv, nxyv; float anx, any, anz, edgelx, edgely, edgelz, edgerx, edgery, edgerz; float dxp, dyp, dzp, amx, amy, amz, dx1, dx, dy, dz, vx, vy, vz; float ci2, x, y, z, p2, gami; float scu[3*MXV*MYV*MZV]; /* float scu[3*(mx+1)*(my+1)*(mz+1)]; */ /* mxv = MXV; */ /* myv = MYV; */ mxv = mx+1; myv = my+1; mxyv = mxv*myv; nxyv = nxv*nyv; mxy1 = mx1*my1; ci2 = ci*ci; anx = (float) nx; any = (float) ny; anz = (float) nz; /* error if local array is too small */ /* if ((mx >= MXV) || (my >= MYV) || (mz >= MZV)) */ /* return; */ #pragma omp parallel for \ private(i,j,k,l,noff,moff,loff,npp,npoff,nn,mm,ll,ih,nh,nm,lm,x,y,z, \ dxp,dyp,dzp,amx,amy,amz,dx1,dx,dy,dz,vx,vy,vz,p2,gami,edgelx,edgely, \ edgelz,edgerx,edgery,edgerz,scu) for (l = 0; l < mxyz1; l++) { loff = l/mxy1; k = l - mxy1*loff; loff = mz*loff; noff = k/mx1; moff = my*noff; noff = mx*(k - mx1*noff); npp = kpic[l]; npoff = nppmx*l; nn = nx - noff; nn = mx < nn ? mx : nn; mm = ny - moff; mm = my < mm ? my : mm; ll = nz - loff; ll = mz < ll ? 
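/* Illustrative note, not part of the original code: in this flattened C
   layout the conceptual array ihole[l][:][2] described in the comment above
   is addressed as
      ihole[0 + 2*(ih + (ntmax+1)*l)]   ->  hole location (j + 1)
      ihole[1 + 2*(ih + (ntmax+1)*l)]   ->  direction code mm
   for ih = 1..ntmax, while slot ih = 0, i.e. ihole[2*(ntmax+1)*l], holds the
   number of holes found in tile l (stored as a negative value when the
   ihole buffer overflowed and irc was set). */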
mz : ll; edgelx = noff; edgerx = noff + nn; edgely = moff; edgery = moff + mm; edgelz = loff; edgerz = loff + ll; ih = 0; nh = 0; nn += 1; mm += 1; ll += 1; /* zero out local accumulator */ for (j = 0; j < 3*mxyv*(mz+1); j++) { scu[j] = 0.0f; } /* clear counters */ for (j = 0; j < 26; j++) { ncl[j+26*l] = 0; } /* loop over particles in tile */ for (j = 0; j < npp; j++) { /* find interpolation weights */ x = ppart[idimp*(j+npoff)]; y = ppart[1+idimp*(j+npoff)]; z = ppart[2+idimp*(j+npoff)]; nn = x; mm = y; ll = z; dxp = qm*(x - (float) nn); dyp = y - (float) mm; dzp = z - (float) ll; /* find inverse gamma */ vx = ppart[3+idimp*(j+npoff)]; vy = ppart[4+idimp*(j+npoff)]; vz = ppart[5+idimp*(j+npoff)]; p2 = vx*vx + vy*vy + vz*vz; gami = 1.0f/sqrtf(1.0f + p2*ci2); /* calculate weights */ nn = 3*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff)); amx = qm - dxp; amy = 1.0f - dyp; dx1 = dxp*dyp; dyp = amx*dyp; amx = amx*amy; amz = 1.0f - dzp; amy = dxp*amy; /* deposit current within tile to local accumulator */ dx = amx*amz; dy = amy*amz; vx *= gami; vy *= gami; vz *= gami; scu[nn] += vx*dx; scu[nn+1] += vy*dx; scu[nn+2] += vz*dx; dx = dyp*amz; scu[nn+3] += vx*dy; scu[nn+1+3] += vy*dy; scu[nn+2+3] += vz*dy; dy = dx1*amz; mm = nn + 3*mxv; scu[mm] += vx*dx; scu[mm+1] += vy*dx; scu[mm+2] += vz*dx; dx = amx*dzp; scu[mm+3] += vx*dy; scu[mm+1+3] += vy*dy; scu[mm+2+3] += vz*dy; dy = amy*dzp; nn += 3*mxyv; scu[nn] += vx*dx; scu[nn+1] += vy*dx; scu[nn+2] += vz*dx; dx = dyp*dzp; scu[nn+3] += vx*dy; scu[nn+1+3] += vy*dy; scu[nn+2+3] += vz*dy; dy = dx1*dzp; mm = nn + 3*mxv; scu[mm] += vx*dx; scu[mm+1] += vy*dx; scu[mm+2] += vz*dx; scu[mm+3] += vx*dy; scu[mm+1+3] += vy*dy; scu[mm+2+3] += vz*dy; /* advance position half a time-step */ dx = x + vx*dt; dy = y + vy*dt; dz = z + vz*dt; /* find particles going out of bounds */ mm = 0; /* count how many particles are going in each direction in ncl */ /* save their address and destination in ihole */ /* use periodic boundary conditions and check for roundoff error */ /* mm = direction particle is going */ if (dx >= edgerx) { if (dx >= anx) dx = dx - anx; mm = 2; } else if (dx < edgelx) { if (dx < 0.0f) { dx += anx; if (dx < anx) mm = 1; else dx = 0.0f; } else { mm = 1; } } if (dy >= edgery) { if (dy >= any) dy = dy - any; mm += 6; } else if (dy < edgely) { if (dy < 0.0f) { dy += any; if (dy < any) mm += 3; else dy = 0.0f; } else { mm += 3; } } if (dz >= edgerz) { if (dz >= anz) dz = dz - anz; mm += 18; } else if (dz < edgelz) { if (dz < 0.0f) { dz += anz; if (dz < anz) mm += 9; else dz = 0.0f; } else { mm += 9; } } /* set new position */ ppart[idimp*(j+npoff)] = dx; ppart[1+idimp*(j+npoff)] = dy; ppart[2+idimp*(j+npoff)] = dz; /* increment counters */ if (mm > 0) { ncl[mm+26*l-1] += 1; ih += 1; if (ih <= ntmax) { ihole[2*(ih+(ntmax+1)*l)] = j + 1; ihole[1+2*(ih+(ntmax+1)*l)] = mm; } else { nh = 1; } } } /* deposit current to interior points in global array */ nn = nxv - noff; nn = mx < nn ? mx : nn; mm = nyv - moff; mm = my < mm ? my : mm; ll = nzv - loff; ll = mz < ll ? mz : ll; for (k = 1; k < ll; k++) { for (j = 1; j < mm; j++) { for (i = 1; i < nn; i++) { cu[3*(i+noff+nxv*(j+moff)+nxyv*(k+loff))] += scu[3*(i+mxv*j+mxyv*k)]; cu[1+3*(i+noff+nxv*(j+moff)+nxyv*(k+loff))] += scu[1+3*(i+mxv*j+mxyv*k)]; cu[2+3*(i+noff+nxv*(j+moff)+nxyv*(k+loff))] += scu[2+3*(i+mxv*j+mxyv*k)]; } } } /* deposit current to edge points in global array */ lm = nzv - loff; lm = mz+1 < lm ? 
mz+1 : lm; for (j = 1; j < mm; j++) { for (i = 1; i < nn; i++) { #pragma omp atomic cu[3*(i+noff+nxv*(j+moff)+nxyv*loff)] += scu[3*(i+mxv*j)]; #pragma omp atomic cu[1+3*(i+noff+nxv*(j+moff)+nxyv*loff)] += scu[1+3*(i+mxv*j)]; #pragma omp atomic cu[2+3*(i+noff+nxv*(j+moff)+nxyv*loff)] += scu[2+3*(i+mxv*j)]; if (lm > mz) { #pragma omp atomic cu[3*(i+noff+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[3*(i+mxv*j+mxyv*(lm-1))]; #pragma omp atomic cu[1+3*(i+noff+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[1+3*(i+mxv*j+mxyv*(lm-1))]; #pragma omp atomic cu[2+3*(i+noff+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[2+3*(i+mxv*j+mxyv*(lm-1))]; } } } nm = nxv - noff; nm = mx+1 < nm ? mx+1 : nm; mm = nyv - moff; mm = my+1 < mm ? my+1 : mm; for (k = 0; k < ll; k++) { for (i = 1; i < nn; i++) { #pragma omp atomic cu[3*(i+noff+nxv*moff+nxyv*(k+loff))] += scu[3*(i+mxyv*k)]; #pragma omp atomic cu[1+3*(i+noff+nxv*moff+nxyv*(k+loff))] += scu[1+3*(i+mxyv*k)]; #pragma omp atomic cu[2+3*(i+noff+nxv*moff+nxyv*(k+loff))] += scu[2+3*(i+mxyv*k)]; if (mm > my) { #pragma omp atomic cu[3*(i+noff+nxv*(mm+moff-1)+nxyv*(k+loff))] += scu[3*(i+mxv*(mm-1)+mxyv*k)]; #pragma omp atomic cu[1+3*(i+noff+nxv*(mm+moff-1)+nxyv*(k+loff))] += scu[1+3*(i+mxv*(mm-1)+mxyv*k)]; #pragma omp atomic cu[2+3*(i+noff+nxv*(mm+moff-1)+nxyv*(k+loff))] += scu[2+3*(i+mxv*(mm-1)+mxyv*k)]; } } for (j = 0; j < mm; j++) { #pragma omp atomic cu[3*(noff+nxv*(j+moff)+nxyv*(k+loff))] += scu[3*(mxv*j+mxyv*k)]; #pragma omp atomic cu[1+3*(noff+nxv*(j+moff)+nxyv*(k+loff))] += scu[1+3*(mxv*j+mxyv*k)]; #pragma omp atomic cu[2+3*(noff+nxv*(j+moff)+nxyv*(k+loff))] += scu[2+3*(mxv*j+mxyv*k)]; if (nm > mx) { #pragma omp atomic cu[3*(nm+noff-1+nxv*(j+moff)+nxyv*(k+loff))] += scu[3*(nm-1+mxv*j+mxyv*k)]; #pragma omp atomic cu[1+3*(nm+noff-1+nxv*(j+moff)+nxyv*(k+loff))] += scu[1+3*(nm-1+mxv*j+mxyv*k)]; #pragma omp atomic cu[2+3*(nm+noff-1+nxv*(j+moff)+nxyv*(k+loff))] += scu[2+3*(nm-1+mxv*j+mxyv*k)]; } } } if (lm > mz) { for (i = 1; i < nn; i++) { #pragma omp atomic cu[3*(i+noff+nxv*moff+nxyv*(lm+loff-1))] += scu[3*(i+mxyv*(lm-1))]; #pragma omp atomic cu[1+3*(i+noff+nxv*moff+nxyv*(lm+loff-1))] += scu[1+3*(i+mxyv*(lm-1))]; #pragma omp atomic cu[2+3*(i+noff+nxv*moff+nxyv*(lm+loff-1))] += scu[2+3*(i+mxyv*(lm-1))]; if (mm > my) { #pragma omp atomic cu[3*(i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1))] += scu[3*(i+mxv*(mm-1)+mxyv*(lm-1))]; #pragma omp atomic cu[1+3*(i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1))] += scu[1+3*(i+mxv*(mm-1)+mxyv*(lm-1))]; #pragma omp atomic cu[2+3*(i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1))] += scu[2+3*(i+mxv*(mm-1)+mxyv*(lm-1))]; } } for (j = 0; j < mm; j++) { #pragma omp atomic cu[3*(noff+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[3*(mxv*j+mxyv*(lm-1))]; #pragma omp atomic cu[1+3*(noff+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[1+3*(mxv*j+mxyv*(lm-1))]; #pragma omp atomic cu[2+3*(noff+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[2+3*(mxv*j+mxyv*(lm-1))]; if (nm > mx) { #pragma omp atomic cu[3*(nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[3*(nm-1+mxv*j+mxyv*(lm-1))]; #pragma omp atomic cu[1+3*(nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[1+3*(nm-1+mxv*j+mxyv*(lm-1))]; #pragma omp atomic cu[2+3*(nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[2+3*(nm-1+mxv*j+mxyv*(lm-1))]; } } } /* set error and end of file flag */ if (nh > 0) { *irc = ih; ih = -ih; } ihole[2*(ntmax+1)*l] = ih; } return; #undef MXV #undef MYV #undef MZV } /*--------------------------------------------------------------------*/ void cpporder3l(float ppart[], float ppbuff[], int kpic[], int ncl[], int ihole[], int 
idimp, int nppmx, int nx, int ny, int nz, int mx, int my, int mz, int mx1, int my1, int mz1, int npbmx, int ntmax, int *irc) { /* this subroutine sorts particles by x,y,z grid in tiles of mx, my, mz linear interpolation, with periodic boundary conditions tiles are assumed to be arranged in 3D linear memory algorithm has 3 steps. first, one finds particles leaving tile and stores their number in each directon, location, and destination in ncl and ihole. second, a prefix scan of ncl is performed and departing particles are buffered in ppbuff in direction order. finally, we copy the incoming particles from other tiles into ppart. input: all except ppbuff, ncl, ihole, irc output: ppart, ppbuff, kpic, ncl, ihole, irc ppart[m][n][0] = position x of particle n in tile m ppart[m][n][1] = position y of particle n in tile m ppart[m][n][2] = position z of particle n in tile m ppbuff[l][n][i] = i co-ordinate of particle n in tile l kpic[l] = number of particles in tile l ncl[l][i] = number of particles going to destination i, tile l ihole[l][:][0] = location of hole in array left by departing particle ihole[l][:][1] = direction destination of particle leaving hole all for tile l ihole[l][0][0] = ih, number of holes left (error, if negative) idimp = size of phase space = 6 nppmx = maximum number of particles in tile nx/ny/nz = system length in x/y/z direction mx/my/mz = number of grids in sorting cell in x/y/z mx1 = (system length in x direction - 1)/mx + 1 my1 = (system length in y direction - 1)/my + 1 mz1 = (system length in z direction - 1)/mz + 1 npbmx = size of buffer array ppbuff ntmax = size of hole array for particles leaving tiles irc = maximum overflow, returned only if error occurs, when irc > 0 local data */ int mxy1, mxyz1, noff, moff, loff, npp, ncoff; int i, j, k, l, ii, kx, ky, kz, ih, nh, ist, nn, mm, ll, isum; int ip, j1, j2, kxl, kxr, kk, kl, kr, lk, lr; float anx, any, anz, edgelx, edgely, edgelz, edgerx, edgery, edgerz; float dx, dy, dz; int ks[26]; mxy1 = mx1*my1; mxyz1 = mxy1*mz1; anx = (float) nx; any = (float) ny; anz = (float) nz; /* find and count particles leaving tiles and determine destination */ /* update ppart, ihole, ncl */ /* loop over tiles */ #pragma omp parallel for \ private(j,k,l,noff,moff,loff,npp,nn,mm,ll,ih,nh,ist,dx,dy,dz,edgelx, \ edgely,edgelz,edgerx,edgery,edgerz) for (l = 0; l < mxyz1; l++) { loff = l/mxy1; k = l - mxy1*loff; loff = mz*loff; noff = k/mx1; moff = my*noff; noff = mx*(k - mx1*noff); npp = kpic[l]; nn = nx - noff; nn = mx < nn ? mx : nn; mm = ny - moff; mm = my < mm ? my : mm; ll = nz - loff; ll = mz < ll ? 
mz : ll; ih = 0; nh = 0; edgelx = noff; edgerx = noff + nn; edgely = moff; edgery = moff + mm; edgelz = loff; edgerz = loff + ll; /* clear counters */ for (j = 0; j < 26; j++) { ncl[j+26*l] = 0; } /* loop over particles in tile */ for (j = 0; j < npp; j++) { dx = ppart[idimp*(j+nppmx*l)]; dy = ppart[1+idimp*(j+nppmx*l)]; dz = ppart[2+idimp*(j+nppmx*l)]; /* find particles going out of bounds */ ist = 0; /* count how many particles are going in each direction in ncl */ /* save their address and destination in ihole */ /* use periodic boundary conditions and check for roundoff error */ /* ist = direction particle is going */ if (dx >= edgerx) { if (dx >= anx) ppart[idimp*(j+nppmx*l)] = dx - anx; ist = 2; } else if (dx < edgelx) { if (dx < 0.0) { dx += anx; if (dx < anx) ist = 1; else dx = 0.0; ppart[idimp*(j+nppmx*l)] = dx; } else { ist = 1; } } if (dy >= edgery) { if (dy >= any) ppart[1+idimp*(j+nppmx*l)] = dy - any; ist += 6; } else if (dy < edgely) { if (dy < 0.0) { dy += any; if (dy < any) ist += 3; else dy = 0.0; ppart[1+idimp*(j+nppmx*l)] = dy; } else { ist += 3; } } if (dz >= edgerz) { if (dz >= anz) ppart[2+idimp*(j+nppmx*l)] = dz - anz; ist += 18; } else if (dz < edgelz) { if (dz < 0.0) { dz += anz; if (dz < anz) ist += 9; else dz = 0.0; ppart[2+idimp*(j+nppmx*l)] = dz; } else { ist += 9; } } if (ist > 0) { ncl[ist+26*l-1] += 1; ih += 1; if (ih <= ntmax) { ihole[2*(ih+(ntmax+1)*l)] = j + 1; ihole[1+2*(ih+(ntmax+1)*l)] = ist; } else { nh = 1; } } } /* set error and end of file flag */ if (nh > 0) { *irc = ih; ih = -ih; } ihole[2*(ntmax+1)*l] = ih; } /* ihole overflow */ if (*irc > 0) return; /* buffer particles that are leaving tile: update ppbuff, ncl */ /* loop over tiles */ #pragma omp parallel for \ private(i,j,l,isum,ist,nh,ip,j1,ii) for (l = 0; l < mxyz1; l++) { /* find address offset for ordered ppbuff array */ isum = 0; for (j = 0; j < 26; j++) { ist = ncl[j+26*l]; ncl[j+26*l] = isum; isum += ist; } nh = ihole[2*(ntmax+1)*l]; ip = 0; /* loop over particles leaving tile */ for (j = 0; j < nh; j++) { /* buffer particles that are leaving tile, in direction order */ j1 = ihole[2*(j+1+(ntmax+1)*l)] - 1; ist = ihole[1+2*(j+1+(ntmax+1)*l)]; ii = ncl[ist+26*l-1]; if (ii < npbmx) { for (i = 0; i < idimp; i++) { ppbuff[i+idimp*(ii+npbmx*l)] = ppart[i+idimp*(j1+nppmx*l)]; } } else { ip = 1; } ncl[ist+26*l-1] = ii + 1; } /* set error */ if (ip > 0) *irc = ncl[25+26*l]; } /* ppbuff overflow */ if (*irc > 0) return; /* copy incoming particles from buffer into ppart: update ppart, kpic */ /* loop over tiles */ #pragma omp parallel for \ private(i,j,k,l,ii,kk,npp,kx,ky,kz,kl,kr,kxl,kxr,lk,ll,lr,ih,nh,ncoff, \ ist,j1,j2,ip,ks) for (l = 0; l < mxyz1; l++) { npp = kpic[l]; kz = l/mxy1; k = l - mxy1*kz; /* loop over tiles in z, assume periodic boundary conditions */ lk = kz*mxy1; /* find tile behind */ ll = kz - 1; if (ll < 0) ll += mz1; ll = ll*mxy1; /* find tile in front */ lr = kz + 1; if (lr >= mz1) lr -= mz1; lr = lr*mxy1; ky = k/mx1; /* loop over tiles in y, assume periodic boundary conditions */ kk = ky*mx1; /* find tile above */ kl = ky - 1; if (kl < 0) kl += my1; kl = kl*mx1; /* find tile below */ kr = ky + 1; if (kr >= my1) kr -= my1; kr = kr*mx1; /* loop over tiles in x, assume periodic boundary conditions */ kx = k - ky*mx1; kxl = kx - 1 ; if (kxl < 0) kxl += mx1; kxr = kx + 1; if (kxr >= mx1) kxr -= mx1; /* find tile number for different directions */ ks[0] = kxr + kk + lk; ks[1] = kxl + kk + lk; ks[2] = kx + kr + lk; ks[3] = kxr + kr + lk; ks[4] = kxl + kr + lk; ks[5] = kx + kl + 
lk; ks[6] = kxr + kl + lk; ks[7] = kxl + kl + lk; ks[8] = kx + kk + lr; ks[9] = kxr + kk + lr; ks[10] = kxl + kk + lr; ks[11] = kx + kr + lr; ks[12] = kxr + kr + lr; ks[13] = kxl + kr + lr; ks[14] = kx + kl + lr; ks[15] = kxr + kl + lr; ks[16] = kxl + kl + lr; ks[17] = kx + kk + ll; ks[18] = kxr + kk + ll; ks[19] = kxl + kk + ll; ks[20] = kx + kr + ll; ks[21] = kxr + kr + ll; ks[22] = kxl + kr + ll; ks[23] = kx + kl + ll; ks[24] = kxr + kl + ll; ks[25] = kxl + kl + ll; /* loop over directions */ nh = ihole[2*(ntmax+1)*l]; ncoff = 0; ih = 0; ist = 0; j1 = 0; for (ii = 0; ii < 26; ii++) { if (ii > 0) ncoff = ncl[ii-1+26*ks[ii]]; /* ip = number of particles coming from direction ii */ ip = ncl[ii+26*ks[ii]] - ncoff; for (j = 0; j < ip; j++) { ih += 1; /* insert incoming particles into holes */ if (ih <= nh) { j1 = ihole[2*(ih+(ntmax+1)*l)] - 1; } /* place overflow at end of array */ else { j1 = npp; npp += 1; } if (j1 < nppmx) { for (i = 0; i < idimp; i++) { ppart[i+idimp*(j1+nppmx*l)] = ppbuff[i+idimp*(j+ncoff+npbmx*ks[ii])]; } } else { ist = 1; } } } /* set error */ if (ist > 0) *irc = j1+1; /* fill up remaining holes in particle array with particles from bottom */ if (ih < nh) { ip = nh - ih; for (j = 0; j < ip; j++) { j1 = npp - j - 1; j2 = ihole[2*(nh-j+(ntmax+1)*l)] - 1; if (j1 > j2) { /* move particle only if it is below current hole */ for (i = 0; i < idimp; i++) { ppart[i+idimp*(j2+nppmx*l)] = ppart[i+idimp*(j1+nppmx*l)]; } } } npp -= ip; } kpic[l] = npp; } return; } /*--------------------------------------------------------------------*/ void cpporderf3l(float ppart[], float ppbuff[], int kpic[], int ncl[], int ihole[], int idimp, int nppmx, int mx1, int my1, int mz1, int npbmx, int ntmax, int *irc) { /* this subroutine sorts particles by x,y,z grid in tiles of mx, my, mz linear interpolation, with periodic boundary conditions tiles are assumed to be arranged in 3D linear memory the algorithm has 2 steps. first, a prefix scan of ncl is performed and departing particles are buffered in ppbuff in direction order. then we copy the incoming particles from other tiles into ppart. it assumes that the number, location, and destination of particles leaving a tile have been previously stored in ncl and ihole by the cgppushf3l procedure. 
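a small worked example of the prefix-scan step (numbers are illustrative
only): if tile l has ncl[l][i] = 2, 0, 3 particles leaving in directions
i = 0, 1, 2, the exclusive scan rewrites these entries as starting offsets
0, 2, 2 into ppbuff, and buffering then advances them to the running totals
2, 2, 5; a receiving tile m recovers the number of incoming particles from
direction ii as ncl[m][ii] - ncl[m][ii-1]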
input: all except ppbuff, irc output: ppart, ppbuff, kpic, ncl, irc ppart[m][n][0] = position x of particle n in tile m ppart[m][n][1] = position y of particle n in tile m ppart[m][n][2] = position z of particle n in tile m ppbuff[l][n][i] = i co-ordinate of particle n in tile l kpic[l] = number of particles in tile l ncl[l][i] = number of particles going to destination i, tile l ihole[l][:][0] = location of hole in array left by departing particle ihole[l][:][1] = direction destination of particle leaving hole all for tile l ihole[l][0][0] = ih, number of holes left (error, if negative) idimp = size of phase space = 6 nppmx = maximum number of particles in tile mx1 = (system length in x direction - 1)/mx + 1 my1 = (system length in y direction - 1)/my + 1 mz1 = (system length in z direction - 1)/mz + 1 npbmx = size of buffer array ppbuff ntmax = size of hole array for particles leaving tiles irc = maximum overflow, returned only if error occurs, when irc > 0 local data */ int mxy1, mxyz1, npp, ncoff; int i, j, k, l, ii, kx, ky, kz, ih, nh, ist, ll, isum; int ip, j1, j2, kxl, kxr, kk, kl, kr, lk, lr; int ks[26]; mxy1 = mx1*my1; mxyz1 = mxy1*mz1; /* buffer particles that are leaving tile: update ppbuff, ncl */ /* loop over tiles */ #pragma omp parallel for \ private(i,j,l,isum,ist,nh,ip,j1,ii) for (l = 0; l < mxyz1; l++) { /* find address offset for ordered ppbuff array */ isum = 0; for (j = 0; j < 26; j++) { ist = ncl[j+26*l]; ncl[j+26*l] = isum; isum += ist; } nh = ihole[2*(ntmax+1)*l]; ip = 0; /* loop over particles leaving tile */ for (j = 0; j < nh; j++) { /* buffer particles that are leaving tile, in direction order */ j1 = ihole[2*(j+1+(ntmax+1)*l)] - 1; ist = ihole[1+2*(j+1+(ntmax+1)*l)]; ii = ncl[ist+26*l-1]; if (ii < npbmx) { for (i = 0; i < idimp; i++) { ppbuff[i+idimp*(ii+npbmx*l)] = ppart[i+idimp*(j1+nppmx*l)]; } } else { ip = 1; } ncl[ist+26*l-1] = ii + 1; } /* set error */ if (ip > 0) *irc = ncl[25+26*l]; } /* ppbuff overflow */ if (*irc > 0) return; /* copy incoming particles from buffer into ppart: update ppart, kpic */ /* loop over tiles */ #pragma omp parallel for \ private(i,j,k,l,ii,kk,npp,kx,ky,kz,kl,kr,kxl,kxr,lk,ll,lr,ih,nh,ncoff, \ ist,j1,j2,ip,ks) for (l = 0; l < mxyz1; l++) { npp = kpic[l]; kz = l/mxy1; k = l - mxy1*kz; /* loop over tiles in z, assume periodic boundary conditions */ lk = kz*mxy1; /* find tile behind */ ll = kz - 1; if (ll < 0) ll += mz1; ll = ll*mxy1; /* find tile in front */ lr = kz + 1; if (lr >= mz1) lr -= mz1; lr = lr*mxy1; ky = k/mx1; /* loop over tiles in y, assume periodic boundary conditions */ kk = ky*mx1; /* find tile above */ kl = ky - 1; if (kl < 0) kl += my1; kl = kl*mx1; /* find tile below */ kr = ky + 1; if (kr >= my1) kr -= my1; kr = kr*mx1; /* loop over tiles in x, assume periodic boundary conditions */ kx = k - ky*mx1; kxl = kx - 1 ; if (kxl < 0) kxl += mx1; kxr = kx + 1; if (kxr >= mx1) kxr -= mx1; /* find tile number for different directions */ ks[0] = kxr + kk + lk; ks[1] = kxl + kk + lk; ks[2] = kx + kr + lk; ks[3] = kxr + kr + lk; ks[4] = kxl + kr + lk; ks[5] = kx + kl + lk; ks[6] = kxr + kl + lk; ks[7] = kxl + kl + lk; ks[8] = kx + kk + lr; ks[9] = kxr + kk + lr; ks[10] = kxl + kk + lr; ks[11] = kx + kr + lr; ks[12] = kxr + kr + lr; ks[13] = kxl + kr + lr; ks[14] = kx + kl + lr; ks[15] = kxr + kl + lr; ks[16] = kxl + kl + lr; ks[17] = kx + kk + ll; ks[18] = kxr + kk + ll; ks[19] = kxl + kk + ll; ks[20] = kx + kr + ll; ks[21] = kxr + kr + ll; ks[22] = kxl + kr + ll; ks[23] = kx + kl + ll; ks[24] = kxr + kl + ll; ks[25] = kxl 
+ kl + ll; /* loop over directions */ nh = ihole[2*(ntmax+1)*l]; ncoff = 0; ih = 0; ist = 0; j1 = 0; for (ii = 0; ii < 26; ii++) { if (ii > 0) ncoff = ncl[ii-1+26*ks[ii]]; /* ip = number of particles coming from direction ii */ ip = ncl[ii+26*ks[ii]] - ncoff; for (j = 0; j < ip; j++) { ih += 1; /* insert incoming particles into holes */ if (ih <= nh) { j1 = ihole[2*(ih+(ntmax+1)*l)] - 1; } /* place overflow at end of array */ else { j1 = npp; npp += 1; } if (j1 < nppmx) { for (i = 0; i < idimp; i++) { ppart[i+idimp*(j1+nppmx*l)] = ppbuff[i+idimp*(j+ncoff+npbmx*ks[ii])]; } } else { ist = 1; } } } /* set error */ if (ist > 0) *irc = j1+1; /* fill up remaining holes in particle array with particles from bottom */ if (ih < nh) { ip = nh - ih; for (j = 0; j < ip; j++) { j1 = npp - j - 1; j2 = ihole[2*(nh-j+(ntmax+1)*l)] - 1; if (j1 > j2) { /* move particle only if it is below current hole */ for (i = 0; i < idimp; i++) { ppart[i+idimp*(j2+nppmx*l)] = ppart[i+idimp*(j1+nppmx*l)]; } } } npp -= ip; } kpic[l] = npp; } return; } /*--------------------------------------------------------------------*/ void ccguard3l(float fxyz[], int nx, int ny, int nz, int nxe, int nye, int nze) { /* replicate extended periodic vector field fxyz linear interpolation nx/ny/nz = system length in x/y direction nxe = first dimension of field arrays, must be >= nx+1 nye = second dimension of field arrays, must be >= ny+1 nze = third dimension of field arrays, must be >= nz+1 local data */ int j, k, l, nxye3, ll; nxye3 = 3*nxe*nye; /* copy edges of extended field */ #pragma omp parallel { #pragma omp for nowait \ private(j,k,l,ll) for (l = 0; l < nz; l++) { ll = nxye3*l; for (k = 0; k < ny; k++) { fxyz[3*(nx+nxe*k)+ll] = fxyz[3*nxe*k+ll]; fxyz[1+3*(nx+nxe*k)+ll] = fxyz[1+3*nxe*k+ll]; fxyz[2+3*(nx+nxe*k)+ll] = fxyz[2+3*nxe*k+ll]; } for (j = 0; j < nx; j++) { fxyz[3*(j+nxe*ny)+ll] = fxyz[3*j+ll]; fxyz[1+3*(j+nxe*ny)+ll] = fxyz[1+3*j+ll]; fxyz[2+3*(j+nxe*ny)+ll] = fxyz[2+3*j+ll]; } fxyz[3*(nx+nxe*ny)+ll] = fxyz[ll]; fxyz[1+3*(nx+nxe*ny)+ll] = fxyz[1+ll]; fxyz[2+3*(nx+nxe*ny)+ll] = fxyz[2+ll]; } #pragma omp for \ private(j,k) for (k = 0; k < ny; k++) { for (j = 0; j < nx; j++) { fxyz[3*(j+nxe*k)+nxye3*nz] = fxyz[3*(j+nxe*k)]; fxyz[1+3*(j+nxe*k)+nxye3*nz] = fxyz[1+3*(j+nxe*k)]; fxyz[2+3*(j+nxe*k)+nxye3*nz] = fxyz[2+3*(j+nxe*k)]; } fxyz[3*(nx+nxe*k)+nxye3*nz] = fxyz[3*nxe*k]; fxyz[1+3*(nx+nxe*k)+nxye3*nz] = fxyz[1+3*nxe*k]; fxyz[2+3*(nx+nxe*k)+nxye3*nz] = fxyz[2+3*nxe*k]; } } for (j = 0; j < nx; j++) { fxyz[3*(j+nxe*ny)+nxye3*nz] = fxyz[3*j]; fxyz[1+3*(j+nxe*ny)+nxye3*nz] = fxyz[1+3*j]; fxyz[2+3*(j+nxe*ny)+nxye3*nz] = fxyz[2+3*j]; } fxyz[3*(nx+nxe*ny)+nxye3*nz] = fxyz[0]; fxyz[1+3*(nx+nxe*ny)+nxye3*nz] = fxyz[1]; fxyz[2+3*(nx+nxe*ny)+nxye3*nz] = fxyz[2]; return; } /*--------------------------------------------------------------------*/ void cacguard3l(float cu[], int nx, int ny, int nz, int nxe, int nye, int nze) { /* accumulate extended periodic vector field cu linear interpolation nx/ny/nz = system length in x/y direction nxe = first dimension of field arrays, must be >= nx+1 nye = second dimension of field arrays, must be >= ny+1 nze = third dimension of field arrays, must be >= nz+1 local data */ int j, k, l, nxye3, ll; nxye3 = 3*nxe*nye; /* accumulate edges of extended field */ #pragma omp parallel { #pragma omp for \ private(j,k,l,ll) for (l = 0; l < nz; l++) { ll = nxye3*l; for (k = 0; k < ny; k++) { cu[3*nxe*k+ll] += cu[3*(nx+nxe*k)+ll]; cu[1+3*nxe*k+ll] += cu[1+3*(nx+nxe*k)+ll]; cu[2+3*nxe*k+ll] += 
cu[2+3*(nx+nxe*k)+ll]; cu[3*(nx+nxe*k)+ll] = 0.0; cu[1+3*(nx+nxe*k)+ll] = 0.0; cu[2+3*(nx+nxe*k)+ll] = 0.0; } for (j = 0; j < nx; j++) { cu[3*j+ll] += cu[3*(j+nxe*ny)+ll]; cu[1+3*j+ll] += cu[1+3*(j+nxe*ny)+ll]; cu[2+3*j+ll] += cu[2+3*(j+nxe*ny)+ll]; cu[3*(j+nxe*ny)+ll] = 0.0; cu[1+3*(j+nxe*ny)+ll] = 0.0; cu[2+3*(j+nxe*ny)+ll] = 0.0; } cu[ll] += cu[3*(nx+nxe*ny)+ll]; cu[1+ll] += cu[1+3*(nx+nxe*ny)+ll]; cu[2+ll] += cu[2+3*(nx+nxe*ny)+ll]; cu[3*(nx+nxe*ny)+ll] = 0.0; cu[1+3*(nx+nxe*ny)+ll] = 0.0; cu[2+3*(nx+nxe*ny)+ll] = 0.0; } #pragma omp for \ private(j,k) for (k = 0; k < ny; k++) { for (j = 0; j < nx; j++) { cu[3*(j+nxe*k)] += cu[3*(j+nxe*k)+nxye3*nz]; cu[1+3*(j+nxe*k)] += cu[1+3*(j+nxe*k)+nxye3*nz]; cu[2+3*(j+nxe*k)] += cu[2+3*(j+nxe*k)+nxye3*nz]; cu[3*(j+nxe*k)+nxye3*nz] = 0.0; cu[1+3*(j+nxe*k)+nxye3*nz] = 0.0; cu[2+3*(j+nxe*k)+nxye3*nz] = 0.0; } cu[3*nxe*k] += cu[3*(nx+nxe*k)+nxye3*nz]; cu[1+3*nxe*k] += cu[1+3*(nx+nxe*k)+nxye3*nz]; cu[2+3*nxe*k] += cu[2+3*(nx+nxe*k)+nxye3*nz]; cu[3*(nx+nxe*k)+nxye3*nz] = 0.0; cu[1+3*(nx+nxe*k)+nxye3*nz] = 0.0; cu[2+3*(nx+nxe*k)+nxye3*nz] = 0.0; } } for (j = 0; j < nx; j++) { cu[3*j] += cu[3*(j+nxe*ny)+nxye3*nz]; cu[1+3*j] += cu[1+3*(j+nxe*ny)+nxye3*nz]; cu[2+3*j] += cu[2+3*(j+nxe*ny)+nxye3*nz]; cu[3*(j+nxe*ny)+nxye3*nz] = 0.0; cu[1+3*(j+nxe*ny)+nxye3*nz] = 0.0; cu[2+3*(j+nxe*ny)+nxye3*nz] = 0.0; } cu[0] += cu[3*(nx+nxe*ny)+nxye3*nz]; cu[1] += cu[1+3*(nx+nxe*ny)+nxye3*nz]; cu[2] += cu[2+3*(nx+nxe*ny)+nxye3*nz]; cu[3*(nx+nxe*ny)+nxye3*nz] = 0.0; cu[1+3*(nx+nxe*ny)+nxye3*nz] = 0.0; cu[2+3*(nx+nxe*ny)+nxye3*nz] = 0.0; return; } /*--------------------------------------------------------------------*/ void caguard3l(float q[], int nx, int ny, int nz, int nxe, int nye, int nze) { /* accumulate extended periodic scalar field q linear interpolation nx/ny/nz = system length in x/y direction nxe = first dimension of field arrays, must be >= nx+1 nye = second dimension of field arrays, must be >= ny+1 nze = third dimension of field arrays, must be >= nz+1 local data */ int j, k, l, nxye, ll; nxye = nxe*nye; /* accumulate edges of extended field */ #pragma omp parallel { #pragma omp for \ private(j,k,l,ll) for (l = 0; l < nz; l++) { ll = nxye*l; for (k = 0; k < ny; k++) { q[nxe*k+ll] += q[nx+nxe*k+ll]; q[nx+nxe*k+ll] = 0.0; } for (j = 0; j < nx; j++) { q[j+ll] += q[j+nxe*ny+ll]; q[j+nxe*ny+ll] = 0.0; } q[ll] += q[nx+nxe*ny+ll]; q[nx+nxe*ny+ll] = 0.0; } #pragma omp for \ private(j,k) for (k = 0; k < ny; k++) { for (j = 0; j < nx; j++) { q[j+nxe*k] += q[j+nxe*k+nxye*nz]; q[j+nxe*k+nxye*nz] = 0.0; } q[nxe*k] += q[nx+nxe*k+nxye*nz]; q[nx+nxe*k+nxye*nz] = 0.0; } } for (j = 0; j < nx; j++) { q[j] += q[j+nxe*ny+nxye*nz]; q[j+nxe*ny+nxye*nz] = 0.0; } q[0] += q[nx+nxe*ny+nxye*nz]; q[nx+nxe*ny+nxye*nz] = 0.0; return; } /*--------------------------------------------------------------------*/ void cmpois33(float complex q[], float complex fxyz[], int isign, float complex ffc[], float ax, float ay, float az, float affp, float *we, int nx, int ny, int nz, int nxvh, int nyv, int nzv, int nxhd, int nyhd, int nzhd) { /* this subroutine solves 3d poisson's equation in fourier space for force/charge (or convolution of electric field over particle shape) with periodic boundary conditions. 
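typical calling sequence (illustrative): the routine is called once with
isign = 0 to tabulate the form factor array ffc, and thereafter with
isign = -1 every time step to obtain the smoothed field fxyz and the field
energy we from the current charge density q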
for isign = 0, output: ffc input: isign,ax,ay,az,affp,nx,ny,nz,nxvh,nyv,nzv,nxhd,nyhd,nzhd for isign = -1, output: fxyz, we input: q,ffc,isign,nx,ny,nz,nxvh,nyv,nzv,nxhd,nyhd,nzhd approximate flop count is: 59*nxc*nyc*nzc + 26*(nxc*nyc + nxc*nzc + nyc*nzc) where nxc = nx/2 - 1, nyc = ny/2 - 1, nzc = nz/2 - 1 if isign = 0, form factor array is prepared if isign is not equal to 0, force/charge is calculated equation used is: fx[kz][ky][kx] = -sqrt(-1)*kx*g[kz][ky][kx]*s[kz][ky][kx], fy[kz][ky][kx] = -sqrt(-1)*ky*g[kz][ky][kx]*s[kz][ky][kx], fz[kz][ky][kx] = -sqrt(-1)*kz*g[kz][ky][kx]*s[kz][ky][kx], where kx = 2pi*j/nx, ky = 2pi*k/ny, kz = 2pi*l/nz, and j,k,l = fourier mode numbers, g[kz][ky][kx] = (affp/(kx**2+ky**2+kz**2))*s[kz][ky][kx], s[kz][ky][kx] = exp(-((kx*ax)**2+(ky*ay)**2+(kz*az)**2)/2), except for fx(kx=pi) = fy(kx=pi) = fz(kx=pi) = 0, fx(ky=pi) = fy(ky=pi) = fx(ky=pi) = 0, fx(kz=pi) = fy(kz=pi) = fz(kz=pi) = 0, fx(kx=0,ky=0,kz=0) = fy(kx=0,ky=0,kz=0) = fz(kx=0,ky=0,kz=0) = 0. q[l][k][j] = complex charge density for fourier mode (j,k,l) fxyz[l][k][j][0] = x component of complex force/charge fxyz[l][k][j][1] = y component of complex force/charge fxyz[l][k][j][2] = z component of complex force/charge all for fourier mode (j,k,l) cimag(ffc[l][k][j]) = finite-size particle shape factor s for fourier mode (j,k,l) creal(ffc[l][k][j]) = potential green's function g for fourier mode (j,k,l) ax/ay/az = half-width of particle in x/y/z direction affp = normalization constant = nx*ny*nz/np, where np=number of particles electric field energy is also calculated, using we = nx*ny*nz*sum((affp/(kx**2+ky**2+kz**2))* |q[kz][ky][kx]*s[kz][ky][kx]|**2) nx/ny/nz = system length in x/y/z direction nxvh = first dimension of field arrays, must be >= nxh nyv = second dimension of field arrays, must be >= ny nzv = third dimension of field arrays, must be >= nz nxhd = first dimension of form factor array, must be >= nxh nyhd = second dimension of form factor array, must be >= nyh nzhd = third dimension of form factor array, must be >= nzh local data */ int nxh, nyh, nzh, j, k, l, k1, l1, kk, kj, ll, lj, nxyhd, nxvyh; float dnx, dny, dnz, dkx, dky, dkz, at1, at2, at3, at4, at5, at6; float complex zero, zt1, zt2; double wp, sum1, sum2; nxh = nx/2; nyh = 1 > ny/2 ? 1 : ny/2; nzh = 1 > nz/2 ? 
1 : nz/2; nxyhd = nxhd*nyhd; nxvyh = nxvh*nyv; dnx = 6.28318530717959/(float) nx; dny = 6.28318530717959/(float) ny; dnz = 6.28318530717959/(float) nz; zero = 0.0 + 0.0*_Complex_I; if (isign != 0) goto L40; /* prepare form factor array */ for (l = 0; l < nzh; l++) { dkz = dnz*(float) l; ll = nxyhd*l; at1 = dkz*dkz; at2 = pow((dkz*az),2); for (k = 0; k < nyh; k++) { dky = dny*(float) k; kk = nxhd*k; at3 = dky*dky + at1; at4 = pow((dky*ay),2) + at2; for (j = 0; j < nxh; j++) { dkx = dnx*(float) j; at5 = dkx*dkx + at3; at6 = exp(-0.5*(pow((dkx*ax),2) + at4)); if (at5==0.0) { ffc[j+kk+ll] = affp + 1.0*_Complex_I; } else { ffc[j+kk+ll] = (affp*at6/at5) + at6*_Complex_I; } } } } return; /* calculate force/charge and sum field energy */ L40: sum1 = 0.0; /* mode numbers 0 < kx < nx/2, 0 < ky < ny/2, and 0 < kz < nz/2 */ #pragma omp parallel { #pragma omp for nowait \ private(j,k,l,k1,l1,ll,lj,kk,kj,dky,dkz,at1,at2,at3,at4,zt1,zt2,wp) \ reduction(+:sum1) for (l = 1; l < nzh; l++) { dkz = dnz*(float) l; ll = nxyhd*l; lj = nxvyh*l; l1 = nxvyh*nz - lj; wp = 0.0; for (k = 1; k < nyh; k++) { dky = dny*(float) k; kk = nxhd*k; kj = nxvh*k; k1 = nxvh*ny - kj; for (j = 1; j < nxh; j++) { at1 = crealf(ffc[j+kk+ll])*cimagf(ffc[j+kk+ll]); at2 = at1*dnx*(float) j; at3 = dky*at1; at4 = dkz*at1; zt1 = cimagf(q[j+kj+lj]) - crealf(q[j+kj+lj])*_Complex_I; zt2 = cimagf(q[j+k1+lj]) - crealf(q[j+k1+lj])*_Complex_I; fxyz[3*(j+kj+lj)] = at2*zt1; fxyz[1+3*(j+kj+lj)] = at3*zt1; fxyz[2+3*(j+kj+lj)] = at4*zt1; fxyz[3*(j+k1+lj)] = at2*zt2; fxyz[1+3*(j+k1+lj)] = -at3*zt2; fxyz[2+3*(j+k1+lj)] = at4*zt2; zt1 = cimagf(q[j+kj+l1]) - crealf(q[j+kj+l1])*_Complex_I; zt2 = cimagf(q[j+k1+l1]) - crealf(q[j+k1+l1])*_Complex_I; fxyz[3*(j+kj+l1)] = at2*zt1; fxyz[1+3*(j+kj+l1)] = at3*zt1; fxyz[2+3*(j+kj+l1)] = -at4*zt1; fxyz[3*(j+k1+l1)] = at2*zt2; fxyz[1+3*(j+k1+l1)] = -at3*zt2; fxyz[2+3*(j+k1+l1)] = -at4*zt2; wp += at1*(q[j+kj+lj]*conjf(q[j+kj+lj]) + q[j+k1+lj]*conjf(q[j+k1+lj]) + q[j+kj+l1]*conjf(q[j+kj+l1]) + q[j+k1+l1]*conjf(q[j+k1+l1])); } } /* mode numbers kx = 0, nx/2 */ for (k = 1; k < nyh; k++) { kk = nxhd*k; kj = nxvh*k; k1 = nxvh*ny - kj; at1 = crealf(ffc[kk+ll])*cimagf(ffc[kk+ll]); at3 = at1*dny*(float) k; at4 = dkz*at1; zt1 = cimagf(q[kj+lj]) - crealf(q[kj+lj])*_Complex_I; zt2 = cimagf(q[kj+l1]) - crealf(q[kj+l1])*_Complex_I; fxyz[3*(kj+lj)] = zero; fxyz[1+3*(kj+lj)] = at3*zt1; fxyz[2+3*(kj+lj)] = at4*zt1; fxyz[3*(k1+lj)] = zero; fxyz[1+3*(k1+lj)] = zero; fxyz[2+3*(k1+lj)] = zero; fxyz[3*(kj+l1)] = zero; fxyz[1+3*(kj+l1)] = at3*zt2; fxyz[2+3*(kj+l1)] = -at4*zt2; fxyz[3*(k1+l1)] = zero; fxyz[1+3*(k1+l1)] = zero; fxyz[2+3*(k1+l1)] = zero; wp += at1*(q[kj+lj]*conjf(q[kj+lj]) + q[kj+l1]*conjf(q[kj+l1])); } /* mode numbers ky = 0, ny/2 */ k1 = nxvh*nyh; for (j = 1; j < nxh; j++) { at1 = crealf(ffc[j+ll])*cimagf(ffc[j+ll]); at2 = at1*dnx*(float) j; at4 = dkz*at1; zt1 = cimagf(q[j+lj]) - crealf(q[j+lj])*_Complex_I; zt2 = cimagf(q[j+l1]) - crealf(q[j+l1])*_Complex_I; fxyz[3*(j+lj)] = at2*zt1; fxyz[1+3*(j+lj)] = zero; fxyz[2+3*(j+lj)] = at4*zt1; fxyz[3*(j+k1+lj)] = zero; fxyz[1+3*(j+k1+lj)] = zero; fxyz[2+3*(j+k1+lj)] = zero; fxyz[3*(j+l1)] = at2*zt2; fxyz[1+3*(j+l1)] = zero; fxyz[2+3*(j+l1)] = -at4*zt2; fxyz[3*(j+k1+l1)] = zero; fxyz[1+3*(j+k1+l1)] = zero; fxyz[2+3*(j+k1+l1)] = zero; wp += at1*(q[j+lj]*conjf(q[j+lj]) + q[j+l1]*conjf(q[j+l1])); } /* mode numbers kx = 0, nx/2 */ at1 = crealf(ffc[ll])*cimagf(ffc[ll]); at4 = dkz*at1; zt1 = cimagf(q[lj]) - crealf(q[lj])*_Complex_I; fxyz[3*lj] = zero; fxyz[1+3*lj] = zero; fxyz[2+3*lj] = 
at4*zt1; fxyz[3*(k1+lj)] = zero; fxyz[1+3*(k1+lj)] = zero; fxyz[2+3*(k1+lj)] = zero; fxyz[3*l1] = zero; fxyz[1+3*l1] = zero; fxyz[2+3*l1] = zero; fxyz[3*(k1+l1)] = zero; fxyz[1+3*(k1+l1)] = zero; fxyz[2+3*(k1+l1)] = zero; wp += at1*(q[lj]*conjf(q[lj])); sum1 += wp; } } /* mode numbers kz = 0, nz/2 */ l1 = nxvyh*nzh; sum2 = 0.0; #pragma omp parallel for \ private(j,k,k1,kk,kj,dky,at1,at2,at3,zt1,zt2,wp) \ reduction(+:sum2) for (k = 1; k < nyh; k++) { dky = dny*(float) k; kk = nxhd*k; kj = nxvh*k; k1 = nxvh*ny - kj; wp = 0.0; for (j = 1; j < nxh; j++) { at1 = crealf(ffc[j+kk])*cimagf(ffc[j+kk]); at2 = at1*dnx*(float) j; at3 = dky*at1; zt1 = cimagf(q[j+kj]) - crealf(q[j+kj])*_Complex_I; zt2 = cimagf(q[j+k1]) - crealf(q[j+k1])*_Complex_I; fxyz[3*(j+kj)] = at2*zt1; fxyz[1+3*(j+kj)] = at3*zt1; fxyz[2+3*(j+kj)] = zero; fxyz[3*(j+k1)] = at2*zt2; fxyz[1+3*(j+k1)] = -at3*zt2; fxyz[2+3*(j+k1)] = zero; fxyz[3*(j+kj+l1)] = zero; fxyz[1+3*(j+kj+l1)] = zero; fxyz[2+3*(j+kj+l1)] = zero; fxyz[3*(j+k1+l1)] = zero; fxyz[1+3*(j+k1+l1)] = zero; fxyz[2+3*(j+k1+l1)] = zero; wp += at1*(q[j+kj]*conjf(q[j+kj]) + q[j+k1]*conjf(q[j+k1])); } /* mode numbers kx = 0, nx/2 */ at1 = crealf(ffc[kk])*cimagf(ffc[kk]); at3 = at1*dny*(float) k; zt1 = cimagf(q[kj]) - crealf(q[kj])*_Complex_I; fxyz[3*kj] = zero; fxyz[1+3*kj] = at3*zt1; fxyz[2+3*kj] = zero; fxyz[3*k1] = zero; fxyz[1+3*k1] = zero; fxyz[2+3*k1] = zero; fxyz[3*(kj+l1)] = zero; fxyz[1+3*(kj+l1)] = zero; fxyz[2+3*(kj+l1)] = zero; fxyz[3*(k1+l1)] = zero; fxyz[1+3*(k1+l1)] = zero; fxyz[2+3*(k1+l1)] = zero; wp += at1*(q[kj]*conjf(q[kj])); sum2 += wp; } wp = 0.0; /* mode numbers ky = 0, ny/2 */ k1 = nxvh*nyh; for (j = 1; j < nxh; j++) { at1 = crealf(ffc[j])*cimagf(ffc[j]); at2 = at1*dnx*(float) j; zt1 = cimagf(q[j]) - crealf(q[j])*_Complex_I; fxyz[3*j] = at2*zt1; fxyz[1+3*j] = zero; fxyz[2+3*j] = zero; fxyz[3*(j+k1)] = zero; fxyz[1+3*(j+k1)] = zero; fxyz[2+3*(j+k1)] = zero; fxyz[3*(j+l1)] = zero; fxyz[1+3*(j+l1)] = zero; fxyz[2+3*(j+l1)] = zero; fxyz[3*(j+k1+l1)] = zero; fxyz[1+3*(j+k1+l1)] = zero; fxyz[2+3*(j+k1+l1)] = zero; wp += at1*(q[j]*conjf(q[j])); } fxyz[0] = zero; fxyz[1] = zero; fxyz[2] = zero; fxyz[3*k1] = zero; fxyz[1+3*k1] = zero; fxyz[2+3*k1] = zero; fxyz[3*l1] = zero; fxyz[1+3*l1] = zero; fxyz[2+3*l1] = zero; fxyz[3*(k1+l1)] = zero; fxyz[1+3*(k1+l1)] = zero; fxyz[2+3*(k1+l1)] = zero; *we = (sum1 + sum2 + wp)*((float) nx)*((float) ny)*((float) nz); return; } /*--------------------------------------------------------------------*/ void cmcuperp3(float complex cu[], int nx, int ny, int nz, int nxvh, int nyv, int nzv) { /* this subroutine calculates the transverse current in fourier space input: all, output: cu approximate flop count is: 100*nxc*nyc*nzc + 36*(nxc*nyc + nxc*nzc + nyc*nzc) and (nx/2)*nyc*nzc divides where nxc = nx/2 - 1, nyc = ny/2 - 1, nzc = nz/2 - 1 the transverse current is calculated using the equation: cux[kz][ky][kx] = cux[kz][ky][kx] - kx*(kx*cux[kz][ky][kx]+ky*cuy[kz][ky][kx] + kz*cuz[kz][ky][kx])/(kx*kx+ky*ky+kz*kz) cuy[kz][ky][kx] = cuy[kz][ky][kx] - ky*(kx*cux([kz][ky][kx]+ky*cuy[kz][ky][kx] + kz*cuz[kz][ky][kx])/(kx*kx+ky*ky+kz*kz) cuz[kz][ky][kx] = cuz[kz][ky][kx] - kz*(kx*cux[kz][ky][kx]+ky*cuy[kz][ky][kx] + kz*cuz[kz][ky][kx])/(kx*kx+ky*ky+kz*kz) where kx = 2pi*j/nx, ky = 2pi*k/ny, kz = 2pi*l/nz, and j,k,l = fourier mode numbers, except for cux(kx=pi) = cuy(kx=pi) = cuz(kx=pi) = 0, cux(ky=pi) = cuy(ky=pi) = cux(ky=pi) = 0, cux(kz=pi) = cuy(kz=pi) = cuz(kz=pi) = 0, cux(kx=0,ky=0,kz=0) = cuy(kx=0,ky=0,kz=0) = cuz(kx=0,ky=0,kz=0) = 0. 
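equivalently (an illustrative restatement of the three equations above):
the longitudinal part k*(k.cu)/(k.k) is subtracted from cu, so that
k.cu = 0 afterwards; for example, for a mode with ky = kz = 0 the x
component is zeroed while the y and z components are left unchanged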
cu[l][k][j][i] = complex current density for fourier mode (j,k,l) nx/ny/nz = system length in x/y/z direction nxvh = second dimension of field arrays, must be >= nxh nyv = third dimension of field arrays, must be >= ny nzv = fourth dimension of field arrays, must be >= nz local data */ int nxh, nyh, nzh, j, k, l, k1, l1, kj, lj, nxvyh; float dnx, dny, dnz, dkx, dky, dkz, dky2, dkz2, dkyz2, at1; float complex zero, zt1; nxh = nx/2; nyh = 1 > ny/2 ? 1 : ny/2; nzh = 1 > nz/2 ? 1 : nz/2; nxvyh = nxvh*nyv; dnx = 6.28318530717959/(float) nx; dny = 6.28318530717959/(float) ny; dnz = 6.28318530717959/(float) nz; zero = 0.0 + 0.0*_Complex_I; /* calculate transverse part of current */ /* mode numbers 0 < kx < nx/2, 0 < ky < ny/2, and 0 < kz < nz/2 */ #pragma omp parallel { #pragma omp for nowait \ private(j,k,l,k1,l1,lj,kj,dkx,dky,dkz,dkz2,dkyz2,at1,zt1) for (l = 1; l < nzh; l++) { dkz = dnz*(float) l; lj = nxvyh*l; l1 = nxvyh*nz - lj; dkz2 = dkz*dkz; for (k = 1; k < nyh; k++) { dky = dny*(float) k; kj = nxvh*k; k1 = nxvh*ny - kj; dkyz2 = dky*dky + dkz2; for (j = 1; j < nxh; j++) { dkx = dnx*(float) j; at1 = 1.0/(dkx*dkx + dkyz2); zt1 = at1*(dkx*cu[3*(j+kj+lj)] + dky*cu[1+3*(j+kj+lj)] + dkz*cu[2+3*(j+kj+lj)]); cu[3*(j+kj+lj)] -= dkx*zt1; cu[1+3*(j+kj+lj)] -= dky*zt1; cu[2+3*(j+kj+lj)] -= dkz*zt1; zt1 = at1*(dkx*cu[3*(j+k1+lj)] - dky*cu[1+3*(j+k1+lj)] + dkz*cu[2+3*(j+k1+lj)]); cu[3*(j+k1+lj)] -= dkx*zt1; cu[1+3*(j+k1+lj)] += dky*zt1; cu[2+3*(j+k1+lj)] -= dkz*zt1; zt1 = at1*(dkx*cu[3*(j+kj+l1)] + dky*cu[1+3*(j+kj+l1)] - dkz*cu[2+3*(j+kj+l1)]); cu[3*(j+kj+l1)] -= dkx*zt1; cu[1+3*(j+kj+l1)] -= dky*zt1; cu[2+3*(j+kj+l1)] += dkz*zt1; zt1 = at1*(dkx*cu[3*(j+k1+l1)] - dky*cu[1+3*(j+k1+l1)] - dkz*cu[2+3*(j+k1+l1)]); cu[3*(j+k1+l1)] -= dkx*zt1; cu[1+3*(j+k1+l1)] += dky*zt1; cu[2+3*(j+k1+l1)] += dkz*zt1; } } /* mode numbers kx = 0, nx/2 */ for (k = 1; k < nyh; k++) { kj = nxvh*k; k1 = nxvh*ny - kj; dky = dny*(float) k; at1 = 1.0/(dky*dky + dkz2); zt1 = at1*(dky*cu[1+3*(kj+lj)] + dkz*cu[2+3*(kj+lj)]); cu[1+3*(kj+lj)] -= dky*zt1; cu[2+3*(kj+lj)] -= dkz*zt1; cu[3*(k1+lj)] = zero; cu[1+3*(k1+lj)] = zero; cu[2+3*(k1+lj)] = zero; zt1 = at1*(dky*cu[1+3*(kj+l1)] - dkz*cu[2+3*(kj+l1)]); cu[1+3*(kj+l1)] -= dky*zt1; cu[2+3*(kj+l1)] += dkz*zt1; cu[3*(k1+l1)] = zero; cu[1+3*(k1+l1)] = zero; cu[2+3*(k1+l1)] = zero; } /* mode numbers ky = 0, ny/2 */ k1 = nxvh*nyh; for (j = 1; j < nxh; j++) { dkx = dnx*(float) j; at1 = 1.0/(dkx*dkx + dkz2); zt1 = at1*(dkx*cu[3*(j+lj)] + dkz*cu[2+3*(j+lj)]); cu[3*(j+lj)] -= dkx*zt1; cu[2+3*(j+lj)] -= dkz*zt1; cu[3*(j+k1+lj)] = zero; cu[1+3*(j+k1+lj)] = zero; cu[2+3*(j+k1+lj)] = zero; zt1 = at1*(dkx*cu[3*(j+l1)] - dkz*cu[2+3*(j+l1)]); cu[3*(j+l1)] -= dkx*zt1; cu[2+3*(j+l1)] += dkz*zt1; cu[3*(j+k1+l1)] = zero; cu[1+3*(j+k1+l1)] = zero; cu[2+3*(j+k1+l1)] = zero; } /* mode numbers kx = 0, nx/2 */ cu[2+3*lj] = zero; cu[3*(k1+lj)] = zero; cu[1+3*(k1+lj)] = zero; cu[2+3*(k1+lj)] = zero; cu[3*l1] = zero; cu[1+3*l1] = zero; cu[2+3*l1] = zero; cu[3*(k1+l1)] = zero; cu[1+3*(k1+l1)] = zero; cu[2+3*(k1+l1)] = zero; } } /* mode numbers kz = 0, nz/2 */ l1 = nxvyh*nzh; for (k = 1; k < nyh; k++) { dky = dny*(float) k; kj = nxvh*k; k1 = nxvh*ny - kj; dky2 = dky*dky; for (j = 1; j < nxh; j++) { dkx = dnx*(float) j; at1 = 1.0/(dkx*dkx + dky2); zt1 = at1*(dkx*cu[3*(j+kj)] + dky*cu[1+3*(j+kj)]); cu[3*(j+kj)] -= dkx*zt1; cu[1+3*(j+kj)] -= dky*zt1; zt1 = at1*(dkx*cu[3*(j+k1)]- dky*cu[1+3*(j+k1)]); cu[3*(j+k1)] -= dkx*zt1; cu[1+3*(j+k1)] += dky*zt1; cu[3*(j+kj+l1)] = zero; cu[1+3*(j+kj+l1)] = zero; cu[2+3*(j+kj+l1)] = 
zero; cu[3*(j+k1+l1)] = zero; cu[1+3*(j+k1+l1)] = zero; cu[2+3*(j+k1+l1)] = zero; } /* mode numbers kx = 0, nx/2 */ cu[1+3*kj] = zero; cu[3*k1] = zero; cu[1+3*k1] = zero; cu[2+3*k1] = zero; cu[3*(kj+l1)] = zero; cu[1+3*(kj+l1)] = zero; cu[2+3*(kj+l1)] = zero; cu[3*(k1+l1)] = zero; cu[1+3*(k1+l1)] = zero; cu[2+3*(k1+l1)] = zero; } /* mode numbers ky = 0, ny/2 */ k1 = nxvh*nyh; for (j = 1; j < nxh; j++) { cu[3*j] = zero; cu[3*(j+k1)] = zero; cu[1+3*(j+k1)] = zero; cu[2+3*(j+k1)] = zero; cu[3*(j+l1)] = zero; cu[1+3*(j+l1)] = zero; cu[2+3*(j+l1)] = zero; cu[3*(j+k1+l1)] = zero; cu[1+3*(j+k1+l1)] = zero; cu[2+3*(j+k1+l1)] = zero; } cu[0] = zero; cu[1] = zero; cu[2] = zero; cu[3*k1] = zero; cu[1+3*k1] = zero; cu[2+3*k1] = zero; cu[3*l1] = zero; cu[1+3*l1] = zero; cu[2+3*l1] = zero; cu[3*(k1+l1)] = zero; cu[1+3*(k1+l1)] = zero; cu[2+3*(k1+l1)] = zero; return; } /*--------------------------------------------------------------------*/ void cmibpois33(float complex cu[], float complex bxyz[], float complex ffc[], float ci, float *wm, int nx, int ny, int nz, int nxvh, int nyv, int nzv, int nxhd, int nyhd, int nzhd) { /* this subroutine solves 3d poisson's equation in fourier space for magnetic field with periodic boundary conditions. input: cu,ffc,ci,nx,ny,nz,nxvh,nyv,nzv,nxhd,nyhd,nzhd output: bxyz, wm approximate flop count is: 193*nxc*nyc*nzc + 84*(nxc*nyc + nxc*nzc + nyc*nzc) where nxc = nx/2 - 1, nyc = ny/2 - 1, nzc = nz/2 - 1 the magnetic field is calculated using the equations: bx[kz][ky][kx] = ci*ci*sqrt(-1)*g[kz][ky][kx]* (ky*cuz[kz][ky][kx]-kz*cuy[kz][ky][kx]), by[kz][ky][kx] = ci*ci*sqrt(-1)*g([kz][ky][kx]* (kz*cux[kz][ky][kx]-kx*cuz[kz][ky][kx]), bz[kz][ky][kx] = ci*ci*sqrt(-1)*g[kz][ky][kx]* (kx*cuy[kz][ky][kx]-ky*cux[kz][ky][kx]), where kx = 2pi*j/nx, ky = 2pi*k/ny, kz = 2pi*l/nz, and j,k,l = fourier mode numbers, g(kx,ky,kz) = (affp/(kx**2+ky**2+kz**2))*s(kx,ky,kz), s(kx,ky,kz) = exp(-((kx*ax)**2+(ky*ay)**2+(kz*az)**2)/2), except for bx(kx=pi) = by(kx=pi) = bz(kx=pi) = 0, bx(ky=pi) = by(ky=pi) = bx(ky=pi) = 0, bx(kz=pi) = by(kz=pi) = bz(kz=pi) = 0, bx(kx=0,ky=0,kz=0) = by(kx=0,ky=0,kz=0) = bz(kx=0,ky=0,kz=0) = 0. cu[l][k][j][i] = complex current density for fourier mode (j,k,l) bxyz[l][k][j][i] = i component of complex magnetic field all for fourier mode (j,k,l) cimag(ffc[l][k][j]) = finite-size particle shape factor s for fourier mode (j,k,l) creal(ffc[l][k][j]) = potential green's function g for fourier mode (j,k,l) ci = reciprocal of velocity of light magnetic field energy is also calculated, using wm = nx*ny*nz*sum((affp/(kx**2+ky**2+kz**2))*ci*ci |cu[kz][ky][kx]*s[kz][ky][kx]|**2) this expression is valid only if the current is divergence-free nx/ny/nz = system length in x/y/z direction nxvh = second dimension of field arrays, must be >= nxh nyv = third dimension of field arrays, must be >= ny nzv = fourth dimension of field arrays, must be >= nz nxhd = dimension of form factor array, must be >= nxh nyhd = second dimension of form factor array, must be >= nyh nzhd = third dimension of form factor array, must be >= nzh local data */ int nxh, nyh, nzh, j, k, l, k1, l1, kk, kj, ll, lj, nxyhd, nxvyh; float dnx, dny, dnz, dky, dkz, ci2, at1, at2, at3, at4; float complex zero, zt1, zt2, zt3; double wp, sum1, sum2; nxh = nx/2; nyh = 1 > ny/2 ? 1 : ny/2; nzh = 1 > nz/2 ? 
1 : nz/2; nxyhd = nxhd*nyhd; nxvyh = nxvh*nyv; dnx = 6.28318530717959/(float) nx; dny = 6.28318530717959/(float) ny; dnz = 6.28318530717959/(float) nz; zero = 0.0 + 0.0*_Complex_I; ci2 = ci*ci; /* calculate magnetic field and sum field energy */ sum1 = 0.0; /* mode numbers 0 < kx < nx/2, 0 < ky < ny/2, and 0 < kz < nz/2 */ #pragma omp parallel { #pragma omp for nowait \ private(j,k,l,k1,l1,ll,lj,kk,kj,dky,dkz,at1,at2,at3,at4,zt1,zt2,zt3,wp) \ reduction(+:sum1) for (l = 1; l < nzh; l++) { dkz = dnz*(float) l; ll = nxyhd*l; lj = nxvyh*l; l1 = nxvyh*nz - lj; wp = 0.0; for (k = 1; k < nyh; k++) { dky = dny*(float) k; kk = nxhd*k; kj = nxvh*k; k1 = nxvh*ny - kj; for (j = 1; j < nxh; j++) { at1 = ci2*crealf(ffc[j+kk+ll]); at2 = at1*dnx*(float) j; at3 = dky*at1; at4 = dkz*at1; at1 = at1*cimagf(ffc[j+kk+ll]); zt1 = -cimagf(cu[2+3*(j+kj+lj)]) + crealf(cu[2+3*(j+kj+lj)])*_Complex_I; zt2 = -cimagf(cu[1+3*(j+kj+lj)]) + crealf(cu[1+3*(j+kj+lj)])*_Complex_I; zt3 = -cimagf(cu[3*(j+kj+lj)]) + crealf(cu[3*(j+kj+lj)])*_Complex_I; bxyz[3*(j+kj+lj)] = at3*zt1 - at4*zt2; bxyz[1+3*(j+kj+lj)] = at4*zt3 - at2*zt1; bxyz[2+3*(j+kj+lj)] = at2*zt2 - at3*zt3; zt1 = -cimagf(cu[2+3*(j+k1+lj)]) + crealf(cu[2+3*(j+k1+lj)])*_Complex_I; zt2 = -cimagf(cu[1+3*(j+k1+lj)]) + crealf(cu[1+3*(j+k1+lj)])*_Complex_I; zt3 = -cimagf(cu[3*(j+k1+lj)]) + crealf(cu[3*(j+k1+lj)])*_Complex_I; bxyz[3*(j+k1+lj)] = -at3*zt1 - at4*zt2; bxyz[1+3*(j+k1+lj)] = at4*zt3 - at2*zt1; bxyz[2+3*(j+k1+lj)] = at2*zt2 + at3*zt3; zt1 = -cimagf(cu[2+3*(j+kj+l1)]) + crealf(cu[2+3*(j+kj+l1)])*_Complex_I; zt2 = -cimagf(cu[1+3*(j+kj+l1)]) + crealf(cu[1+3*(j+kj+l1)])*_Complex_I; zt3 = -cimagf(cu[3*(j+kj+l1)]) + crealf(cu[3*(j+kj+l1)])*_Complex_I; bxyz[3*(j+kj+l1)] = at3*zt1 + at4*zt2; bxyz[1+3*(j+kj+l1)] = -at4*zt3 - at2*zt1; bxyz[2+3*(j+kj+l1)] = at2*zt2 - at3*zt3; zt1 = -cimagf(cu[2+3*(j+k1+l1)]) + crealf(cu[2+3*(j+k1+l1)])*_Complex_I; zt2 = -cimagf(cu[1+3*(j+k1+l1)]) + crealf(cu[1+3*(j+k1+l1)])*_Complex_I; zt3 = -cimagf(cu[3*(j+k1+l1)]) + crealf(cu[3*(j+k1+l1)])*_Complex_I; bxyz[3*(j+k1+l1)] = -at3*zt1 + at4*zt2; bxyz[1+3*(j+k1+l1)] = -at4*zt3 - at2*zt1; bxyz[2+3*(j+k1+l1)] = at2*zt2 + at3*zt3; wp += at1*(cu[3*(j+kj+lj)]*conjf(cu[3*(j+kj+lj)]) + cu[1+3*(j+kj+lj)]*conjf(cu[1+3*(j+kj+lj)]) + cu[2+3*(j+kj+lj)]*conjf(cu[2+3*(j+kj+lj)]) + cu[3*(j+k1+lj)]*conjf(cu[3*(j+k1+lj)]) + cu[1+3*(j+k1+lj)]*conjf(cu[1+3*(j+k1+lj)]) + cu[2+3*(j+k1+lj)]*conjf(cu[2+3*(j+k1+lj)]) + cu[3*(j+kj+l1)]*conjf(cu[3*(j+kj+l1)]) + cu[1+3*(j+kj+l1)]*conjf(cu[1+3*(j+kj+l1)]) + cu[2+3*(j+kj+l1)]*conjf(cu[2+3*(j+kj+l1)]) + cu[3*(j+k1+l1)]*conjf(cu[3*(j+k1+l1)]) + cu[1+3*(j+k1+l1)]*conjf(cu[1+3*(j+k1+l1)]) + cu[2+3*(j+k1+l1)]*conjf(cu[2+3*(j+k1+l1)])); } } /* mode numbers kx = 0, nx/2 */ for (k = 1; k < nyh; k++) { kk = nxhd*k; kj = nxvh*k; k1 = nxvh*ny - kj; at1 = ci2*crealf(ffc[kk+ll]); at3 = at1*dny*(float) k; at4 = dkz*at1; at1 = at1*cimagf(ffc[kk+ll]); zt1 = -cimagf(cu[2+3*(kj+lj)]) + crealf(cu[2+3*(kj+lj)])*_Complex_I; zt2 = -cimagf(cu[1+3*(kj+lj)]) + crealf(cu[1+3*(kj+lj)])*_Complex_I; zt3 = -cimagf(cu[3*(kj+lj)]) + crealf(cu[3*(kj+lj)])*_Complex_I; bxyz[3*(kj+lj)] = at3*zt1 - at4*zt2; bxyz[1+3*(kj+lj)] = at4*zt3; bxyz[2+3*(kj+lj)] = -at3*zt3; bxyz[3*(k1+lj)] = zero; bxyz[1+3*(k1+lj)] = zero; bxyz[2+3*(k1+lj)] = zero; zt1 = -cimagf(cu[2+3*(kj+l1)]) + crealf(cu[2+3*(kj+l1)])*_Complex_I; zt2 = -cimagf(cu[1+3*(kj+l1)]) + crealf(cu[1+3*(kj+l1)])*_Complex_I; zt3 = -cimagf(cu[3*(kj+l1)]) + crealf(cu[3*(kj+l1)])*_Complex_I; bxyz[3*(kj+l1)] = at3*zt1 + at4*zt2; bxyz[1+3*(kj+l1)] = -at4*zt3; 
bxyz[2+3*(kj+l1)] = -at3*zt3; bxyz[3*(k1+l1)] = zero; bxyz[1+3*(k1+l1)] = zero; bxyz[2+3*(k1+l1)] = zero; wp += at1*(cu[3*(kj+lj)]*conjf(cu[3*(kj+lj)]) + cu[1+3*(kj+lj)]*conjf(cu[1+3*(kj+lj)]) + cu[2+3*(kj+lj)]*conjf(cu[2+3*(kj+lj)]) + cu[3*(kj+l1)]*conjf(cu[3*(kj+l1)]) + cu[1+3*(kj+l1)]*conjf(cu[1+3*(kj+l1)]) + cu[2+3*(kj+l1)]*conjf(cu[2+3*(kj+l1)])); } /* mode numbers ky = 0, ny/2 */ k1 = nxvh*nyh; for (j = 1; j < nxh; j++) { at1 = ci2*crealf(ffc[j+ll]); at2 = at1*dnx*(float) j; at4 = dkz*at1; at1 = at1*cimagf(ffc[j+ll]); zt1 = -cimagf(cu[2+3*(j+lj)]) + crealf(cu[2+3*(j+lj)])*_Complex_I; zt2 = -cimagf(cu[1+3*(j+lj)]) + crealf(cu[1+3*(j+lj)])*_Complex_I; zt3 = -cimagf(cu[3*(j+lj)]) + crealf(cu[3*(j+lj)])*_Complex_I; bxyz[3*(j+lj)] = -at4*zt2; bxyz[1+3*(j+lj)] = at4*zt3 - at2*zt1; bxyz[2+3*(j+lj)] = at2*zt2; bxyz[3*(j+k1+lj)] = zero; bxyz[1+3*(j+k1+lj)] = zero; bxyz[2+3*(j+k1+lj)] = zero; zt1 = -cimagf(cu[2+3*(j+l1)]) + crealf(cu[2+3*(j+l1)])*_Complex_I; zt2 = -cimagf(cu[1+3*(j+l1)]) + crealf(cu[1+3*(j+l1)])*_Complex_I; zt3 = -cimagf(cu[3*(j+l1)]) + crealf(cu[3*(j+l1)])*_Complex_I; bxyz[3*(j+l1)] = at4*zt2; bxyz[1+3*(j+l1)] = -at4*zt3 - at2*zt1; bxyz[2+3*(j+l1)] = at2*zt2; bxyz[3*(j+k1+l1)] = zero; bxyz[1+3*(j+k1+l1)] = zero; bxyz[2+3*(j+k1+l1)] = zero; wp += at1*(cu[3*(j+lj)]*conjf(cu[3*(j+lj)]) + cu[1+3*(j+lj)]*conjf(cu[1+3*(j+lj)]) + cu[2+3*(j+lj)]*conjf(cu[2+3*(j+lj)]) + cu[3*(j+l1)]*conjf(cu[3*(j+l1)]) + cu[1+3*(j+l1)]*conjf(cu[1+3*(j+l1)]) + cu[2+3*(j+l1)]*conjf(cu[2+3*(j+l1)])); } /* mode numbers kx = 0, nx/2 */ at1 = ci2*crealf(ffc[ll]); at4 = dkz*at1; at1 = at1*cimagf(ffc[ll]); zt2 = -cimagf(cu[1+3*(lj)]) + crealf(cu[1+3*(lj)])*_Complex_I; zt3 = -cimagf(cu[3*(lj)]) + crealf(cu[3*(lj)])*_Complex_I; bxyz[3*lj] = -at4*zt2; bxyz[1+3*lj] = at4*zt3; bxyz[2+3*lj] = zero; bxyz[3*(k1+lj)] = zero; bxyz[1+3*(k1+lj)] = zero; bxyz[2+3*(k1+lj)] = zero; bxyz[3*l1] = zero; bxyz[1+3*l1] = zero; bxyz[2+3*l1] = zero; bxyz[3*(k1+l1)] = zero; bxyz[1+3*(k1+l1)] = zero; bxyz[2+3*(k1+l1)] = zero; wp += at1*(cu[3*lj]*conjf(cu[3*lj]) + cu[1+3*lj]*conjf(cu[1+3*lj]) + cu[2+3*lj]*conjf(cu[2+3*lj])); sum1 += wp; } } /* mode numbers kz = 0, nz/2 */ l1 = nxvyh*nzh; sum2 = 0.0; #pragma omp parallel for \ private(j,k,k1,kk,kj,dky,at1,at2,at3,zt1,zt2,zt3,wp) \ reduction(+:sum2) for (k = 1; k < nyh; k++) { dky = dny*(float) k; kk = nxhd*k; kj = nxvh*k; k1 = nxvh*ny - kj; wp = 0.0; for (j = 1; j < nxh; j++) { at1 = ci2*crealf(ffc[j+kk]); at2 = at1*dnx*(float) j; at3 = dky*at1; at1 = at1*cimagf(ffc[j+kk]); zt1 = -cimagf(cu[2+3*(j+kj)]) + crealf(cu[2+3*(j+kj)])*_Complex_I; zt2 = -cimagf(cu[1+3*(j+kj)]) + crealf(cu[1+3*(j+kj)])*_Complex_I; zt3 = -cimagf(cu[3*(j+kj)]) + crealf(cu[3*(j+kj)])*_Complex_I; bxyz[3*(j+kj)] = at3*zt1; bxyz[1+3*(j+kj)] = -at2*zt1; bxyz[2+3*(j+kj)] = at2*zt2 - at3*zt3; zt1 = -cimagf(cu[2+3*(j+k1)]) + crealf(cu[2+3*(j+k1)])*_Complex_I; zt2 = -cimagf(cu[1+3*(j+k1)]) + crealf(cu[1+3*(j+k1)])*_Complex_I; zt3 = -cimagf(cu[3*(j+k1)]) + crealf(cu[3*(j+k1)])*_Complex_I; bxyz[3*(j+k1)] = -at3*zt1; bxyz[1+3*(j+k1)] = -at2*zt1; bxyz[2+3*(j+k1)] = at2*zt2 + at3*zt3; bxyz[3*(j+kj+l1)] = zero; bxyz[1+3*(j+kj+l1)] = zero; bxyz[2+3*(j+kj+l1)] = zero; bxyz[3*(j+k1+l1)] = zero; bxyz[1+3*(j+k1+l1)] = zero; bxyz[2+3*(j+k1+l1)] = zero; wp += at1*(cu[3*(j+kj)]*conjf(cu[3*(j+kj)]) + cu[1+3*(j+kj)]*conjf(cu[1+3*(j+kj)]) + cu[2+3*(j+kj)]*conjf(cu[2+3*(j+kj)]) + cu[3*(j+k1)]*conjf(cu[3*(j+k1)]) + cu[1+3*(j+k1)]*conjf(cu[1+3*(j+k1)]) + cu[2+3*(j+k1)]*conjf(cu[2+3*(j+k1)])); } /* mode numbers kx = 0, nx/2 */ at1 = 
ci2*crealf(ffc[kk]); at3 = at1*dny*(float) k; at1 = at1*cimagf(ffc[kk]); zt1 = -cimagf(cu[2+3*(kj)]) + crealf(cu[2+3*(kj)])*_Complex_I; zt3 = -cimagf(cu[3*(kj)]) + crealf(cu[3*(kj)])*_Complex_I; bxyz[3*kj] = at3*zt1; bxyz[1+3*kj] = zero; bxyz[2+3*kj] = -at3*zt3; bxyz[3*k1] = zero; bxyz[1+3*k1] = zero; bxyz[2+3*k1] = zero; bxyz[3*(kj+l1)] = zero; bxyz[1+3*(kj+l1)] = zero; bxyz[2+3*(kj+l1)] = zero; bxyz[3*(k1+l1)] = zero; bxyz[1+3*(k1+l1)] = zero; bxyz[2+3*(k1+l1)] = zero; wp += at1*(cu[3*kj]*conjf(cu[3*kj]) + cu[1+3*kj]*conjf(cu[1+3*kj]) + cu[2+3*kj]*conjf(cu[2+3*kj])); sum2 += wp; } wp = 0.0; /* mode numbers ky = 0, ny/2 */ k1 = nxvh*nyh; for (j = 1; j < nxh; j++) { at1 = ci2*crealf(ffc[j]); at2 = at1*dnx*(float) j; at1 = at1*cimagf(ffc[j]); zt1 = -cimagf(cu[2+3*j]) + crealf(cu[2+3*j])*_Complex_I; zt2 = -cimagf(cu[1+3*j]) + crealf(cu[1+3*j])*_Complex_I; bxyz[3*j] = zero; bxyz[1+3*j] = -at2*zt1; bxyz[2+3*j] = at2*zt2; bxyz[3*(j+k1)] = zero; bxyz[1+3*(j+k1)] = zero; bxyz[2+3*(j+k1)] = zero; bxyz[3*(j+l1)] = zero; bxyz[1+3*(j+l1)] = zero; bxyz[2+3*(j+l1)] = zero; bxyz[3*(j+k1+l1)] = zero; bxyz[1+3*(j+k1+l1)] = zero; bxyz[2+3*(j+k1+l1)] = zero; wp += at1*(cu[3*j]*conjf(cu[3*j]) + cu[1+3*j]*conjf(cu[1+3*j]) + cu[2+3*j]*conjf(cu[2+3*j])); } bxyz[0] = zero; bxyz[1] = zero; bxyz[2] = zero; bxyz[3*k1] = zero; bxyz[1+3*k1] = zero; bxyz[2+3*k1] = zero; bxyz[3*l1] = zero; bxyz[1+3*l1] = zero; bxyz[2+3*l1] = zero; bxyz[3*(k1+l1)] = zero; bxyz[1+3*(k1+l1)] = zero; bxyz[2+3*(k1+l1)] = zero; *wm = (sum1 + sum2 + wp)*((float) nx)*((float) ny)*((float) nz); return; } /*--------------------------------------------------------------------*/ void cmmaxwel3(float complex exyz[], float complex bxyz[], float complex cu[], float complex ffc[], float ci, float dt, float *wf, float *wm, int nx, int ny, int nz, int nxvh, int nyv, int nzv, int nxhd, int nyhd, int nzhd) { /* this subroutine solves 3d maxwell's equation in fourier space for transverse electric and magnetic fields with periodic boundary conditions. input: all, output: wf, wm, exyz, bxyz approximate flop count is: 680*nxc*nyc*nzc + 149*(nxc*nyc + nxc*nzc + nyc*nzc) plus nxc*nyc*nzc divides where nxc = nx/2 - 1, nyc = ny/2 - 1, nzc = nz/2 - 1 the magnetic field is first updated half a step using the equations: bx[kz][ky][kx] = bx[kz][ky][kx] - .5*dt*sqrt(-1)* (ky*ez[kz][ky][kx]-kz*ey[kz][ky][kx]) by[kz][ky][kx] = by[kz][ky][kx] - .5*dt*sqrt(-1)* (kz*ex[kz][ky][kx]-kx*ez[kz][ky][kx]) bz[kz][ky][kx] = bz[kz][ky][kx] - .5*dt*sqrt(-1)* (kx*ey[kz][ky][kx]-ky*ex[kz][ky][kx]) the electric field is then updated a whole step using the equations: ex[kz][ky][kx] = ex[kz][ky][kx] + c2*dt*sqrt(-1)* (ky*bz[kz][ky][kx]-kz*by[kz][ky][kx]) - affp*dt*cux[kz][ky][kx]*s[kz][ky][kx] ey[kz][ky][kx] = ey[kz][ky][kx] + c2*dt*sqrt(-1)* (kz*bx[kz][ky][kx]-kx*bz[kz][ky][kx]) - affp*dt*cuy[kz][ky][kx]*s[kz][ky][kx] ez[kz][ky][kx] = ez[kz][ky][kx] + c2*dt*sqrt(-1)* (kx*by[kz][ky][kx]-ky*bx[kz][ky][kx]) - affp*dt*cuz[kz][ky][kx]*s[kz][ky][kx] the magnetic field is finally updated the remaining half step with the new electric field and the previous magnetic field equations. where kx = 2pi*j/nx, ky = 2pi*k/ny, kz = 2pi*l/nz, c2 = 1./(ci*ci) and s[kz][ky][kx] = exp(-((kx*ax)**2+(ky*ay)**2+(kz*az)**2) j,k,l = fourier mode numbers, except for ex(kx=pi) = ey(kx=pi) = ez(kx=pi) = 0, ex(ky=pi) = ey(ky=pi) = ex(ky=pi) = 0, ex(kz=pi) = ey(kz=pi) = ez(kz=pi) = 0, ex(kx=0,ky=0,kz=0) = ey(kx=0,ky=0,kz=0) = ez(kx=0,ky=0,kz=0) = 0. and similarly for bx, by, bz. 
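schematic summary (illustrative, restating the sequence above): with
dth = .5*dt, c2 = 1./(ci*ci), cdt = c2*dt, and adt = affp*dt, each mode is
advanced as b -> b - dth*curl(e), then e -> e + cdt*curl(b) - adt*s*cu using
the half-advanced b, then b -> b - dth*curl(e) using the new e, so the b
update effectively sees e centered at the half time level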
cu[l][k][j][i] = complex current density exyz[l][k][j][i] = complex transverse electric field bxyz[l][k][j][i] = complex magnetic field for component i, all for fourier mode (j,k,l) creal(ffc[0][0][0]) = affp = normalization constant = nx*ny*nz/np, where np=number of particles cimag(ffc[l][k][j]) = finite-size particle shape factor s, s[kz][ky][kx] = exp(-((kx*ax)**2+(ky*ay)**2+(kz*az)**2)/2) for fourier mode (j,k,l) ci = reciprocal of velocity of light dt = time interval between successive calculations transverse electric field energy is also calculated, using wf = nx*ny*nz**sum((1/affp)*|exyz[kz][ky][kx]|**2) magnetic field energy is also calculated, using wm = nx*ny*nz**sum((c2/affp)*|bxyz[kz][ky][kx]|**2) nx/ny/nz = system length in x/y/z direction nxvh = second dimension of field arrays, must be >= nxh nyv = third dimension of field arrays, must be >= ny nzv = fourth dimension of field arrays, must be >= nz nxhd = second dimension of form factor array, must be >= nxh nyhd = third dimension of form factor array, must be >= nyh nzhd = fourth dimension of form factor array, must be >= nzh local data */ int nxh, nyh, nzh, j, k, l, k1, l1, kk, kj, ll, lj, nxyhd, nxvyh; float dnx, dny, dnz, dth, c2, cdt, affp, anorm, dkx, dky, dkz; float adt, afdt; float complex zero, zt1, zt2, zt3, zt4, zt5, zt6, zt7, zt8, zt9; double wp, ws, sum1, sum2, sum3, sum4; if (ci <= 0.0) return; nxh = nx/2; nyh = 1 > ny/2 ? 1 : ny/2; nzh = 1 > nz/2 ? 1 : nz/2; nxyhd = nxhd*nyhd; nxvyh = nxvh*nyv; dnx = 6.28318530717959/(float) nx; dny = 6.28318530717959/(float) ny; dnz = 6.28318530717959/(float) nz; dth = 0.5f*dt; c2 = 1.0f/(ci*ci); cdt = c2*dt; affp = creal(ffc[0]); adt = affp*dt; zero = 0.0 + 0.0*_Complex_I; anorm = 1.0f/affp; /* update electromagnetic field and sum field energies */ sum1 = 0.0; sum2 = 0.0; /* calculate the electromagnetic fields */ /* mode numbers 0 < kx < nx/2, 0 < ky < ny/2, and 0 < kz < nz/2 */ #pragma omp parallel { #pragma omp for nowait \ private(j,k,l,k1,l1,ll,lj,kk,kj,dkz,dky,dkx,afdt,zt1,zt2,zt3,zt4,zt5, \ zt6,zt7,zt8,zt9,ws,wp) \ reduction(+:sum1,sum2) for (l = 1; l < nzh; l++) { dkz = dnz*(float) l; ll = nxyhd*l; lj = nxvyh*l; l1 = nxvyh*nz - lj; ws = 0.0; wp = 0.0; for (k = 1; k < nyh; k++) { dky = dny*(float) k; kk = nxhd*k; kj = nxvh*k; k1 = nxvh*ny - kj; for (j = 1; j < nxh; j++) { dkx = dnx*(float) j; afdt = adt*cimagf(ffc[j+kk+ll]); /* update magnetic field half time step, ky > 0, kz > 0 */ zt1 = -cimagf(exyz[2+3*(j+kj+lj)]) + crealf(exyz[2+3*(j+kj+lj)])*_Complex_I; zt2 = -cimagf(exyz[1+3*(j+kj+lj)]) + crealf(exyz[1+3*(j+kj+lj)])*_Complex_I; zt3 = -cimagf(exyz[3*(j+kj+lj)]) + crealf(exyz[3*(j+kj+lj)])*_Complex_I; zt4 = bxyz[3*(j+kj+lj)] - dth*(dky*zt1 - dkz*zt2); zt5 = bxyz[1+3*(j+kj+lj)] - dth*(dkz*zt3 - dkx*zt1); zt6 = bxyz[2+3*(j+kj+lj)] - dth*(dkx*zt2 - dky*zt3); /* update electric field whole time step */ zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; zt7 = exyz[3*(j+kj+lj)] + cdt*(dky*zt1 - dkz*zt2) - afdt*cu[3*(j+kj+lj)]; zt8 = exyz[1+3*(j+kj+lj)] + cdt*(dkz*zt3 - dkx*zt1) - afdt*cu[1+3*(j+kj+lj)]; zt9 = exyz[2+3*(j+kj+lj)] + cdt*(dkx*zt2 - dky*zt3) - afdt*cu[2+3*(j+kj+lj)]; /* update magnetic field half time step and store electric field */ zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; exyz[3*(j+kj+lj)] = zt7; exyz[1+3*(j+kj+lj)] = zt8; exyz[2+3*(j+kj+lj)] = zt9; ws += anorm*(zt7*conjf(zt7) + 
zt8*conjf(zt8) + zt9*conjf(zt9)); zt4 -= dth*(dky*zt1 - dkz*zt2); zt5 -= dth*(dkz*zt3 - dkx*zt1); zt6 -= dth*(dkx*zt2 - dky*zt3); bxyz[3*(j+kj+lj)] = zt4; bxyz[1+3*(j+kj+lj)] = zt5; bxyz[2+3*(j+kj+lj)] = zt6; wp += anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) + zt6*conjf(zt6)); /* update magnetic field half time step, ky < 0, kz > 0 */ zt1 = -cimagf(exyz[2+3*(j+k1+lj)]) + crealf(exyz[2+3*(j+k1+lj)])*_Complex_I; zt2 = -cimagf(exyz[1+3*(j+k1+lj)]) + crealf(exyz[1+3*(j+k1+lj)])*_Complex_I; zt3 = -cimagf(exyz[3*(j+k1+lj)]) + crealf(exyz[3*(j+k1+lj)])*_Complex_I; zt4 = bxyz[3*(j+k1+lj)] + dth*(dky*zt1 + dkz*zt2); zt5 = bxyz[1+3*(j+k1+lj)] - dth*(dkz*zt3 - dkx*zt1); zt6 = bxyz[2+3*(j+k1+lj)] - dth*(dkx*zt2 + dky*zt3); /* update electric field whole time step */ zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; zt7 = exyz[3*(j+k1+lj)] - cdt*(dky*zt1 + dkz*zt2) - afdt*cu[3*(j+k1+lj)]; zt8 = exyz[1+3*(j+k1+lj)] + cdt*(dkz*zt3 - dkx*zt1) - afdt*cu[1+3*(j+k1+lj)]; zt9 = exyz[2+3*(j+k1+lj)] + cdt*(dkx*zt2 + dky*zt3) - afdt*cu[2+3*(j+k1+lj)]; /* update magnetic field half time step and store electric field */ zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; exyz[3*(j+k1+lj)] = zt7; exyz[1+3*(j+k1+lj)] = zt8; exyz[2+3*(j+k1+lj)] = zt9; ws += anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) + zt9*conjf(zt9)); zt4 += dth*(dky*zt1 + dkz*zt2); zt5 -= dth*(dkz*zt3 - dkx*zt1); zt6 -= dth*(dkx*zt2 + dky*zt3); bxyz[3*(j+k1+lj)] = zt4; bxyz[1+3*(j+k1+lj)] = zt5; bxyz[2+3*(j+k1+lj)] = zt6; wp += anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) + zt6*conjf(zt6)); /* update magnetic field half time step, ky > 0, kz < 0 */ zt1 = -cimagf(exyz[2+3*(j+kj+l1)]) + crealf(exyz[2+3*(j+kj+l1)])*_Complex_I; zt2 = -cimagf(exyz[1+3*(j+kj+l1)]) + crealf(exyz[1+3*(j+kj+l1)])*_Complex_I; zt3 = -cimagf(exyz[3*(j+kj+l1)]) + crealf(exyz[3*(j+kj+l1)])*_Complex_I; zt4 = bxyz[3*(j+kj+l1)] - dth*(dky*zt1 + dkz*zt2); zt5 = bxyz[1+3*(j+kj+l1)] + dth*(dkz*zt3 + dkx*zt1); zt6 = bxyz[2+3*(j+kj+l1)] - dth*(dkx*zt2 - dky*zt3); /* update electric field whole time step */ zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; zt7 = exyz[3*(j+kj+l1)] + cdt*(dky*zt1 + dkz*zt2) - afdt*cu[3*(j+kj+l1)]; zt8 = exyz[1+3*(j+kj+l1)] - cdt*(dkz*zt3 + dkx*zt1) - afdt*cu[1+3*(j+kj+l1)]; zt9 = exyz[2+3*(j+kj+l1)] + cdt*(dkx*zt2 - dky*zt3) - afdt*cu[2+3*(j+kj+l1)]; /* update magnetic field half time step and store electric field */ zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; exyz[3*(j+kj+l1)] = zt7; exyz[1+3*(j+kj+l1)] = zt8; exyz[2+3*(j+kj+l1)] = zt9; ws += anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) + zt9*conjf(zt9)); zt4 -= dth*(dky*zt1 + dkz*zt2); zt5 += dth*(dkz*zt3 + dkx*zt1); zt6 -= dth*(dkx*zt2 - dky*zt3); bxyz[3*(j+kj+l1)] = zt4; bxyz[1+3*(j+kj+l1)] = zt5; bxyz[2+3*(j+kj+l1)] = zt6; wp += anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) + zt6*conjf(zt6)); /* update magnetic field half time step, ky < 0, kz < 0 */ zt1 = -cimagf(exyz[2+3*(j+k1+l1)]) + crealf(exyz[2+3*(j+k1+l1)])*_Complex_I; zt2 = -cimagf(exyz[1+3*(j+k1+l1)]) + crealf(exyz[1+3*(j+k1+l1)])*_Complex_I; zt3 = -cimagf(exyz[3*(j+k1+l1)]) + crealf(exyz[3*(j+k1+l1)])*_Complex_I; zt4 = bxyz[3*(j+k1+l1)] + dth*(dky*zt1 - dkz*zt2); zt5 = bxyz[1+3*(j+k1+l1)] + dth*(dkz*zt3 + dkx*zt1); zt6 = 
bxyz[2+3*(j+k1+l1)] - dth*(dkx*zt2 + dky*zt3); /* update electric field whole time step */ zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; zt7 = exyz[3*(j+k1+l1)] - cdt*(dky*zt1 - dkz*zt2) - afdt*cu[3*(j+k1+l1)]; zt8 = exyz[1+3*(j+k1+l1)] - cdt*(dkz*zt3 + dkx*zt1) - afdt*cu[1+3*(j+k1+l1)]; zt9 = exyz[2+3*(j+k1+l1)] + cdt*(dkx*zt2 + dky*zt3) - afdt*cu[2+3*(j+k1+l1)]; /* update magnetic field half time step and store electric field */ zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; exyz[3*(j+k1+l1)] = zt7; exyz[1+3*(j+k1+l1)] = zt8; exyz[2+3*(j+k1+l1)] = zt9; ws += anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) + zt9*conjf(zt9)); zt4 += dth*(dky*zt1 - dkz*zt2); zt5 += dth*(dkz*zt3 + dkx*zt1); zt6 -= dth*(dkx*zt2 + dky*zt3); bxyz[3*(j+k1+l1)] = zt4; bxyz[1+3*(j+k1+l1)] = zt5; bxyz[2+3*(j+k1+l1)] = zt6; wp += anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) + zt6*conjf(zt6)); } } /* mode numbers kx = 0, nx/2 */ for (k = 1; k < nyh; k++) { dky = dny*(float) k; kk = nxhd*k; kj = nxvh*k; k1 = nxvh*ny - kj; afdt = adt*cimagf(ffc[kk+ll]); /* update magnetic field half time step, kz > 0 */ zt1 = -cimagf(exyz[2+3*(kj+lj)]) + crealf(exyz[2+3*(kj+lj)])*_Complex_I; zt2 = -cimagf(exyz[1+3*(kj+lj)]) + crealf(exyz[1+3*(kj+lj)])*_Complex_I; zt3 = -cimagf(exyz[3*(kj+lj)]) + crealf(exyz[3*(kj+lj)])*_Complex_I; zt4 = bxyz[3*(kj+lj)] - dth*(dky*zt1 - dkz*zt2); zt5 = bxyz[1+3*(kj+lj)] - dth*(dkz*zt3); zt6 = bxyz[2+3*(kj+lj)] + dth*(dky*zt3); /* update electric field whole time step */ zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; zt7 = exyz[3*(kj+lj)] + cdt*(dky*zt1 - dkz*zt2) - afdt*cu[3*(kj+lj)]; zt8 = exyz[1+3*(kj+lj)] + cdt*(dkz*zt3) - afdt*cu[1+3*(kj+lj)]; zt9 = exyz[2+3*(kj+lj)] - cdt*(dky*zt3) - afdt*cu[2+3*(kj+lj)]; /* update magnetic field half time step and store electric field */ zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; exyz[3*(kj+lj)] = zt7; exyz[1+3*(kj+lj)] = zt8; exyz[2+3*(kj+lj)] = zt9; ws += anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) + zt9*conjf(zt9)); zt4 -= dth*(dky*zt1 - dkz*zt2); zt5 -= dth*(dkz*zt3); zt6 += dth*(dky*zt3); bxyz[3*(kj+lj)] = zt4; bxyz[1+3*(kj+lj)] = zt5; bxyz[2+3*(kj+lj)] = zt6; wp += anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) + zt6*conjf(zt6)); bxyz[3*(k1+lj)] = zero; bxyz[1+3*(k1+lj)] = zero; bxyz[2+3*(k1+lj)] = zero; exyz[3*(k1+lj)] = zero; exyz[1+3*(k1+lj)] = zero; exyz[2+3*(k1+lj)] = zero; /* update magnetic field half time step, kz < 0 */ zt1 = -cimagf(exyz[2+3*(kj+l1)]) + crealf(exyz[2+3*(kj+l1)])*_Complex_I; zt2 = -cimagf(exyz[1+3*(kj+l1)]) + crealf(exyz[1+3*(kj+l1)])*_Complex_I; zt3 = -cimagf(exyz[3*(kj+l1)]) + crealf(exyz[3*(kj+l1)])*_Complex_I; zt4 = bxyz[3*(kj+l1)] - dth*(dky*zt1 + dkz*zt2); zt5 = bxyz[1+3*(kj+l1)] + dth*(dkz*zt3); zt6 = bxyz[2+3*(kj+l1)] + dth*(dky*zt3); /* update electric field whole time step */ zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; zt7 = exyz[3*(kj+l1)] + cdt*(dky*zt1 + dkz*zt2) - afdt*cu[3*(kj+l1)]; zt8 = exyz[1+3*(kj+l1)] - cdt*(dkz*zt3) - afdt*cu[1+3*(kj+l1)]; zt9 = exyz[2+3*(kj+l1)] - cdt*(dky*zt3) - afdt*cu[2+3*(kj+l1)]; /* update magnetic field half time step and store electric field */ zt1 = -cimagf(zt9) + 
crealf(zt9)*_Complex_I; zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; exyz[3*(kj+l1)] = zt7; exyz[1+3*(kj+l1)] = zt8; exyz[2+3*(kj+l1)] = zt9; ws += anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) + zt9*conjf(zt9)); zt4 -= dth*(dky*zt1 + dkz*zt2); zt5 += dth*(dkz*zt3); zt6 += dth*(dky*zt3); bxyz[3*(kj+l1)] = zt4; bxyz[1+3*(kj+l1)] = zt5; bxyz[2+3*(kj+l1)] = zt6; wp += anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) + zt6*conjf(zt6)); bxyz[3*(k1+l1)] = zero; bxyz[1+3*(k1+l1)] = zero; bxyz[2+3*(k1+l1)] = zero; exyz[3*(k1+l1)] = zero; exyz[1+3*(k1+l1)] = zero; exyz[2+3*(k1+l1)] = zero; } /* mode numbers ky = 0, ny/2 */ k1 = nxvh*nyh; for (j = 1; j < nxh; j++) { dkx = dnx*(float) j; afdt = adt*cimagf(ffc[j+ll]); /* update magnetic field half time step, kz > 0 */ zt1 = -cimagf(exyz[2+3*(j+lj)]) + crealf(exyz[2+3*(j+lj)])*_Complex_I; zt2 = -cimagf(exyz[1+3*(j+lj)]) + crealf(exyz[1+3*(j+lj)])*_Complex_I; zt3 = -cimagf(exyz[3*(j+lj)]) + crealf(exyz[3*(j+lj)])*_Complex_I; zt4 = bxyz[3*(j+lj)] + dth*(dkz*zt2); zt5 = bxyz[1+3*(j+lj)] - dth*(dkz*zt3 - dkx*zt1); zt6 = bxyz[2+3*(j+lj)] - dth*(dkx*zt2); /* update electric field whole time step */ zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; zt7 = exyz[3*(j+lj)] - cdt*(dkz*zt2) - afdt*cu[3*(j+lj)]; zt8 = exyz[1+3*(j+lj)] + cdt*(dkz*zt3 - dkx*zt1) - afdt*cu[1+3*(j+lj)]; zt9 = exyz[2+3*(j+lj)] + cdt*(dkx*zt2) - afdt*cu[2+3*(j+lj)]; /* update magnetic field half time step and store electric field */ zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; exyz[3*(j+lj)] = zt7; exyz[1+3*(j+lj)] = zt8; exyz[2+3*(j+lj)] = zt9; ws += anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) + zt9*conjf(zt9)); zt4 += dth*(dkz*zt2); zt5 -= dth*(dkz*zt3 - dkx*zt1); zt6 -= dth*(dkx*zt2); bxyz[3*(j+lj)] = zt4; bxyz[1+3*(j+lj)] = zt5; bxyz[2+3*(j+lj)] = zt6; wp += anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) + zt6*conjf(zt6)); bxyz[3*(j+k1+lj)] = zero; bxyz[1+3*(j+k1+lj)] = zero; bxyz[2+3*(j+k1+lj)] = zero; exyz[3*(j+k1+lj)] = zero; exyz[1+3*(j+k1+lj)] = zero; exyz[2+3*(j+k1+lj)] = zero; /* update magnetic field half time step, kz > 0 */ zt1 = -cimagf(exyz[2+3*(j+l1)]) + crealf(exyz[2+3*(j+l1)])*_Complex_I; zt2 = -cimagf(exyz[1+3*(j+l1)]) + crealf(exyz[1+3*(j+l1)])*_Complex_I; zt3 = -cimagf(exyz[3*(j+l1)]) + crealf(exyz[3*(j+l1)])*_Complex_I; zt4 = bxyz[3*(j+l1)] - dth*(dkz*zt2); zt5 = bxyz[1+3*(j+l1)] + dth*(dkz*zt3 + dkx*zt1); zt6 = bxyz[2+3*(j+l1)] - dth*(dkx*zt2); /* update electric field whole time step */ zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; zt7 = exyz[3*(j+l1)] + cdt*(dkz*zt2) - afdt*cu[3*(j+l1)]; zt8 = exyz[1+3*(j+l1)] - cdt*(dkz*zt3 + dkx*zt1) - afdt*cu[1+3*(j+l1)]; zt9 = exyz[2+3*(j+l1)] + cdt*(dkx*zt2) - afdt*cu[2+3*(j+l1)]; /* update magnetic field half time step and store electric field */ zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; exyz[3*(j+l1)] = zt7; exyz[1+3*(j+l1)] = zt8; exyz[2+3*(j+l1)] = zt9; ws += anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) + zt9*conjf(zt9)); zt4 -= dth*(dkz*zt2); zt5 += dth*(dkz*zt3 + dkx*zt1); zt6 -= dth*(dkx*zt2); bxyz[3*(j+l1)] = zt4; bxyz[1+3*(j+l1)] = zt5; bxyz[2+3*(j+l1)] = zt6; wp += anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) + zt6*conjf(zt6)); bxyz[3*(j+k1+l1)] = zero; 
bxyz[1+3*(j+k1+l1)] = zero; bxyz[2+3*(j+k1+l1)] = zero; exyz[3*(j+k1+l1)] = zero; exyz[1+3*(j+k1+l1)] = zero; exyz[2+3*(j+k1+l1)] = zero; } /* mode numbers kx = 0, nx/2 */ afdt = adt*cimagf(ffc[ll]); /* update magnetic field half time step */ zt2 = -cimagf(exyz[1+3*(lj)]) + crealf(exyz[1+3*(lj)])*_Complex_I; zt3 = -cimagf(exyz[3*(lj)]) + crealf(exyz[3*(lj)])*_Complex_I; zt4 = bxyz[3*lj] + dth*(dkz*zt2); zt5 = bxyz[1+3*lj] - dth*(dkz*zt3); /* update electric field whole time step */ zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; zt7 = exyz[3*lj] - cdt*(dkz*zt2) - afdt*cu[3*lj]; zt8 = exyz[1+3*lj] + cdt*(dkz*zt3) - afdt*cu[1+3*lj]; /* update magnetic field half time step and store electric field */ zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; exyz[3*lj] = zt7; exyz[1+3*lj] = zt8; exyz[2+3*lj] = zero; ws += anorm*(zt7*conjf(zt7) + zt8*conjf(zt8)); zt4 += dth*(dkz*zt2); zt5 -= dth*(dkz*zt3); bxyz[3*lj] = zt4; bxyz[1+3*lj] = zt5; bxyz[2+3*lj] = zero; wp += anorm*(zt4*conjf(zt4) + zt5*conjf(zt5)); bxyz[3*(k1+lj)] = zero; bxyz[1+3*(k1+lj)] = zero; bxyz[2+3*(k1+lj)] = zero; exyz[3*(k1+lj)] = zero; exyz[1+3*(k1+lj)] = zero; exyz[2+3*(k1+lj)] = zero; bxyz[3*l1] = zero; bxyz[1+3*l1] = zero; bxyz[2+3*l1] = zero; exyz[3*l1] = zero; exyz[1+3*l1] = zero; exyz[2+3*l1] = zero; bxyz[3*(k1+l1)] = zero; bxyz[1+3*(k1+l1)] = zero; bxyz[2+3*(k1+l1)] = zero; exyz[3*(k1+l1)] = zero; exyz[1+3*(k1+l1)] = zero; exyz[2+3*(k1+l1)]= zero; sum1 += ws; sum2 += wp; } } /* mode numbers kz = 0, nz/2 */ l1 = nxvyh*nzh; sum3 = 0.0; sum4 = 0.0; #pragma omp parallel for \ private(j,k,k1,kk,kj,dky,dkx,afdt,zt1,zt2,zt3,zt4,zt5,zt6,zt7,zt8,zt9, \ ws,wp) \ reduction(+:sum3,sum4) for (k = 1; k < nyh; k++) { dky = dny*(float) k; kk = nxhd*k; kj = nxvh*k; k1 = nxvh*ny - kj; ws = 0.0; wp = 0.0; for (j = 1; j < nxh; j++) { dkx = dnx*(float) j; afdt = adt*cimagf(ffc[j+kk]); /* update magnetic field half time step, ky > 0 */ zt1 = -cimagf(exyz[2+3*(j+kj)]) + crealf(exyz[2+3*(j+kj)])*_Complex_I; zt2 = -cimagf(exyz[1+3*(j+kj)]) + crealf(exyz[1+3*(j+kj)])*_Complex_I; zt3 = -cimagf(exyz[3*(j+kj)]) + crealf(exyz[3*(j+kj)])*_Complex_I; zt4 = bxyz[3*(j+kj)] - dth*(dky*zt1); zt5 = bxyz[1+3*(j+kj)] + dth*(dkx*zt1); zt6 = bxyz[2+3*(j+kj)] - dth*(dkx*zt2 - dky*zt3); /* update electric field whole time step */ zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; zt7 = exyz[3*(j+kj)] + cdt*(dky*zt1) - afdt*cu[3*(j+kj)]; zt8 = exyz[1+3*(j+kj)] - cdt*(dkx*zt1) - afdt*cu[1+3*(j+kj)]; zt9 = exyz[2+3*(j+kj)] + cdt*(dkx*zt2 - dky*zt3) - afdt*cu[2+3*(j+kj)]; /* update magnetic field half time step and store electric field */ zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; exyz[3*(j+kj)] = zt7; exyz[1+3*(j+kj)] = zt8; exyz[2+3*(j+kj)] = zt9; ws += anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) + zt9*conjf(zt9)); zt4 -= dth*(dky*zt1); zt5 += dth*(dkx*zt1); zt6 -= dth*(dkx*zt2 - dky*zt3); bxyz[3*(j+kj)] = zt4; bxyz[1+3*(j+kj)] = zt5; bxyz[2+3*(j+kj)] = zt6; wp += anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) + zt6*conjf(zt6)); /* update magnetic field half time step, ky < 0 */ zt1 = -cimagf(exyz[2+3*(j+k1)]) + crealf(exyz[2+3*(j+k1)])*_Complex_I; zt2 = -cimagf(exyz[1+3*(j+k1)]) + crealf(exyz[1+3*(j+k1)])*_Complex_I; zt3 = -cimagf(exyz[3*(j+k1)]) + crealf(exyz[3*(j+k1)])*_Complex_I; zt4 = bxyz[3*(j+k1)] + dth*(dky*zt1); zt5 = 
bxyz[1+3*(j+k1)] + dth*(dkx*zt1); zt6 = bxyz[2+3*(j+k1)] - dth*(dkx*zt2 + dky*zt3); /* update electric field whole time step */ zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; zt7 = exyz[3*(j+k1)] - cdt*(dky*zt1) - afdt*cu[3*(j+k1)]; zt8 = exyz[1+3*(j+k1)] - cdt*(dkx*zt1) - afdt*cu[1+3*(j+k1)]; zt9 = exyz[2+3*(j+k1)] + cdt*(dkx*zt2 + dky*zt3) - afdt*cu[2+3*(j+k1)]; /* update magnetic field half time step and store electric field */ zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; exyz[3*(j+k1)] = zt7; exyz[1+3*(j+k1)] = zt8; exyz[2+3*(j+k1)] = zt9; ws += anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) + zt9*conjf(zt9)); zt4 += dth*(dky*zt1); zt5 += dth*(dkx*zt1); zt6 -= dth*(dkx*zt2 + dky*zt3); bxyz[3*(j+k1)] = zt4; bxyz[1+3*(j+k1)] = zt5; bxyz[2+3*(j+k1)] = zt6; wp += anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) + zt6*conjf(zt6)); bxyz[3*(j+kj+l1)] = zero; bxyz[1+3*(j+kj+l1)] = zero; bxyz[2+3*(j+kj+l1)] = zero; exyz[3*(j+kj+l1)] = zero; exyz[1+3*(j+kj+l1)] = zero; exyz[2+3*(j+kj+l1)] = zero; bxyz[3*(j+k1+l1)] = zero; bxyz[1+3*(j+k1+l1)] = zero; bxyz[2+3*(j+k1+l1)] = zero; exyz[3*(j+k1+l1)] = zero; exyz[1+3*(j+k1+l1)] = zero; exyz[2+3*(j+k1+l1)] = zero; } /* mode numbers kx = 0, nx/2 */ afdt = adt*cimagf(ffc[kk]); /* update magnetic field half time step */ zt1 = -cimagf(exyz[2+3*(kj)]) + crealf(exyz[2+3*(kj)])*_Complex_I; zt3 = -cimagf(exyz[3*(kj)]) + crealf(exyz[3*(kj)])*_Complex_I; zt4 = bxyz[3*kj] - dth*(dky*zt1); zt6 = bxyz[2+3*kj] + dth*(dky*zt3); /* update electric field whole time step */ zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; zt7 = exyz[3*kj] + cdt*(dky*zt1) - afdt*cu[3*kj]; zt9 = exyz[2+3*kj] - cdt*(dky*zt3) - afdt*cu[2+3*kj]; /* update magnetic field half time step and store electric field */ zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; exyz[3*kj] = zt7; exyz[1+3*kj] = zero; exyz[2+3*kj] = zt9; ws += anorm*(zt7*conjf(zt7) + zt9*conjf(zt9)); zt4 -= dth*(dky*zt1); zt6 += dth*(dky*zt3); bxyz[3*kj] = zt4; bxyz[1+3*kj] = zero; bxyz[2+3*kj] = zt6; wp += anorm*(zt4*conjf(zt4) + zt6*conjf(zt6)); bxyz[3*k1] = zero; bxyz[1+3*k1] = zero; bxyz[2+3*k1] = zero; exyz[3*k1] = zero; exyz[1+3*k1] = zero; exyz[2+3*k1] = zero; bxyz[3*(kj+l1)] = zero; bxyz[1+3*(kj+l1)] = zero; bxyz[2+3*(kj+l1)]= zero; exyz[3*(kj+l1)] = zero; exyz[1+3*(kj+l1)] = zero; exyz[2+3*(kj+l1)] = zero; bxyz[3*(k1+l1)] = zero; bxyz[1+3*(k1+l1)] = zero; bxyz[2+3*(k1+l1)] = zero; exyz[3*(k1+l1)] = zero; exyz[1+3*(k1+l1)] = zero; exyz[2+3*(k1+l1)] = zero; sum3 += ws; sum4 += wp; } ws = 0.0; wp = 0.0; /* mode numbers ky = 0, ny/2 */ k1 = nxvh*nyh; for (j = 1; j < nxh; j++) { dkx = dnx*(float) j; afdt = adt*cimagf(ffc[j]); /* update magnetic field half time step */ zt1 = -cimagf(exyz[2+3*j]) + crealf(exyz[2+3*j])*_Complex_I; zt2 = -cimagf(exyz[1+3*j]) + crealf(exyz[1+3*j])*_Complex_I; zt5 = bxyz[1+3*j] + dth*(dkx*zt1); zt6 = bxyz[2+3*j] - dth*(dkx*zt2); /* update electric field whole time step */ zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; zt8 = exyz[1+3*j] - cdt*(dkx*zt1) - afdt*cu[1+3*j]; zt9 = exyz[2+3*j] + cdt*(dkx*zt2) - afdt*cu[2+3*j]; /* update magnetic field half time step and store electric field */ zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; exyz[3*j] = zero; exyz[1+3*j] = zt8; 
exyz[2+3*j] = zt9; ws += anorm*(zt8*conjf(zt8) + zt9*conjf(zt9)); zt5 += dth*(dkx*zt1); zt6 -= dth*(dkx*zt2); bxyz[3*j] = zero; bxyz[1+3*j] = zt5; bxyz[2+3*j] = zt6; wp += anorm*(zt5*conjf(zt5) + zt6*conjf(zt6)); bxyz[3*(j+k1)] = zero; bxyz[1+3*(j+k1)] = zero; bxyz[2+3*(j+k1)] = zero; exyz[3*(j+k1)] = zero; exyz[1+3*(j+k1)] = zero; exyz[2+3*(j+k1)] = zero; bxyz[3*(j+l1)] = zero; bxyz[1+3*(j+l1)] = zero; bxyz[2+3*(j+l1)] = zero; exyz[3*(j+l1)] = zero; exyz[1+3*(j+l1)] = zero; exyz[2+3*(j+l1)] = zero; bxyz[3*(j+k1+l1)] = zero; bxyz[1+3*(j+k1+l1)] = zero; bxyz[2+3*(j+k1+l1)] = zero; exyz[3*(j+k1+l1)] = zero; exyz[1+3*(j+k1+l1)] = zero; exyz[2+3*(j+k1+l1)] = zero; } bxyz[0] = zero; bxyz[1] = zero; bxyz[2] = zero; exyz[0] = zero; exyz[1] = zero; exyz[2]= zero; bxyz[3*k1] = zero; bxyz[1+3*k1] = zero; bxyz[2+3*k1] = zero; exyz[3*k1] = zero; exyz[1+3*k1] = zero; exyz[2+3*k1] = zero; bxyz[3*l1] = zero; bxyz[1+3*l1] = zero; bxyz[2+3*l1] = zero; exyz[3*l1] = zero; exyz[1+3*l1] = zero; exyz[2+3*l1] = zero; bxyz[3*(k1+l1)] = zero; bxyz[1+3*(k1+l1)] = zero; bxyz[2+3*(k1+l1)] = zero; exyz[3*(k1+l1)] = zero; exyz[1+3*(k1+l1)] = zero; exyz[2+3*(k1+l1)] = zero; *wf = (sum1 + sum3 + ws)*((float) nx)*((float) ny)*((float) nz); *wm = c2*(sum2 + sum4 + wp)*((float) nx)*((float) ny)*((float) nz); return; } /*--------------------------------------------------------------------*/ void cmemfield3(float complex fxyz[], float complex exyz[], float complex ffc[], int isign, int nx, int ny, int nz, int nxvh, int nyv, int nzv, int nxhd, int nyhd, int nzhd) { /* this subroutine either adds complex vector fields if isign > 0 or copies complex vector fields if isign < 0 includes additional smoothing local data */ int i, j, k, l, nxh, nyh, nzh, k1, l1, kk, kj, ll, lj, nxyhd, nxvyh; float at1; nxh = nx/2; nyh = 1 > ny/2 ? 1 : ny/2; nzh = 1 > nz/2 ? 
1 : nz/2; nxyhd = nxhd*nyhd; nxvyh = nxvh*nyv; /* add the fields */ if (isign > 0) { #pragma omp parallel { #pragma omp for nowait \ private(i,j,k,l,k1,l1,kk,kj,ll,lj,at1) for (l = 1; l < nzh; l++) { ll = nxyhd*l; lj = nxvyh*l; l1 = nxvyh*nz - lj; for (k = 1; k < nyh; k++) { kk = nxhd*k; kj = nxvh*k; k1 = nxvh*ny - kj; for (j = 0; j < nxh; j++) { at1 = cimagf(ffc[j+kk+ll]); for (i = 0; i < 3; i++) { fxyz[i+3*(j+kj+lj)] += exyz[i+3*(j+kj+lj)]*at1; fxyz[i+3*(j+k1+lj)] += exyz[i+3*(j+k1+lj)]*at1; fxyz[i+3*(j+kj+l1)] += exyz[i+3*(j+kj+l1)]*at1; fxyz[i+3*(j+k1+l1)] += exyz[i+3*(j+k1+l1)]*at1; } } } k1 = nxvh*nyh; for (j = 0; j < nxh; j++) { at1 = cimagf(ffc[j+ll]); for (i = 0; i < 3; i++) { fxyz[i+3*(j+lj)] += exyz[i+3*(j+lj)]*at1; fxyz[i+3*(j+k1+lj)] += exyz[i+3*(j+k1+lj)]*at1; fxyz[i+3*(j+l1)] += exyz[i+3*(j+l1)]*at1; fxyz[i+3*(j+k1+l1)] += exyz[i+3*(j+k1+l1)]*at1; } } } } l1 = nxvyh*nzh; #pragma omp parallel for private(i,j,k,k1,kk,kj,at1) for (k = 1; k < nyh; k++) { kk = nxhd*k; kj = nxvh*k; k1 = nxvh*ny - kj; for (j = 0; j < nxh; j++) { at1 = cimagf(ffc[j+kk]); for (i = 0; i < 3; i++) { fxyz[i+3*(j+kj)] += exyz[i+3*(j+kj)]*at1; fxyz[i+3*(j+k1)] += exyz[i+3*(j+k1)]*at1; fxyz[i+3*(j+kj+l1)] += exyz[i+3*(j+kj+l1)]*at1; fxyz[i+3*(j+k1+l1)] += exyz[i+3*(j+k1+l1)]*at1; } } } k1 = nxvh*nyh; for (j = 0; j < nxh; j++) { at1 = cimagf(ffc[j]); for (i = 0; i < 3; i++) { fxyz[i+3*j] += exyz[i+3*j]*at1; fxyz[i+3*(j+k1)] += exyz[i+3*(j+k1)]*at1; fxyz[i+3*(j+l1)] += exyz[i+3*(j+l1)]*at1; fxyz[i+3*(j+k1+l1)] += exyz[i+3*(j+k1+l1)]*at1; } } } /* copy the fields */ else if (isign < 0) { #pragma omp parallel { #pragma omp for nowait \ private(i,j,k,l,k1,l1,kk,kj,ll,lj,at1) for (l = 1; l < nzh; l++) { ll = nxyhd*l; lj = nxvyh*l; l1 = nxvyh*nz - lj; for (k = 1; k < nyh; k++) { kk = nxhd*k; kj = nxvh*k; k1 = nxvh*ny - kj; for (j = 0; j < nxh; j++) { at1 = cimagf(ffc[j+kk+ll]); for (i = 0; i < 3; i++) { fxyz[i+3*(j+kj+lj)] = exyz[i+3*(j+kj+lj)]*at1; fxyz[i+3*(j+k1+lj)] = exyz[i+3*(j+k1+lj)]*at1; fxyz[i+3*(j+kj+l1)] = exyz[i+3*(j+kj+l1)]*at1; fxyz[i+3*(j+k1+l1)] = exyz[i+3*(j+k1+l1)]*at1; } } } k1 = nxvh*nyh; for (j = 0; j < nxh; j++) { at1 = cimagf(ffc[j+ll]); for (i = 0; i < 3; i++) { fxyz[i+3*(j+lj)] = exyz[i+3*(j+lj)]*at1; fxyz[i+3*(j+k1+lj)] = exyz[i+3*(j+k1+lj)]*at1; fxyz[i+3*(j+l1)] = exyz[i+3*(j+l1)]*at1; fxyz[i+3*(j+k1+l1)] = exyz[i+3*(j+k1+l1)]*at1; } } } } l1 = nxvyh*nzh; #pragma omp parallel for private(i,j,k,k1,kk,kj,at1) for (k = 1; k < nyh; k++) { kk = nxhd*k; kj = nxvh*k; k1 = nxvh*ny - kj; for (j = 0; j < nxh; j++) { at1 = cimagf(ffc[j+kk]); for (i = 0; i < 3; i++) { fxyz[i+3*(j+kj)] = exyz[i+3*(j+kj)]*at1; fxyz[i+3*(j+k1)] = exyz[i+3*(j+k1)]*at1; fxyz[i+3*(j+kj+l1)] = exyz[i+3*(j+kj+l1)]*at1; fxyz[i+3*(j+k1+l1)] = exyz[i+3*(j+k1+l1)]*at1; } } } k1 = nxvh*nyh; for (j = 0; j < nxh; j++) { at1 = cimagf(ffc[j]); for (i = 0; i < 3; i++) { fxyz[i+3*j] = exyz[i+3*j]*at1; fxyz[i+3*(j+k1)] = exyz[i+3*(j+k1)]*at1; fxyz[i+3*(j+l1)] = exyz[i+3*(j+l1)]*at1; fxyz[i+3*(j+k1+l1)] = exyz[i+3*(j+k1+l1)]*at1; } } } return; } /*--------------------------------------------------------------------*/ void cwfft3rinit(int mixup[], float complex sct[], int indx, int indy, int indz, int nxhyzd, int nxyzhd) { /* this subroutine calculates tables needed by a three dimensional real to complex fast fourier transform and its inverse. 
input: indx, indy, indz, nxhyzd, nxyzhd output: mixup, sct mixup = array of bit reversed addresses sct = sine/cosine table indx/indy/indz = exponent which determines length in x/y/z direction, where nx=2**indx, ny=2**indy, nz=2**indz nxhyzd = maximum of (nx/2,ny,nz) nxyzhd = one half of maximum of (nx,ny,nz) written by viktor k. decyk, ucla local data */ int indx1, ndx1yz, nx, ny, nz, nxyz, nxhyz, nxyzh; int j, k, lb, ll, jb, it; float dnxyz, arg; indx1 = indx - 1; ndx1yz = indx1 > indy ? indx1 : indy; ndx1yz = ndx1yz > indz ? ndx1yz : indz; nx = 1L<<indx; ny = 1L<<indy; nz = 1L<<indz; nxyz = nx > ny ? nx : ny; nxyz = nxyz > nz ? nxyz : nz; nxhyz = 1L<<ndx1yz; /* bit-reverse index table: mixup[j] = 1 + reversed bits of j */ for (j = 0; j < nxhyz; j++) { lb = j; ll = 0; for (k = 0; k < ndx1yz; k++) { jb = lb/2; it = lb - 2*jb; lb = jb; ll = 2*ll + it; } mixup[j] = ll + 1; } /* sine/cosine table for the angles 2*n*pi/nxyz */ nxyzh = nxyz/2; dnxyz = 6.28318530717959/(float) nxyz; for (j = 0; j < nxyzh; j++) { arg = dnxyz*(float) j; sct[j] = cosf(arg) - sinf(arg)*_Complex_I; } return; } /*--------------------------------------------------------------------*/ void cfft3rmxy(float complex f[], int isign, int mixup[], float complex sct[], int indx, int indy, int indz, int nzi, int nzp, int nxhd, int nyd, int nzd, int nxhyzd, int nxyzhd) { /* this subroutine performs the x-y part of a three dimensional real to complex fast fourier transform and its inverse, for a subset of z, using complex arithmetic, with OpenMP for isign = (-1,1), input: all, output: f for isign = -1, approximate flop count: N*(5*log2(N) + 19/2) for isign = 1, approximate flop count: N*(5*log2(N) + 15/2) where N = (nx/2)*ny*nz indx/indy/indz = exponent which determines length in x/y/z direction, where nx=2**indx, ny=2**indy, nz=2**indz if isign = -1, an inverse fourier transform in x and y is performed f[i][m][n] = (1/nx*ny*nz)*sum(f[i][k][j]*exp(-sqrt(-1)*2pi*n*j/nx)* exp(-sqrt(-1)*2pi*m*k/ny)) if isign = 1, a forward fourier transform in x and y is performed f[l][k][j] = sum(f[l][m][n]*exp(sqrt(-1)*2pi*n*j/nx)* exp(sqrt(-1)*2pi*m*k/ny)) mixup = array of bit reversed addresses sct = sine/cosine table nzi = initial z index used nzp = number of z indices used nxhd = first dimension of f nyd,nzd = second and third dimensions of f nxhyzd = maximum of (nx/2,ny,nz) nxyzhd = maximum of (nx,ny,nz)/2 fourier coefficients are stored as follows: f[l][k][j] = real, imaginary part of mode j,k,l where 0 <= j < nx/2, 0 <= k < ny, 0 <= l < nz, except for f[l][k][0] = real, imaginary part of mode nx/2,k,l, where ny/2+1 <= k < ny and 0 <= l < nz, and f[l][0][0] = real, imaginary part of mode nx/2,0,l, f[l][ny/2][0] = real, imaginary part mode nx/2,ny/2,l, where nz/2+1 <= l < nz, and imag(f[0][0][0]) = real part of mode nx/2,0,0 imag(f[0][ny/2][0]) = real part of mode nx/2,ny/2,0 imag(f[nz/2][0][0]) = real part of mode nx/2,0,nz/2 imag(f[nz/2][ny/2][0]) = real part of mode nx/2,ny/2,nz/2 using jpl storage convention, as described in: E. Huang, P. C. Liewer, V. K. Decyk, and R. D. Ferraro, "Concurrent Three-Dimensional Fast Fourier Transform Algorithms for Coarse-Grained Distributed Memory Parallel Computers," Caltech CRPC Report 217-50, December 1993. written by viktor k. 
decyk, ucla local data */ int indx1, ndx1yz, nx, nxh, nxhh, ny, nyh; int nz, nxyz, nxhyz, nzt, nrx, nry, nrxb, nryb, nxhyd; int i, j, k, l, n, nn, j1, j2, k1, k2, ns, ns2, km, kmr, joff; float ani; float complex t1, t2, t3; if (isign==0) return; indx1 = indx - 1; ndx1yz = indx1 > indy ? indx1 : indy; ndx1yz = ndx1yz > indz ? ndx1yz : indz; nx = 1L<<indx; nxh = nx/2; nxhh = nx/4; ny = 1L<<indy; nyh = ny/2; nz = 1L<<indz; nxyz = nx > ny ? nx : ny; nxyz = nxyz > nz ? nxyz : nz; nxhyz = 1L<<ndx1yz; nzt = nzi + nzp - 1; nxhyd = nxhd*nyd; if (isign > 0) goto L180; /* inverse fourier transform */ nrxb = nxhyz/nxh; nrx = nxyz/nxh; nryb = nxhyz/ny; nry = nxyz/ny; #pragma omp parallel for \ private(i,j,k,l,n,ns,ns2,km,kmr,k1,k2,j1,j2,nn,joff,ani,t1,t2,t3) for (n = nzi-1; n < nzt; n++) { nn = nxhyd*n; /* bit-reverse array elements in x */ for (j = 0; j < nxh; j++) { j1 = (mixup[j] - 1)/nrxb; if (j < j1) { for (i = 0; i < ny; i++) { joff = nxhd*i + nn; t1 = f[j1+joff]; f[j1+joff] = f[j+joff]; f[j+joff] = t1; } } } /* first transform in x */ ns = 1; for (l = 0; l < indx1; l++) { ns2 = ns + ns; km = nxhh/ns; kmr = km*nrx; for (k = 0; k < km; k++) { k1 = ns2*k; k2 = k1 + ns; for (j = 0; j < ns; j++) { j1 = j + k1; j2 = j + k2; t1 = sct[kmr*j]; for (i = 0; i < ny; i++) { joff = nxhd*i + nn; t2 = t1*f[j2+joff]; f[j2+joff] = f[j1+joff] - t2; f[j1+joff] += t2; } } } ns = ns2; } /* unscramble coefficients and normalize */ kmr = nxyz/nx; ani = 0.5/(((float) nx)*((float) ny)*((float) nz)); for (j = 1; j < nxhh; j++) { t3 = cimagf(sct[kmr*j]) - crealf(sct[kmr*j])*_Complex_I; for (k = 0; k < ny; k++) { joff = nxhd*k + nn; t2 = conjf(f[nxh-j+joff]); t1 = f[j+joff] + t2; t2 = (f[j+joff] - t2)*t3; f[j+joff] = ani*(t1 + t2); f[nxh-j+joff] = ani*conjf(t1 - t2); } } ani = 2.0*ani; for (k = 0; k < ny; k++) { joff = nxhd*k + nn; f[nxhh+joff] = ani*conjf(f[nxhh+joff]); f[joff] = ani*((crealf(f[joff]) + cimagf(f[joff])) + (crealf(f[joff]) - cimagf(f[joff]))*_Complex_I); } /* bit-reverse array elements in y */ for (k = 0; k < ny; k++) { joff = nxhd*k + nn; k1 = (mixup[k] - 1)/nryb; if (k < k1) { k1 = nxhd*k1 + nn; for (i = 0; i < nxh; i++) { t1 = f[i+k1]; f[i+k1] = f[i+joff]; f[i+joff] = t1; } } } /* then transform in y */ ns = 1; for (l = 0; l < indy; l++) { ns2 = ns + ns; km = nyh/ns; kmr = km*nry; for (k = 0; k < km; k++) { k1 = ns2*k; k2 = k1 + ns; for (j = 0; j < ns; j++) { j1 = nxhd*(j + k1) + nn; j2 = nxhd*(j + k2) + nn; t1 = sct[kmr*j]; for (i = 0; i < nxh; i++) { t2 = t1*f[i+j2]; f[i+j2] = f[i+j1] - t2; f[i+j1] += t2; } } } ns = ns2; } /* unscramble modes kx = 0, nx/2 */ for (k = 1; k < nyh; k++) { joff = nxhd*k; k1 = nxhd*ny - joff + nn; joff += nn; t1 = f[k1]; f[k1] = 0.5*(cimagf(f[joff] + t1) + crealf(f[joff] - t1)*_Complex_I); f[joff] = 0.5*(crealf(f[joff] + t1) + cimagf(f[joff] - t1)*_Complex_I); } } return; /* forward fourier transform */ L180: nryb = nxhyz/ny; nry = nxyz/ny; nrxb = nxhyz/nxh; nrx = nxyz/nxh; #pragma omp parallel for \ private(i,j,k,l,n,ns,ns2,km,kmr,k1,k2,j1,j2,nn,joff,t1,t2,t3) for (n = nzi-1; n < nzt; n++) { nn = nxhyd*n; /* scramble modes kx = 0, nx/2 */ for (k = 1; k < nyh; k++) { joff = nxhd*k; k1 = nxhd*ny - joff + nn; joff += nn; t1 = cimagf(f[k1]) + crealf(f[k1])*_Complex_I; f[k1] = conjf(f[joff] - t1); f[joff] += t1; } /* bit-reverse array elements in y */ for (k = 0; k < ny; k++) { joff = nxhd*k + nn; k1 = (mixup[k] - 1)/nryb; if (k < k1) { k1 = nxhd*k1 + nn; for (i = 0; i < nxh; i++) { t1 = f[i+k1]; f[i+k1] = f[i+joff]; f[i+joff] = t1; } } } /* then transform in y */ ns = 1; for 
(l = 0; l < indy; l++) { ns2 = ns + ns; km = nyh/ns; kmr = km*nry; for (k = 0; k < km; k++) { k1 = ns2*k; k2 = k1 + ns; for (j = 0; j < ns; j++) { j1 = nxhd*(j + k1) + nn; j2 = nxhd*(j + k2) + nn; t1 = conjf(sct[kmr*j]); for (i = 0; i < nxh; i++) { t2 = t1*f[i+j2]; f[i+j2] = f[i+j1] - t2; f[i+j1] += t2; } } } ns = ns2; } /* scramble coefficients */ kmr = nxyz/nx; for (j = 1; j < nxhh; j++) { t3 = cimagf(sct[kmr*j]) + crealf(sct[kmr*j])*_Complex_I; for (k = 0; k < ny; k++) { joff = nxhd*k + nn; t2 = conjf(f[nxh-j+joff]); t1 = f[j+joff] + t2; t2 = (f[j+joff] - t2)*t3; f[j+joff] = t1 + t2; f[nxh-j+joff] = conjf(t1 - t2); } } for (k = 0; k < ny; k++) { joff = nxhd*k + nn; f[nxhh+joff] = 2.0*conjf(f[nxhh+joff]); f[joff] = (crealf(f[joff]) + cimagf(f[joff])) + (crealf(f[joff]) - cimagf(f[joff]))*_Complex_I; } /* bit-reverse array elements in x */ for (j = 0; j < nxh; j++) { j1 = (mixup[j] - 1)/nrxb; if (j < j1) { for (i = 0; i < ny; i++) { joff = nxhd*i + nn; t1 = f[j1+joff]; f[j1+joff] = f[j+joff]; f[j+joff] = t1; } } } /* finally transform in x */ ns = 1; for (l = 0; l < indx1; l++) { ns2 = ns + ns; km = nxhh/ns; kmr = km*nrx; for (k = 0; k < km; k++) { k1 = ns2*k; k2 = k1 + ns; for (j = 0; j < ns; j++) { j1 = j + k1; j2 = j + k2; t1 = conjf(sct[kmr*j]); for (i = 0; i < ny; i++) { joff = nxhd*i + nn; t2 = t1*f[j2+joff]; f[j2+joff] = f[j1+joff] - t2; f[j1+joff] += t2; } } } ns = ns2; } } return; } /*--------------------------------------------------------------------*/ void cfft3rmxz(float complex f[], int isign, int mixup[], float complex sct[], int indx, int indy, int indz, int nyi, int nyp, int nxhd, int nyd, int nzd, int nxhyzd, int nxyzhd) { /* this subroutine performs the z part of a three dimensional real to complex fast fourier transform and its inverse, for a subset of y, using complex arithmetic, with OpenMP for isign = (-1,1), input: all, output: f for isign = -1, approximate flop count: N*(5*log2(N) + 19/2) for isign = 1, approximate flop count: N*(5*log2(N) + 15/2) where N = (nx/2)*ny*nz indx/indy/indz = exponent which determines length in x/y/z direction, where nx=2**indx, ny=2**indy, nz=2**indz if isign = -1, an inverse fourier transform in z is performed f[l][k][j] = sum(f[i][k][j]*exp(-sqrt(-1)*2pi*l*i/nz)) if isign = 1, a forward fourier transform in z is performed f[i][m][n] = sum(f[l][m][n]*exp(sqrt(-1)*2pi*l*i/nz)) mixup = array of bit reversed addresses sct = sine/cosine table nyi = initial y index used nyp = number of y indices used nxhd = first dimension of f nyd,nzd = second and third dimensions of f nxhyzd = maximum of (nx/2,ny,nz) nxyzhd = maximum of (nx,ny,nz)/2 fourier coefficients are stored as follows: f[l][k][j] = real, imaginary part of mode j,k,l where 0 <= j < nx/2, 0 <= k < ny, 0 <= l < nz, except for f[l][k][0] = real, imaginary part of mode nx/2,k,l, where ny/2+1 <= k < ny and 0 <= l < nz, and f[l][0][0] = real, imaginary part of mode nx/2,0,l, f[l][ny/2][0] = real, imaginary part mode nx/2,ny/2,l, where nz/2+1 <= l < nz, and imag(f[0][0][0]) = real part of mode nx/2,0,0 imag(f[0][ny/2][0]) = real part of mode nx/2,ny/2,0 imag(f[nz/2][0][0]) = real part of mode nx/2,0,nz/2 imag(f[nz/2][ny/2][0]) = real part of mode nx/2,ny/2,nz/2 using jpl storage convention, as described in: E. Huang, P. C. Liewer, V. K. Decyk, and R. D. Ferraro, "Concurrent Three-Dimensional Fast Fourier Transform Algorithms for Coarse-Grained Distributed Memory Parallel Computers," Caltech CRPC Report 217-50, December 1993. written by viktor k. 
decyk, ucla local data */ int indx1, ndx1yz, nx, nxh, ny, nyh; int nz, nzh, nxyz, nxhyz, nyt, nrz, nrzb, nxhyd, ioff; int i, j, k, l, n, ll, j1, j2, k1, k2, l1, ns, ns2, km, kmr, i0, i1; float complex t1, t2; if (isign==0) return; indx1 = indx - 1; ndx1yz = indx1 > indy ? indx1 : indy; ndx1yz = ndx1yz > indz ? ndx1yz : indz; nx = 1L<<indx; nxh = nx/2; ny = 1L<<indy; nyh = ny/2; nz = 1L<<indz; nzh = nz/2; nxyz = nx > ny ? nx : ny; nxyz = nxyz > nz ? nxyz : nz; nxhyz = 1L<<ndx1yz; nyt = nyi + nyp - 1; nxhyd = nxhd*nyd; if (isign > 0) goto L90; /* inverse fourier transform */ nrzb = nxhyz/nz; nrz = nxyz/nz; #pragma omp parallel for \ private(i,j,k,l,n,ns,ns2,km,kmr,k1,k2,j1,j2,ll,l1,i0,i1,ioff,t1,t2) for (n = nyi-1; n < nyt; n++) { ioff = nxhd*n; /* bit-reverse array elements in z */ for (l = 0; l < nz; l++) { ll = nxhyd*l; l1 = (mixup[l] - 1)/nrzb; if (l < l1) { l1 = nxhyd*l1; i0 = ioff + ll; i1 = ioff + l1; for (i = 0; i < nxh; i++) { t1 = f[i+i1]; f[i+i1] = f[i+i0]; f[i+i0] = t1; } } } /* finally transform in z */ ns = 1; for (l = 0; l < indz; l++) { ns2 = ns + ns; km = nzh/ns; kmr = km*nrz; for (k = 0; k < km; k++) { k1 = ns2*k; k2 = k1 + ns; for (j = 0; j < ns; j++) { j1 = nxhyd*(j + k1); j2 = nxhyd*(j + k2); t1 = sct[kmr*j]; i0 = ioff + j1; i1 = ioff + j2; for (i = 0; i < nxh; i++) { t2 = t1*f[i+i1]; f[i+i1] = f[i+i0] - t2; f[i+i0] += t2; } } } ns = ns2; } } /* unscramble modes kx = 0, nx/2 */ for (n = 1; n < nzh; n++) { ll = nxhyd*n; l1 = nxhyd*nz - ll; if (nyi==1) { t1 = f[l1]; f[l1] = 0.5*(cimagf(f[ll] + t1) + crealf(f[ll] - t1)*_Complex_I); f[ll] = 0.5*(crealf(f[ll] + t1) + cimagf(f[ll] - t1)*_Complex_I); } if ((nyi <= (nyh+1)) && (nyt >= (nyh+1))) { i1 = nxhd*nyh; i0 = i1 + ll; i1 += l1; t1 = f[i1]; f[i1] = 0.5*(cimagf(f[i0] + t1) + crealf(f[i0] - t1)*_Complex_I); f[i0] = 0.5*(crealf(f[i0] + t1) + cimagf(f[i0] - t1)*_Complex_I); } } return; /* forward fourier transform */ L90: nrzb = nxhyz/nz; nrz = nxyz/nz; /* scramble modes kx = 0, nx/2 */ for (n = 1; n < nzh; n++) { ll = nxhyd*n; l1 = nxhyd*nz - ll; if (nyi==1) { t1 = cimagf(f[l1]) + crealf(f[l1])*_Complex_I; f[l1] = conjf(f[ll] - t1); f[ll] += t1; } if ((nyi <= (nyh+1)) && (nyt >= (nyh+1))) { i1 = nxhd*nyh; i0 = i1 + ll; i1 += l1; t1 = cimagf(f[i1]) + crealf(f[i1])*_Complex_I; f[i1] = conjf(f[i0] - t1); f[i0] += t1; } } /* bit-reverse array elements in z */ #pragma omp parallel for \ private(i,j,k,l,n,ns,ns2,km,kmr,k1,k2,j1,j2,ll,l1,i0,i1,ioff,t1,t2) for (n = nyi-1; n < nyt; n++) { ioff = nxhd*n; for (l = 0; l < nz; l++) { ll = nxhyd*l; l1 = (mixup[l] - 1)/nrzb; if (l < l1) { l1 = nxhyd*l1; i0 = ioff + ll; i1 = ioff + l1; for (i = 0; i < nxh; i++) { t1 = f[i+i1]; f[i+i1] = f[i+i0]; f[i+i0] = t1; } } } /* first transform in z */ ns = 1; for (l = 0; l < indz; l++) { ns2 = ns + ns; km = nzh/ns; kmr = km*nrz; for (k = 0; k < km; k++) { k1 = ns2*k; k2 = k1 + ns; for (j = 0; j < ns; j++) { j1 = nxhyd*(j + k1); j2 = nxhyd*(j + k2); t1 = conjf(sct[kmr*j]); i0 = ioff + j1; i1 = ioff + j2; for (i = 0; i < nxh; i++) { t2 = t1*f[i+i1]; f[i+i1] = f[i+i0] - t2; f[i+i0] += t2; } } } ns = ns2; } } return; } /*--------------------------------------------------------------------*/ void cfft3rm3xy(float complex f[], int isign, int mixup[], float complex sct[], int indx, int indy, int indz, int nzi, int nzp, int nxhd, int nyd, int nzd, int nxhyzd, int nxyzhd) { /* this subroutine performs the x-y part of 3 three dimensional complex to real fast fourier transforms and their inverses, for a subset of z, using complex arithmetic, with OpenMP for 
isign = (-1,1), input: all, output: f for isign = -1, approximate flop count: N*(5*log2(N) + 19/2) for isign = 1, approximate flop count: N*(5*log2(N) + 15/2) where N = (nx/2)*ny*nz indx/indy/indz = exponent which determines length in x/y/z direction, where nx=2**indx, ny=2**indy, nz=2**indz if isign = -1, three inverse fourier transforms in x and y are performed f[i][m][n][0:2] = (1/nx*ny*nz)*sum(f[i][k][j][0:2]*exp(-sqrt(-1)*2pi*n*j/nx) *exp(-sqrt(-1)*2pi*m*k/ny)) if isign = 1, three forward fourier transforms in x and y are performed f[l][k][j][0:2] = sum(f[l][m][n][0:2]*exp(sqrt(-1)*2pi*n*j/nx)* exp(sqrt(-1)*2pi*m*k/ny)) mixup = array of bit reversed addresses sct = sine/cosine table nzi = initial z index used nzp = number of z indices used nxhd = second dimension of f nyd,nzd = third and fourth dimensions of f nxhyzd = maximum of (nx/2,ny,nz) nxyzhd = maximum of (nx,ny,nz)/2 fourier coefficients are stored as follows: f[l][k][j][0:2] = real, imaginary part of mode j,k,l where 0 <= j < nx/2, 0 <= k < ny, 0 <= l < nz, except for f[l][k][0][0:2] = real, imaginary part of mode nx/2,k,l, where ny/2+1 <= k < ny and 0 <= l < nz, and f[l][0][0][0:2] = real, imaginary part of mode nx/2,0,l, f[l][ny/2][0][0:2] = real, imaginary part mode nx/2,ny/2,l, where nz/2+1 <= l < nz, and imag(f[0][0][0][0:2]) = real part of mode nx/2,0,0 imag(f[0][ny/2][0][0:2]) = real part of mode nx/2,ny/2,0 imag(f[nz/2][0][0][0:2]) = real part of mode nx/2,0,nz/2 imag(f[nz/2][ny/2][0][0:2]) = real part of mode nx/2,ny/2,nz/2 using jpl storage convention, as described in: E. Huang, P. C. Liewer, V. K. Decyk, and R. D. Ferraro, "Concurrent Three-Dimensional Fast Fourier Transform Algorithms for Coarse-Grained Distributed Memory Parallel Computers," Caltech CRPC Report 217-50, December 1993. written by viktor k. decyk, ucla local data */ int indx1, ndx1yz, nx, nxh, nxhh, ny, nyh; int nz, nxyz, nxhyz, nzt, nrx, nry, nrxb, nryb, nxhd3, nxhyd; int i, j, k, l, n, nn, jj, j1, j2, k1, k2, ns, ns2, km, kmr, joff; float at1, at2, ani; float complex t1, t2, t3, t4; if (isign==0) return; indx1 = indx - 1; ndx1yz = indx1 > indy ? indx1 : indy; ndx1yz = ndx1yz > indz ? ndx1yz : indz; nx = 1L<<indx; nxh = nx/2; nxhh = nx/4; ny = 1L<<indy; nyh = ny/2; nz = 1L<<indz; nxyz = nx > ny ? nx : ny; nxyz = nxyz > nz ? 
nxyz : nz; nxhyz = 1L<<ndx1yz; nzt = nzi + nzp - 1; nxhd3 = 3*nxhd; nxhyd = nxhd3*nyd; if (isign > 0) goto L230; /* inverse fourier transform */ nrxb = nxhyz/nxh; nrx = nxyz/nxh; nryb = nxhyz/ny; nry = nxyz/ny; #pragma omp parallel for \ private(i,j,k,l,n,ns,ns2,km,kmr,k1,k2,jj,j1,j2,nn,joff,at1,at2,ani,t1, \ t2,t3,t4) for (n = nzi-1; n < nzt; n++) { nn = nxhyd*n; /* swap complex components */ for (i = 0; i < ny; i++) { joff = nxhd3*i + nn; for (j = 0; j < nxh; j++) { at1 = crealf(f[2+3*j+joff]); f[2+3*j+joff] = crealf(f[1+3*j+joff]) + cimagf(f[2+3*j+joff])*_Complex_I; at2 = cimagf(f[1+3*j+joff]); f[1+3*j+joff] = cimagf(f[3*j+joff]) + at1*_Complex_I; f[3*j+joff] = crealf(f[3*j+joff]) + at2*_Complex_I; } } /* bit-reverse array elements in x */ for (j = 0; j < nxh; j++) { j1 = (mixup[j] - 1)/nrxb; if (j < j1) { for (i = 0; i < ny; i++) { joff = nxhd3*i + nn; t1 = f[3*j1+joff]; t2 = f[1+3*j1+joff]; t3 = f[2+3*j1+joff]; f[3*j1+joff] = f[3*j+joff]; f[1+3*j1+joff] = f[1+3*j+joff]; f[2+3*j1+joff] = f[2+3*j+joff]; f[3*j+joff] = t1; f[1+3*j+joff] = t2; f[2+3*j+joff] = t3; } } } /* first transform in x */ ns = 1; for (l = 0; l < indx1; l++) { ns2 = ns + ns; km = nxhh/ns; kmr = km*nrx; for (k = 0; k < km; k++) { k1 = ns2*k; k2 = k1 + ns; for (j = 0; j < ns; j++) { j1 = j + k1; j2 = j + k2; t1 = sct[kmr*j]; for (i = 0; i < ny; i++) { joff = nxhd3*i + nn; t2 = t1*f[3*j2+joff]; t3 = t1*f[1+3*j2+joff]; t4 = t1*f[2+3*j2+joff]; f[3*j2+joff] = f[3*j1+joff] - t2; f[1+3*j2+joff] = f[1+3*j1+joff] - t3; f[2+3*j2+joff] = f[2+3*j1+joff] - t4; f[3*j1+joff] += t2; f[1+3*j1+joff] += t3; f[2+3*j1+joff] += t4; } } } ns = ns2; } /* unscramble coefficients and normalize */ kmr = nxyz/nx; ani = 0.5/(((float) nx)*((float) ny)*((float) nz)); for (j = 1; j < nxhh; j++) { t3 = cimagf(sct[kmr*j]) - crealf(sct[kmr*j])*_Complex_I; for (k = 0; k < ny; k++) { joff = nxhd3*k + nn; for (jj = 0; jj < 3; jj++) { t2 = conjf(f[jj+3*(nxh-j)+joff]); t1 = f[jj+3*j+joff] + t2; t2 = (f[jj+3*j+joff] - t2)*t3; f[jj+3*j+joff] = ani*(t1 + t2); f[jj+3*(nxh-j)+joff] = ani*conjf(t1 - t2); } } } ani = 2.0*ani; for (k = 0; k < ny; k++) { joff = nxhd3*k + nn; for (jj = 0; jj < 3; jj++) { f[jj+3*nxhh+joff] = ani*conjf(f[jj+3*nxhh+joff]); f[jj+joff] = ani*((crealf(f[jj+joff]) + cimagf(f[jj+joff])) + (crealf(f[jj+joff]) - cimagf(f[jj+joff]))*_Complex_I); } } /* bit-reverse array elements in y */ for (k = 0; k < ny; k++) { joff = nxhd3*k + nn; k1 = (mixup[k] - 1)/nryb; if (k < k1) { k1 = nxhd3*k1 + nn; for (i = 0; i < nxh; i++) { t1 = f[3*i+k1]; t2 = f[1+3*i+k1]; t3 = f[2+3*i+k1]; f[3*i+k1] = f[3*i+joff]; f[1+3*i+k1] = f[1+3*i+joff]; f[2+3*i+k1] = f[2+3*i+joff]; f[3*i+joff] = t1; f[1+3*i+joff] = t2; f[2+3*i+joff] = t3; } } } /* then transform in y */ ns = 1; for (l = 0; l < indy; l++) { ns2 = ns + ns; km = nyh/ns; kmr = km*nry; for (k = 0; k < km; k++) { k1 = ns2*k; k2 = k1 + ns; for (j = 0; j < ns; j++) { j1 = nxhd3*(j + k1) + nn; j2 = nxhd3*(j + k2) + nn; t1 = sct[kmr*j]; for (i = 0; i < nxh; i++) { t2 = t1*f[3*i+j2]; t3 = t1*f[1+3*i+j2]; t4 = t1*f[2+3*i+j2]; f[3*i+j2] = f[3*i+j1] - t2; f[1+3*i+j2] = f[1+3*i+j1] - t3; f[2+3*i+j2] = f[2+3*i+j1] - t4; f[3*i+j1] += t2; f[1+3*i+j1] += t3; f[2+3*i+j1] += t4; } } } ns = ns2; } /* unscramble modes kx = 0, nx/2 */ for (k = 1; k < nyh; k++) { joff = nxhd3*k; k1 = nxhd3*ny - joff + nn; joff += nn; for (jj = 0; jj < 3; jj++) { t1 = f[jj+k1]; f[jj+k1] = 0.5*(cimagf(f[jj+joff] + t1) + crealf(f[jj+joff] - t1)*_Complex_I); f[jj+joff] = 0.5*(crealf(f[jj+joff] + t1) + cimagf(f[jj+joff] - t1)*_Complex_I); } } } return; 
/* forward fourier transform */ L230: nryb = nxhyz/ny; nry = nxyz/ny; nrxb = nxhyz/nxh; nrx = nxyz/nxh; #pragma omp parallel for \ private(i,j,k,l,n,ns,ns2,km,kmr,k1,k2,jj,j1,j2,nn,joff,at1,at2,t1,t2, \ t3,t4) for (n = nzi-1; n < nzt; n++) { nn = nxhyd*n; /* scramble modes kx = 0, nx/2 */ for (k = 1; k < nyh; k++) { joff = nxhd3*k; k1 = nxhd3*ny - joff + nn; joff += nn; for (jj = 0; jj < 3; jj++) { t1 = cimagf(f[jj+k1]) + crealf(f[jj+k1])*_Complex_I; f[jj+k1] = conjf(f[jj+joff] - t1); f[jj+joff] += t1; } } /* bit-reverse array elements in y */ for (k = 0; k < ny; k++) { joff = nxhd3*k + nn; k1 = (mixup[k] - 1)/nryb; if (k < k1) { k1 = nxhd3*k1 + nn; for (i = 0; i < nxh; i++) { t1 = f[3*i+k1]; t2 = f[1+3*i+k1]; t3 = f[2+3*i+k1]; f[3*i+k1] = f[3*i+joff]; f[1+3*i+k1] = f[1+3*i+joff]; f[2+3*i+k1] = f[2+3*i+joff]; f[3*i+joff] = t1; f[1+3*i+joff] = t2; f[2+3*i+joff] = t3; } } } /* then transform in y */ ns = 1; for (l = 0; l < indy; l++) { ns2 = ns + ns; km = nyh/ns; kmr = km*nry; for (k = 0; k < km; k++) { k1 = ns2*k; k2 = k1 + ns; for (j = 0; j < ns; j++) { j1 = nxhd3*(j + k1) + nn; j2 = nxhd3*(j + k2) + nn; t1 = conjf(sct[kmr*j]); for (i = 0; i < nxh; i++) { t2 = t1*f[3*i+j2]; t3 = t1*f[1+3*i+j2]; t4 = t1*f[2+3*i+j2]; f[3*i+j2] = f[3*i+j1] - t2; f[1+3*i+j2] = f[1+3*i+j1] - t3; f[2+3*i+j2] = f[2+3*i+j1] - t4; f[3*i+j1] += t2; f[1+3*i+j1] += t3; f[2+3*i+j1] += t4; } } } ns = ns2; } /* scramble coefficients */ kmr = nxyz/nx; for (j = 1; j < nxhh; j++) { t3 = cimagf(sct[kmr*j]) + crealf(sct[kmr*j])*_Complex_I; for (k = 0; k < ny; k++) { joff = nxhd3*k + nn; for (jj = 0; jj < 3; jj++) { t2 = conjf(f[jj+3*(nxh-j)+joff]); t1 = f[jj+3*j+joff] + t2; t2 = (f[jj+3*j+joff] - t2)*t3; f[jj+3*j+joff] = t1 + t2; f[jj+3*(nxh-j)+joff] = conjf(t1 - t2); } } } for (k = 0; k < ny; k++) { joff = nxhd3*k + nn; for (jj = 0; jj < 3; jj++) { f[jj+3*nxhh+joff] = 2.0*conjf(f[jj+3*nxhh+joff]); f[jj+joff] = (crealf(f[jj+joff]) + cimagf(f[jj+joff])) + (crealf(f[jj+joff]) - cimagf(f[jj+joff]))*_Complex_I; } } /* bit-reverse array elements in x */ for (j = 0; j < nxh; j++) { j1 = (mixup[j] - 1)/nrxb; if (j < j1) { for (i = 0; i < ny; i++) { joff = nxhd3*i + nn; t1 = f[3*j1+joff]; t2 = f[1+3*j1+joff]; t3 = f[2+3*j1+joff]; f[3*j1+joff] = f[3*j+joff]; f[1+3*j1+joff] = f[1+3*j+joff]; f[2+3*j1+joff] = f[2+3*j+joff]; f[3*j+joff] = t1; f[1+3*j+joff] = t2; f[2+3*j+joff] = t3; } } } /* finally transform in x */ ns = 1; for (l = 0; l < indx1; l++) { ns2 = ns + ns; km = nxhh/ns; kmr = km*nrx; for (k = 0; k < km; k++) { k1 = ns2*k; k2 = k1 + ns; for (j = 0; j < ns; j++) { j1 = j + k1; j2 = j + k2; t1 = conjf(sct[kmr*j]); for (i = 0; i < ny; i++) { joff = nxhd3*i + nn; t2 = t1*f[3*j2+joff]; t3 = t1*f[1+3*j2+joff]; t4 = t1*f[2+3*j2+joff]; f[3*j2+joff] = f[3*j1+joff] - t2; f[1+3*j2+joff] = f[1+3*j1+joff] - t3; f[2+3*j2+joff] = f[2+3*j1+joff] - t4; f[3*j1+joff] += t2; f[1+3*j1+joff] += t3; f[2+3*j1+joff] += t4; } } } ns = ns2; } /* swap complex components */ for (i = 0; i < ny; i++) { joff = nxhd3*i + nn; for (j = 0; j < nxh; j++) { at1 = crealf(f[2+3*j+joff]); f[2+3*j+joff] = cimagf(f[1+3*j+joff]) + cimagf(f[2+3*j+joff])*_Complex_I; at2 = crealf(f[1+3*j+joff]); f[1+3*j+joff] = at1 + cimagf(f[3*j+joff])*_Complex_I; f[3*j+joff] = crealf(f[3*j+joff]) + at2*_Complex_I; } } } return; } /*--------------------------------------------------------------------*/ void cfft3rm3z(float complex f[], int isign, int mixup[], float complex sct[], int indx, int indy, int indz, int nyi, int nyp, int nxhd, int nyd, int nzd, int nxhyzd, int nxyzhd) { /* this 
subroutine performs the z part of 3 three dimensional complex to real fast fourier transforms and their inverses, for a subset of y, using complex arithmetic, with OpenMP for isign = (-1,1), input: all, output: f for isign = -1, approximate flop count: N*(5*log2(N) + 19/2) for isign = 1, approximate flop count: N*(5*log2(N) + 15/2) where N = (nx/2)*ny*nz indx/indy/indz = exponent which determines length in x/y/z direction, where nx=2**indx, ny=2**indy, nz=2**indz if isign = -1, three inverse fourier transforms in z are performed f[l][k][j][0:2] = sum(f[i][k][j][0:2]*exp(-sqrt(-1)*2pi*l*i/nz)) if isign = 1, three forward fourier transforms in z are performed f[i][m][n][0:2] = sum(f[l][m][n][0:2]*exp(sqrt(-1)*2pi*l*i/nz)) mixup = array of bit reversed addresses sct = sine/cosine table nyi = initial y index used nyp = number of y indices used nxhd = second dimension of f nyd,nzd = third and fourth dimensions of f nxhyzd = maximum of (nx/2,ny,nz) nxyzhd = maximum of (nx,ny,nz)/2 fourier coefficients are stored as follows: f[l][k][j][0:2] = real, imaginary part of mode j,k,l where 0 <= j < nx/2, 0 <= k < ny, 0 <= l < nz, except for f[l][k][0][0:2], = real, imaginary part of mode nx/2,k,l, where ny/2+1 <= k < ny and 0 <= l < nz, and f[l][0][0][0:2] = real, imaginary part of mode nx/2,0,l, f[l][ny/2][0][0:2] = real, imaginary part mode nx/2,ny/2,l, where nz/2+1 <= l < nz, and imag(f[0][0][0][0:2]) = real part of mode nx/2,0,0 imag(f[0][ny/2][0][0:2]) = real part of mode nx/2,ny/2,0 imag(f[nz/2][0][0][0:2]) = real part of mode nx/2,0,nz/2 imag(f[nz/2][ny/2][0][0:2]) = real part of mode nx/2,ny/2,nz/2 using jpl storage convention, as described in: E. Huang, P. C. Liewer, V. K. Decyk, and R. D. Ferraro, "Concurrent Three-Dimensional Fast Fourier Transform Algorithms for Coarse-Grained Distributed Memory Parallel Computers," Caltech CRPC Report 217-50, December 1993. written by viktor k. decyk, ucla local data */ int indx1, ndx1yz, nx, nxh, ny, nyh; int nz, nzh, nxyz, nxhyz, nyt, nrz, nrzb, nxhd3, nxhyd, ioff; int i, j, k, l, n, ll, jj, j1, j2, k1, k2, l1, ns, ns2, km, kmr; int i0, i1; float complex t1, t2, t3, t4; if (isign==0) return; indx1 = indx - 1; ndx1yz = indx1 > indy ? indx1 : indy; ndx1yz = ndx1yz > indz ? ndx1yz : indz; nx = 1L<<indx; nxh = nx/2; ny = 1L<<indy; nyh = ny/2; nz = 1L<<indz; nzh = nz/2; nxyz = nx > ny ? nx : ny; nxyz = nxyz > nz ? 
nxyz : nz; nxhyz = 1L<<ndx1yz; nyt = nyi + nyp - 1; nxhd3 = 3*nxhd; nxhyd = nxhd3*nyd; if (isign > 0) goto L110; /* inverse fourier transform */ nrzb = nxhyz/nz; nrz = nxyz/nz; /* bit-reverse array elements in z */ #pragma omp parallel for \ private(i,j,k,l,n,ns,ns2,km,kmr,k1,k2,j1,j2,ll,l1,i0,i1,ioff,t1,t2,t3, \ t4) for (n = nyi-1; n < nyt; n++) { ioff = nxhd3*n; for (l = 0; l < nz; l++) { ll = nxhyd*l; l1 = (mixup[l] - 1)/nrzb; if (l < l1) { l1 = nxhyd*l1; i0 = ioff + ll; i1 = ioff + l1; for (i = 0; i < nxh; i++) { t1 = f[3*i+i1]; t2 = f[1+3*i+i1]; t3 = f[2+3*i+i1]; f[3*i+i1] = f[3*i+i0]; f[1+3*i+i1] = f[1+3*i+i0]; f[2+3*i+i1] = f[2+3*i+i0]; f[3*i+i0] = t1; f[1+3*i+i0] = t2; f[2+3*i+i0] = t3; } } } /* finally transform in z */ ns = 1; for (l = 0; l < indz; l++) { ns2 = ns + ns; km = nzh/ns; kmr = km*nrz; for (k = 0; k < km; k++) { k1 = ns2*k; k2 = k1 + ns; for (j = 0; j < ns; j++) { j1 = nxhyd*(j + k1); j2 = nxhyd*(j + k2); t1 = sct[kmr*j]; i0 = ioff + j1; i1 = ioff + j2; for (i = 0; i < nxh; i++) { t2 = t1*f[3*i+i1]; t3 = t1*f[1+3*i+i1]; t4 = t1*f[2+3*i+i1]; f[3*i+i1] = f[3*i+i0] - t2; f[1+3*i+i1] = f[1+3*i+i0] - t3; f[2+3*i+i1] = f[2+3*i+i0] - t4; f[3*i+i0] += t2; f[1+3*i+i0] += t3; f[2+3*i+i0] += t4; } } } ns = ns2; } } /* unscramble modes kx = 0, nx/2 */ for (n = 1; n < nzh; n++) { ll = nxhyd*n; l1 = nxhyd*nz - ll; if (nyi==1) { for (jj = 0; jj < 3; jj++) { t1 = f[jj+l1]; f[jj+l1] = 0.5*(cimagf(f[jj+ll] + t1) + crealf(f[jj+ll] - t1)*_Complex_I); f[jj+ll] = 0.5*(crealf(f[jj+ll] + t1) + cimagf(f[jj+ll] - t1)*_Complex_I); } } if ((nyi <= (nyh+1)) && (nyt >= (nyh+1))) { for (jj = 0; jj < 3; jj++) { i1 = nxhd3*nyh; i0 = i1 + ll; i1 += l1; t1 = f[jj+i1]; f[jj+i1] = 0.5*(cimagf(f[jj+i0] + t1) + crealf(f[jj+i0] - t1)*_Complex_I); f[jj+i0] = 0.5*(crealf(f[jj+i0] + t1) + cimagf(f[jj+i0] - t1)*_Complex_I); } } } return; /* forward fourier transform */ L110: nrzb = nxhyz/nz; nrz = nxyz/nz; /* scramble modes kx = 0, nx/2 */ for (n = 1; n < nzh; n++) { ll = nxhyd*n; l1 = nxhyd*nz - ll; if (nyi==1) { for (jj = 0; jj < 3; jj++) { t1 = cimagf(f[jj+l1]) + crealf(f[jj+l1])*_Complex_I; f[jj+l1] = conjf(f[jj+ll] - t1); f[jj+ll] += t1; } } if ((nyi <= (nyh+1)) && (nyt >= (nyh+1))) { for (jj = 0; jj < 3; jj++) { i1 = nxhd3*nyh; i0 = i1 + ll; i1 += l1; t1 = cimagf(f[jj+i1]) + crealf(f[jj+i1])*_Complex_I; f[jj+i1] = conjf(f[jj+i0] - t1); f[jj+i0] += t1; } } } /* bit-reverse array elements in z */ #pragma omp parallel for \ private(i,j,k,l,n,ns,ns2,km,kmr,k1,k2,j1,j2,ll,l1,i0,i1,ioff,t1,t2,t3, \ t4) for (n = nyi-1; n < nyt; n++) { ioff = nxhd3*n; for (l = 0; l < nz; l++) { ll = nxhyd*l; l1 = (mixup[l] - 1)/nrzb; if (l < l1) { l1 = nxhyd*l1; i0 = ioff + ll; i1 = ioff+ l1; for (i = 0; i < nxh; i++) { t1 = f[3*i+i1]; t2 = f[1+3*i+i1]; t3 = f[2+3*i+i1]; f[3*i+i1] = f[3*i+i0]; f[1+3*i+i1] = f[1+3*i+i0]; f[2+3*i+i1] = f[2+3*i+i0]; f[3*i+i0] = t1; f[1+3*i+i0] = t2; f[2+3*i+i0] = t3; } } } /* first transform in z */ ns = 1; for (l = 0; l < indz; l++) { ns2 = ns + ns; km = nzh/ns; kmr = km*nrz; for (k = 0; k < km; k++) { k1 = ns2*k; k2 = k1 + ns; for (j = 0; j < ns; j++) { j1 = nxhyd*(j + k1); j2 = nxhyd*(j + k2); t1 = conjf(sct[kmr*j]); i0 = ioff + j1; i1 = ioff + j2; for (i = 0; i < nxh; i++) { t2 = t1*f[3*i+i1]; t3 = t1*f[1+3*i+i1]; t4 = t1*f[2+3*i+i1]; f[3*i+i1] = f[3*i+i0] - t2; f[1+3*i+i1] = f[1+3*i+i0] - t3; f[2+3*i+i1] = f[2+3*i+i0] - t4; f[3*i+i0] += t2; f[1+3*i+i0] += t3; f[2+3*i+i0] += t4; } } } ns = ns2; } } return; } /*--------------------------------------------------------------------*/ void 
cwfft3rmx(float complex f[], int isign, int mixup[], float complex sct[], int indx, int indy, int indz, int nxhd, int nyd, int nzd, int nxhyzd, int nxyzhd) { /* wrapper function for real to complex fft, with packed data */ /* local data */ int ny, nz; static int nyi = 1, nzi = 1; /* calculate range of indices */ ny = 1L<<indy; nz = 1L<<indz; /* inverse fourier transform */ if (isign < 0) { /* perform xy fft */ cfft3rmxy(f,isign,mixup,sct,indx,indy,indz,nzi,nz,nxhd,nyd,nzd, nxhyzd,nxyzhd); /* perform z fft */ cfft3rmxz(f,isign,mixup,sct,indx,indy,indz,nyi,ny,nxhd,nyd,nzd, nxhyzd,nxyzhd); } /* forward fourier transform */ else if (isign > 0) { /* perform z fft */ cfft3rmxz(f,isign,mixup,sct,indx,indy,indz,nyi,ny,nxhd,nyd,nzd, nxhyzd,nxyzhd); /* perform xy fft */ cfft3rmxy(f,isign,mixup,sct,indx,indy,indz,nzi,nz,nxhd,nyd,nzd, nxhyzd,nxyzhd); } return; } /*--------------------------------------------------------------------*/ void cwfft3rm3(float complex f[], int isign, int mixup[], float complex sct[], int indx, int indy, int indz, int nxhd, int nyd, int nzd, int nxhyzd, int nxyzhd) { /* wrapper function for 3 3d real to complex ffts, with packed data */ /* parallelized with OpenMP */ /* local data */ int ny, nz; static int nyi = 1, nzi = 1; /* calculate range of indices */ ny = 1L<<indy; nz = 1L<<indz; /* inverse fourier transform */ if (isign < 0) { /* perform xy fft */ cfft3rm3xy(f,isign,mixup,sct,indx,indy,indz,nzi,nz,nxhd,nyd,nzd, nxhyzd,nxyzhd); /* perform z fft */ cfft3rm3z(f,isign,mixup,sct,indx,indy,indz,nyi,ny,nxhd,nyd,nzd, nxhyzd,nxyzhd); } /* forward fourier transform */ else if (isign > 0) { /* perform z fft */ cfft3rm3z(f,isign,mixup,sct,indx,indy,indz,nyi,ny,nxhd,nyd,nzd, nxhyzd,nxyzhd); /* perform xy fft */ cfft3rm3xy(f,isign,mixup,sct,indx,indy,indz,nzi,nz,nxhd,nyd,nzd, nxhyzd,nxyzhd); } return; } /* Interfaces to Fortran */ /*--------------------------------------------------------------------*/ void cdistr3_(float *part, float *vtx, float *vty, float *vtz, float *vdx, float *vdy, float *vdz, int *npx, int *npy, int *npz, int *idimp, int *nop, int *nx, int *ny, int *nz, int *ipbc) { cdistr3(part,*vtx,*vty,*vtz,*vdx,*vdy,*vdz,*npx,*npy,*npz,*idimp, *nop,*nx,*ny,*nz,*ipbc); return; } /*--------------------------------------------------------------------*/ void cdblkp3l_(float *part, int *kpic, int *nppmx, int *idimp, int *nop, int *mx, int *my, int *mz, int *mx1, int *my1, int *mxyz1, int *irc) { cdblkp3l(part,kpic,nppmx,*idimp,*nop,*mx,*my,*mz,*mx1,*my1,*mxyz1, irc); return; } /*--------------------------------------------------------------------*/ void cppmovin3l_(float *part, float *ppart, int *kpic, int *nppmx, int *idimp, int *nop, int *mx, int *my, int *mz, int *mx1, int *my1, int *mxyz1, int *irc) { cppmovin3l(part,ppart,kpic,*nppmx,*idimp,*nop,*mx,*my,*mz,*mx1,*my1, *mxyz1,irc); return; } /*--------------------------------------------------------------------*/ void cppcheck3l_(float *ppart, int *kpic, int *idimp, int *nppmx, int *nx, int *ny, int *nz, int *mx, int *my, int *mz, int *mx1, int *my1, int *mz1, int *irc) { cppcheck3l(ppart,kpic,*idimp,*nppmx,*nx,*ny,*nz,*mx,*my,*mz,*mx1, *my1,*mz1,irc); return; } /*--------------------------------------------------------------------*/ void cgbppush3l_(float *ppart, float *fxyz, float *bxyz, int *kpic, float *qbm, float *dt, float *dtc, float *ek, int *idimp, int *nppmx, int *nx, int *ny, int *nz, int *mx, int *my, int *mz, int *nxv, int *nyv, int *nzv, int *mx1, int *my1, int *mxyz1, int *ipbc) { 
cgbppush3l(ppart,fxyz,bxyz,kpic,*qbm,*dt,*dtc,ek,*idimp,*nppmx,*nx, *ny,*nz,*mx,*my,*mz,*nxv,*nyv,*nzv,* mx1,*my1,*mxyz1, *ipbc); return; } /*--------------------------------------------------------------------*/ void cgbppushf3l_(float *ppart, float *fxyz, float *bxyz, int *kpic, int *ncl, int *ihole, float *qbm, float *dt, float *dtc, float *ek, int *idimp, int *nppmx, int *nx, int *ny, int *nz, int *mx, int *my, int *mz, int *nxv, int *nyv, int *nzv, int *mx1, int *my1, int *mxyz1, int *ntmax, int *irc) { cgbppushf3l(ppart,fxyz,bxyz,kpic,ncl,ihole,*qbm,*dt,*dtc,ek,*idimp, *nppmx,*nx,*ny,*nz,*mx,*my,*mz,*nxv,*nyv,*nzv,*mx1,*my1, *mxyz1,*ntmax,irc); return; } /*--------------------------------------------------------------------*/ void cgrbppush3l_(float *ppart, float *fxyz, float *bxyz, int *kpic, float *qbm, float *dt, float *dtc, float *ci, float *ek, int *idimp, int *nppmx, int *nx, int *ny, int *nz, int *mx, int *my, int *mz, int *nxv, int *nyv, int *nzv, int *mx1, int *my1, int *mxyz1, int *ipbc) { cgrbppush3l(ppart,fxyz,bxyz,kpic,*qbm,*dt,*dtc,*ci,ek,*idimp,*nppmx, *nx,*ny,*nz,*mx,*my,*mz,*nxv,*nyv,*nzv,*mx1,*my1,*mxyz1, *ipbc); return; } /*--------------------------------------------------------------------*/ void cgrbppushf3l_(float *ppart, float *fxyz, float *bxyz, int *kpic, int *ncl, int *ihole, float *qbm, float *dt, float *dtc, float *ci, float *ek, int *idimp, int *nppmx, int *nx, int *ny, int *nz, int *mx, int *my, int *mz, int *nxv, int *nyv, int *nzv, int *mx1, int *my1, int *mxyz1, int* ntmax, int *irc) { cgrbppushf3l(ppart,fxyz,bxyz,kpic,ncl,ihole,*qbm,*dt,*dtc,*ci,ek, *idimp,*nppmx,*nx,*ny,*nz,*mx,*my,*mz,*nxv,*nyv,*nzv, *mx1,*my1,*mxyz1,*ntmax,irc); return; } /*--------------------------------------------------------------------*/ void cgppost3l_(float *ppart, float *q, int *kpic, float *qm, int *nppmx, int *idimp, int *mx, int *my, int *mz, int *nxv, int *nyv, int *nzv, int *mx1, int *my1, int *mxyz1) { cgppost3l(ppart,q,kpic,*qm,*nppmx,*idimp,*mx,*my,*mz,*nxv,*nyv,*nzv, *mx1,*my1,*mxyz1); return; } /*--------------------------------------------------------------------*/ void cgjppost3l_(float *ppart, float *cu, int *kpic, float *qm, float *dt, int *nppmx, int *idimp, int *nx, int *ny, int *nz, int *mx, int *my, int *mz, int *nxv, int *nyv, int *nzv, int *mx1, int *my1, int *mxyz1, int *ipbc) { cgjppost3l(ppart,cu,kpic,*qm,*dt,*nppmx,*idimp,*nx,*ny,*nz,*mx,*my, *mz,*nxv,*nyv,*nzv,*mx1,*my1,*mxyz1,*ipbc); return; } /*--------------------------------------------------------------------*/ void cgjppostf3l_(float *ppart, float *cu, int *kpic, int *ncl, int *ihole, float *qm, float *dt, int *nppmx, int *idimp, int *nx, int *ny, int *nz, int *mx, int *my, int *mz, int *nxv, int *nyv, int *nzv, int *mx1, int *my1, int *mxyz1, int *ntmax, int *irc) { cgjppostf3l(ppart,cu,kpic,ncl,ihole,*qm,*dt,*nppmx,*idimp,*nx,*ny, *nz,*mx,*my,*mz,*nxv,*nyv,*nzv,*mx1,*my1,*mxyz1,*ntmax, irc); return; } /*--------------------------------------------------------------------*/ void cgrjppost3l_(float *ppart, float *cu, int *kpic, float *qm, float *dt, float *ci, int *nppmx, int *idimp, int *nx, int *ny, int *nz, int *mx, int *my, int *mz, int *nxv, int *nyv, int *nzv, int *mx1, int *my1, int *mxyz1, int *ipbc) { cgrjppost3l(ppart,cu,kpic,*qm,*dt,*ci,*nppmx,*idimp,*nx,*ny,*nz,*mx, *my,*mz,*nxv,*nyv,*nzv,*mx1,*my1,*mxyz1,*ipbc); return; } /*--------------------------------------------------------------------*/ void cgrjppostf3l_(float *ppart, float *cu, int *kpic, int *ncl, int *ihole, float 
*qm, float *dt, float *ci, int *nppmx, int *idimp, int *nx, int *ny, int *nz, int *mx, int *my, int *mz, int *nxv, int *nyv, int *nzv, int *mx1, int *my1, int *mxyz1, int *ntmax, int *irc) { cgrjppostf3l(ppart,cu,kpic,ncl,ihole,*qm,*dt,*ci,*nppmx,*idimp,*nx, *ny,*nz,*mx,*my,*mz,*nxv,*nyv,*nzv,*mx1,*my1,*mxyz1, *ntmax,irc); return; } /*--------------------------------------------------------------------*/ void cpporder3l_(float *ppart, float *ppbuff, int *kpic, int *ncl, int *ihole, int *idimp, int *nppmx, int *nx, int *ny, int *nz, int *mx, int *my, int *mz, int *mx1, int *my1, int *mz1, int *npbmx, int *ntmax, int *irc) { cpporder3l(ppart,ppbuff,kpic,ncl,ihole,*idimp,*nppmx,*nx,*ny,*nz,*mx, *my,*mz,*mx1,*my1,*mz1,*npbmx,*ntmax,irc); return; } /*--------------------------------------------------------------------*/ void cpporderf3l_(float *ppart, float *ppbuff, int *kpic, int *ncl, int *ihole, int *idimp, int *nppmx, int *mx1, int *my1, int *mz1, int *npbmx, int *ntmax, int *irc) { cpporderf3l(ppart,ppbuff,kpic,ncl,ihole,*idimp,*nppmx,*mx1,*my1,*mz1, *npbmx,*ntmax,irc); return; } /*--------------------------------------------------------------------*/ void ccguard3l_(float *fxyz, int *nx, int *ny, int *nz, int *nxe, int *nye, int *nze) { ccguard3l(fxyz,*nx,*ny,*nz,*nxe,*nye,*nze); return; } /*--------------------------------------------------------------------*/ void cacguard3l_(float *cu, int *nx, int *ny, int *nz, int *nxe, int *nye, int *nze) { cacguard3l(cu,*nx,*ny,*nz,*nxe,*nye,*nze); return; } /*--------------------------------------------------------------------*/ void caguard3l_(float *q, int *nx, int *ny, int *nz, int *nxe, int *nye, int *nze) { caguard3l(q,*nx,*ny,*nz,*nxe,*nye,*nze); return; } /*--------------------------------------------------------------------*/ void cmpois33_(float complex *q, float complex *fxyz, int *isign, float complex *ffc, float *ax, float *ay, float *az, float *affp, float *we, int *nx, int *ny, int *nz, int *nxvh, int *nyv, int *nzv, int *nxhd, int *nyhd, int *nzhd) { cmpois33(q,fxyz,*isign,ffc,*ax,*ay,*az,*affp,we,*nx,*ny,*nz,*nxvh, *nyv,*nzv,*nxhd,*nyhd,*nzhd); return; } /*--------------------------------------------------------------------*/ void cmcuperp3_(float complex *cu, int *nx, int *ny, int *nz, int *nxvh, int *nyv, int *nzv) { cmcuperp3(cu,*nx,*ny,*nz,*nxvh,*nyv,*nzv); return; } /*--------------------------------------------------------------------*/ void cmibpois33_(float complex *cu, float complex *bxyz, float complex *ffc, float *ci, float *wm, int *nx, int *ny, int *nz, int *nxvh, int *nyv, int *nzv, int *nxhd, int *nyhd, int *nzhd) { cmibpois33(cu,bxyz,ffc,*ci,wm,*nx,*ny,*nz,*nxvh,*nyv,*nzv,*nxhd, *nyhd,*nzhd); return; } /*--------------------------------------------------------------------*/ void cmmaxwel3_(float complex *exyz, float complex *bxyz, float complex *cu, float complex *ffc, float *ci, float *dt, float *wf, float *wm, int *nx, int *ny, int *nz, int *nxvh, int *nyv, int *nzv, int *nxhd, int *nyhd, int *nzhd) { cmmaxwel3(exyz,bxyz,cu,ffc,*ci,*dt,wf,wm,*nx,*ny,*nz,*nxvh,*nyv,*nzv, *nxhd,*nyhd,*nzhd); return; } /*--------------------------------------------------------------------*/ void cmemfield3_(float complex *fxyz, float complex *exyz, float complex *ffc, int *isign, int *nx, int *ny, int *nz, int *nxvh, int *nyv, int *nzv, int *nxhd, int *nyhd, int *nzhd) { cmemfield3(fxyz,exyz,ffc,*isign,*nx,*ny,*nz,*nxvh,*nyv,*nzv,*nxhd, *nyhd,*nzhd); return; } /*--------------------------------------------------------------------*/ 
void cwfft3rinit_(int *mixup, float complex *sct, int *indx, int *indy, int *indz, int *nxhyzd, int *nxyzhd) { cwfft3rinit(mixup,sct,*indx,*indy,*indz,*nxhyzd,*nxyzhd); return; } /*--------------------------------------------------------------------*/ void cwfft3rmx_(float complex *f, int *isign, int *mixup, float complex *sct, int *indx, int *indy, int *indz, int *nxhd, int *nyd, int *nzd, int *nxhyzd, int *nxyzhd) { cwfft3rmx(f,*isign,mixup,sct,*indx,*indy,*indz,*nxhd,*nyd,*nzd, *nxhyzd,*nxyzhd); return; } /*--------------------------------------------------------------------*/ void cwfft3rm3_(float complex *f, int *isign, int *mixup, float complex *sct, int *indx, int *indy, int *indz, int *nxhd, int *nyd, int *nzd, int *nxhyzd, int *nxyzhd) { cwfft3rm3(f,*isign,mixup,sct,*indx,*indy,*indz,*nxhd,*nyd,*nzd, *nxhyzd,*nxyzhd); return; }
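/*
 * Hedged illustration of the wrapper pattern used in the file above (not part
 * of the original library): Fortran passes every argument by reference, so
 * each trailing-underscore wrapper dereferences the scalar inputs before
 * forwarding the call, while arrays and output scalars are passed through as
 * plain pointers.  The names cscale3_ / cscale3 below are hypothetical
 * stand-ins for any such wrapper/implementation pair.
 */
/* C implementation: scalar inputs by value, arrays and outputs by pointer */
void cscale3(float *x, float a, int n, float *sum) {
   *sum = 0.0f;
   for (int i = 0; i < n; i++) {
      x[i] *= a;
      *sum += x[i];
   }
}
/* Fortran-callable wrapper: every argument arrives as a pointer */
void cscale3_(float *x, float *a, int *n, float *sum) {
   cscale3(x, *a, *n, sum);
   return;
}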
math.h
/*===---- openmp_wrapper/math.h -------- OpenMP math.h intercept ------ c++ -=== * * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. * See https://llvm.org/LICENSE.txt for license information. * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception * *===-----------------------------------------------------------------------=== */ // If we are in C++ mode and include <math.h> (not <cmath>) first, we still need // to make sure <cmath> is read first. The problem otherwise is that we haven't // seen the declarations of the math.h functions when the system math.h includes // our cmath overlay. However, our cmath overlay, or better the underlying // overlay, e.g. CUDA, uses the math.h functions. Since we haven't declared them // yet we get errors. CUDA avoids this by eagerly declaring all math functions // (in the __device__ space) but we cannot do this. Instead we break the // dependence by forcing cmath to go first. While our cmath will in turn include // this file, the cmath guards will prevent recursion. #ifdef __cplusplus #include <cmath> #endif #ifndef __CLANG_OPENMP_MATH_H__ #define __CLANG_OPENMP_MATH_H__ #ifndef _OPENMP #error "This file is for OpenMP compilation only." #endif #include_next <math.h> // We need limits.h for __clang_cuda_math.h below and because it should not hurt // we include it eagerly here. #include <limits.h> // We need stdlib.h because (for now) __clang_cuda_math.h below declares `abs` // which should live in stdlib.h. #include <stdlib.h> #pragma omp begin declare variant match( \ device = {arch(nvptx, nvptx64)}, implementation = {extension(match_any)}) #define __CUDA__ #define __OPENMP_NVPTX__ #include <__clang_cuda_math.h> #undef __OPENMP_NVPTX__ #undef __CUDA__ #pragma omp end declare variant #ifdef __AMDGCN__ #pragma omp begin declare variant match(device = {arch(amdgcn)}) #define __OPENMP_AMDGCN__ #include <__clang_hip_math.h> #undef __OPENMP_AMDGCN__ #pragma omp end declare variant #endif #endif
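/*
 * Hedged usage sketch (not part of the header above): any translation unit
 * compiled with -fopenmp and an offload target that includes <math.h> picks
 * up the device variants declared by this wrapper inside target regions, so
 * ordinary math calls work on the device without source changes.
 */
#include <math.h>
#include <stdio.h>

int main(void) {
   double s = 0.0;
#pragma omp target map(tofrom: s)
   {
      /* on nvptx/amdgcn this sqrt resolves to the device math variant */
      s = sqrt(2.0);
   }
   printf("%f\n", s);
   return 0;
}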
omp_device_managed_memory.c
// RUN: %libomptarget-compile-run-and-check-nvptx64-nvidia-cuda // REQUIRES: nvptx64-nvidia-cuda #include <omp.h> #include <stdio.h> void *llvm_omp_target_alloc_shared(size_t, int); int main() { const int N = 64; const int device = omp_get_default_device(); int *shared_ptr = llvm_omp_target_alloc_shared(N * sizeof(int), device); #pragma omp target teams distribute parallel for device(device) \ is_device_ptr(shared_ptr) for (int i = 0; i < N; ++i) { shared_ptr[i] = 1; } int sum = 0; for (int i = 0; i < N; ++i) sum += shared_ptr[i]; omp_target_free(shared_ptr, device); // CHECK: PASS if (sum == N) printf ("PASS\n"); }
fft.c
/* Copyright 2013-2014. The Regents of the University of California. * Copyright 2016-2018. Martin Uecker. * Copyright 2018. Massachusetts Institute of Technology. * All rights reserved. Use of this source code is governed by * a BSD-style license which can be found in the LICENSE file. * * Authors: * 2011-2018 Martin Uecker <martin.uecker@med.uni-goettingen.de> * 2014 Frank Ong <frankong@berkeley.edu> * 2018 Siddharth Iyer <ssi@mit.edu> * * * FFT. It uses FFTW or CUFFT internally. * * * Gauss, Carl F. 1805. "Nachlass: Theoria Interpolationis Methodo Nova * Tractata." Werke 3, pp. 265-327, Königliche Gesellschaft der * Wissenschaften, Göttingen, 1866 */ #include <assert.h> #include <complex.h> #include <stdbool.h> #include <math.h> #include <fftw3.h> #include "num/multind.h" #include "num/flpmath.h" #include "num/ops.h" #include "misc/misc.h" #include "misc/debug.h" #include "fft.h" #undef fft_plan_s #ifdef USE_CUDA #include "num/gpuops.h" #include "fft-cuda.h" #define LAZY_CUDA #endif void fftscale2(unsigned int N, const long dimensions[N], unsigned long flags, const long ostrides[N], complex float* dst, const long istrides[N], const complex float* src) { long fft_dims[N]; md_select_dims(N, flags, fft_dims, dimensions); float scale = 1. / sqrtf((float)md_calc_size(N, fft_dims)); md_zsmul2(N, dimensions, ostrides, dst, istrides, src, scale); } void fftscale(unsigned int N, const long dims[N], unsigned long flags, complex float* dst, const complex float* src) { long strs[N]; md_calc_strides(N, strs, dims, CFL_SIZE); fftscale2(N, dims, flags, strs, dst, strs, src); } static double fftmod_phase(long length, int j) { long center1 = length / 2; double shift = (double)center1 / (double)length; return ((double)j - (double)center1 / 2.) * shift; } static void fftmod2_r(unsigned int N, const long dims[N], unsigned long flags, const long ostrs[N], complex float* dst, const long istrs[N], const complex float* src, bool inv, double phase) { if (0 == flags) { md_zsmul2(N, dims, ostrs, dst, istrs, src, cexp(M_PI * 2.i * (inv ? -phase : phase))); return; } /* this will also currently be slow on the GPU because we do not * support strides there on the lowest level */ unsigned int i = N - 1; while (!MD_IS_SET(flags, i)) i--; #if 1 // If there is only one dimensions left and it is the innermost // which is contiguous optimize using md_zfftmod2 if ((0u == MD_CLEAR(flags, i)) && (1 == md_calc_size(i, dims)) && (CFL_SIZE == ostrs[i]) && (CFL_SIZE == istrs[i])) { md_zfftmod2(N - i, dims + i, ostrs + i, dst, istrs + i, src, inv, phase); return; } #endif long tdims[N]; md_select_dims(N, ~MD_BIT(i), tdims, dims); #pragma omp parallel for for (int j = 0; j < dims[i]; j++) fftmod2_r(N, tdims, MD_CLEAR(flags, i), ostrs, (void*)dst + j * ostrs[i], istrs, (void*)src + j * istrs[i], inv, phase + fftmod_phase(dims[i], j)); } static unsigned long clear_singletons(unsigned int N, const long dims[N], unsigned long flags) { return (0 == N) ? flags : clear_singletons(N - 1, dims, (1 == dims[N - 1]) ? 
MD_CLEAR(flags, N - 1) : flags); } void fftmod2(unsigned int N, const long dims[N], unsigned long flags, const long ostrs[N], complex float* dst, const long istrs[N], const complex float* src) { fftmod2_r(N, dims, clear_singletons(N, dims, flags), ostrs, dst, istrs, src, false, 0.); } /* * The correct usage is fftmod before and after fft and * ifftmod before and after ifft (this is different from * how fftshift/ifftshift has to be used) */ void ifftmod2(unsigned int N, const long dims[N], unsigned long flags, const long ostrs[N], complex float* dst, const long istrs[N], const complex float* src) { fftmod2_r(N, dims, clear_singletons(N, dims, flags), ostrs, dst, istrs, src, true, 0.); } void fftmod(unsigned int N, const long dimensions[N], unsigned long flags, complex float* dst, const complex float* src) { long strs[N]; md_calc_strides(N, strs, dimensions, CFL_SIZE); fftmod2(N, dimensions, flags, strs, dst, strs, src); } void ifftmod(unsigned int N, const long dimensions[N], unsigned long flags, complex float* dst, const complex float* src) { long strs[N]; md_calc_strides(N, strs, dimensions, CFL_SIZE); ifftmod2(N, dimensions, flags, strs, dst, strs, src); } void ifftshift2(unsigned int N, const long dims[N], unsigned long flags, const long ostrs[N], complex float* dst, const long istrs[N], const complex float* src) { long pos[N]; md_set_dims(N, pos, 0); for (unsigned int i = 0; i < N; i++) if (MD_IS_SET(flags, i)) pos[i] = dims[i] - dims[i] / 2; md_circ_shift2(N, dims, pos, ostrs, dst, istrs, src, CFL_SIZE); } void ifftshift(unsigned int N, const long dimensions[N], unsigned long flags, complex float* dst, const complex float* src) { long strs[N]; md_calc_strides(N, strs, dimensions, CFL_SIZE); ifftshift2(N, dimensions, flags, strs, dst, strs, src); } void fftshift2(unsigned int N, const long dims[N], unsigned long flags, const long ostrs[N], complex float* dst, const long istrs[N], const complex float* src) { long pos[N]; md_set_dims(N, pos, 0); for (unsigned int i = 0; i < N; i++) if (MD_IS_SET(flags, i)) pos[i] = dims[i] / 2; md_circ_shift2(N, dims, pos, ostrs, dst, istrs, src, CFL_SIZE); } void fftshift(unsigned int N, const long dimensions[N], unsigned long flags, complex float* dst, const complex float* src) { long strs[N]; md_calc_strides(N, strs, dimensions, CFL_SIZE); fftshift2(N, dimensions, flags, strs, dst, strs, src); } struct fft_plan_s { INTERFACE(operator_data_t); fftwf_plan fftw; unsigned int D; unsigned long flags; bool backwards; const long* dims; const long* istrs; const long* ostrs; #ifdef USE_CUDA struct fft_cuda_plan_s* cuplan; #endif }; static DEF_TYPEID(fft_plan_s); #ifdef USE_FFTW_WISDOM static char* fftw_wisdom_name(int N, bool backwards, unsigned int flags, const long dims[N]) { char* tbpath = getenv("TOOLBOX_PATH"); if (NULL == tbpath) return NULL; // Space for path and null terminator. int space = snprintf(NULL, 0, "%s/save/fftw/N_%d_BACKWARD_%d_FLAGS_%d_DIMS", tbpath, N, backwards, flags); // Space for dimensions. for (int idx = 0; idx < N; idx ++) space += snprintf(NULL, 0, "_%lu", dims[idx]); // Space for extension. space += snprintf(NULL, 0, ".fftw"); // Space for null terminator. 
space += 1; int len = space; char* loc = calloc(space, sizeof(char)); if (NULL == loc) error("memory out"); int ret = snprintf(loc, len, "%s/save/fftw/N_%d_BACKWARD_%d_FLAGS_%d_DIMS", tbpath, N, backwards, flags); assert(ret < len); len -= ret; for (int idx = 0; idx < N; idx++) { char tmp[64]; ret = sprintf(tmp, "_%lu", dims[idx]); assert(ret < 64); len -= ret; strcat(loc, tmp); } strcat(loc, ".fftw"); len -= 5; assert(1 == len); assert('\0' == loc[space - 1]); return loc; } #endif //USE_FFTW_WISDOM static fftwf_plan fft_fftwf_plan(unsigned int D, const long dimensions[D], unsigned long flags, const long ostrides[D], complex float* dst, const long istrides[D], const complex float* src, bool backwards, bool measure) { fftwf_plan fftwf; unsigned int N = D; fftwf_iodim64 dims[N]; fftwf_iodim64 hmdims[N]; unsigned int k = 0; unsigned int l = 0; #ifdef USE_FFTW_WISDOM char* wisdom = fftw_wisdom_name(D, backwards, flags, dimensions); if (NULL != wisdom) fftwf_import_wisdom_from_filename(wisdom); #endif //USE_FFTW_WISDOM //FFTW seems to be fine with this //assert(0 != flags); for (unsigned int i = 0; i < N; i++) { if (MD_IS_SET(flags, i)) { dims[k].n = dimensions[i]; dims[k].is = istrides[i] / CFL_SIZE; dims[k].os = ostrides[i] / CFL_SIZE; k++; } else { hmdims[l].n = dimensions[i]; hmdims[l].is = istrides[i] / CFL_SIZE; hmdims[l].os = ostrides[i] / CFL_SIZE; l++; } } #pragma omp critical fftwf = fftwf_plan_guru64_dft(k, dims, l, hmdims, (complex float*)src, dst, backwards ? 1 : (-1), measure ? FFTW_MEASURE : FFTW_ESTIMATE); #ifdef USE_FFTW_WISDOM if (NULL != wisdom) fftwf_export_wisdom_to_filename(wisdom); md_free(wisdom); #endif //USE_FFTW_WISDOM return fftwf; } static void fft_apply(const operator_data_t* _plan, unsigned int N, void* args[N]) { complex float* dst = args[0]; const complex float* src = args[1]; const auto plan = CAST_DOWN(fft_plan_s, _plan); assert(2 == N); if (0u == plan->flags) { md_copy2(plan->D, plan->dims, plan->ostrs, dst, plan->istrs, src, CFL_SIZE); return; } #ifdef USE_CUDA if (cuda_ondevice(src)) { #ifdef LAZY_CUDA if (NULL == plan->cuplan) ((struct fft_plan_s*)plan)->cuplan = fft_cuda_plan(plan->D, plan->dims, plan->flags, plan->ostrs, plan->istrs, plan->backwards); #endif assert(NULL != plan->cuplan); fft_cuda_exec(plan->cuplan, dst, src); } else #endif { assert(NULL != plan->fftw); fftwf_execute_dft(plan->fftw, (complex float*)src, dst); } } static void fft_free_plan(const operator_data_t* _data) { const auto plan = CAST_DOWN(fft_plan_s, _data); if (NULL != plan->fftw) fftwf_destroy_plan(plan->fftw); #ifdef USE_CUDA if (NULL != plan->cuplan) fft_cuda_free_plan(plan->cuplan); #endif xfree(plan->dims); xfree(plan->istrs); xfree(plan->ostrs); xfree(plan); } const struct operator_s* fft_measure_create(unsigned int D, const long dimensions[D], unsigned long flags, bool inplace, bool backwards) { flags &= md_nontriv_dims(D, dimensions); PTR_ALLOC(struct fft_plan_s, plan); SET_TYPEID(fft_plan_s, plan); complex float* src = md_alloc(D, dimensions, CFL_SIZE); complex float* dst = inplace ? 
src : md_alloc(D, dimensions, CFL_SIZE); long strides[D]; md_calc_strides(D, strides, dimensions, CFL_SIZE); plan->fftw = NULL; if (0u != flags) plan->fftw = fft_fftwf_plan(D, dimensions, flags, strides, dst, strides, src, backwards, true); md_free(src); if (!inplace) md_free(dst); #ifdef USE_CUDA plan->cuplan = NULL; #ifndef LAZY_CUDA if (cuda_ondevice(src) && (0u != flags) plan->cuplan = fft_cuda_plan(D, dimensions, flags, strides, strides, backwards); #endif #endif plan->D = D; plan->flags = flags; plan->backwards = backwards; PTR_ALLOC(long[D], dims); md_copy_dims(D, *dims, dimensions); plan->dims = *PTR_PASS(dims); PTR_ALLOC(long[D], istrs); md_copy_strides(D, *istrs, strides); plan->istrs = *PTR_PASS(istrs); PTR_ALLOC(long[D], ostrs); md_copy_strides(D, *ostrs, strides); plan->ostrs = *PTR_PASS(ostrs); return operator_create2(D, dimensions, strides, D, dimensions, strides, CAST_UP(PTR_PASS(plan)), fft_apply, fft_free_plan); } const struct operator_s* fft_create2(unsigned int D, const long dimensions[D], unsigned long flags, const long ostrides[D], complex float* dst, const long istrides[D], const complex float* src, bool backwards) { flags &= md_nontriv_dims(D, dimensions); PTR_ALLOC(struct fft_plan_s, plan); SET_TYPEID(fft_plan_s, plan); plan->fftw = NULL; if (0u != flags) plan->fftw = fft_fftwf_plan(D, dimensions, flags, ostrides, dst, istrides, src, backwards, false); #ifdef USE_CUDA plan->cuplan = NULL; #ifndef LAZY_CUDA if (cuda_ondevice(src) && (0u != flags) plan->cuplan = fft_cuda_plan(D, dimensions, flags, ostrides, istrides, backwards); #endif #endif plan->D = D; plan->flags = flags; plan->backwards = backwards; PTR_ALLOC(long[D], dims); md_copy_dims(D, *dims, dimensions); plan->dims = *PTR_PASS(dims); PTR_ALLOC(long[D], istrs); md_copy_strides(D, *istrs, istrides); plan->istrs = *PTR_PASS(istrs); PTR_ALLOC(long[D], ostrs); md_copy_strides(D, *ostrs, ostrides); plan->ostrs = *PTR_PASS(ostrs); return operator_create2(D, dimensions, ostrides, D, dimensions, istrides, CAST_UP(PTR_PASS(plan)), fft_apply, fft_free_plan); } const struct operator_s* fft_create(unsigned int D, const long dimensions[D], unsigned long flags, complex float* dst, const complex float* src, bool backwards) { long strides[D]; md_calc_strides(D, strides, dimensions, CFL_SIZE); return fft_create2(D, dimensions, flags, strides, dst, strides, src, backwards); } void fft_exec(const struct operator_s* o, complex float* dst, const complex float* src) { operator_apply_unchecked(o, dst, src); } void fft_free(const struct operator_s* o) { operator_free(o); } void fft2(unsigned int D, const long dimensions[D], unsigned long flags, const long ostrides[D], complex float* dst, const long istrides[D], const complex float* src) { const struct operator_s* plan = fft_create2(D, dimensions, flags, ostrides, dst, istrides, src, false); fft_exec(plan, dst, src); fft_free(plan); } void ifft2(unsigned int D, const long dimensions[D], unsigned long flags, const long ostrides[D], complex float* dst, const long istrides[D], const complex float* src) { const struct operator_s* plan = fft_create2(D, dimensions, flags, ostrides, dst, istrides, src, true); fft_exec(plan, dst, src); fft_free(plan); } void fft(unsigned int D, const long dimensions[D], unsigned long flags, complex float* dst, const complex float* src) { const struct operator_s* plan = fft_create(D, dimensions, flags, dst, src, false); fft_exec(plan, dst, src); fft_free(plan); } void ifft(unsigned int D, const long dimensions[D], unsigned long flags, complex float* dst, 
const complex float* src) { const struct operator_s* plan = fft_create(D, dimensions, flags, dst, src, true); fft_exec(plan, dst, src); fft_free(plan); } void fftc(unsigned int D, const long dimensions[__VLA(D)], unsigned long flags, complex float* dst, const complex float* src) { fftmod(D, dimensions, flags, dst, src); fft(D, dimensions, flags, dst, dst); fftmod(D, dimensions, flags, dst, dst); } void ifftc(unsigned int D, const long dimensions[__VLA(D)], unsigned long flags, complex float* dst, const complex float* src) { ifftmod(D, dimensions, flags, dst, src); ifft(D, dimensions, flags, dst, dst); ifftmod(D, dimensions, flags, dst, dst); } void fftc2(unsigned int D, const long dimensions[D], unsigned long flags, const long ostrides[D], complex float* dst, const long istrides[D], const complex float* src) { fftmod2(D, dimensions, flags, ostrides, dst, istrides, src); fft2(D, dimensions, flags, ostrides, dst, ostrides, dst); fftmod2(D, dimensions, flags, ostrides, dst, ostrides, dst); } void ifftc2(unsigned int D, const long dimensions[D], unsigned long flags, const long ostrides[D], complex float* dst, const long istrides[D], const complex float* src) { ifftmod2(D, dimensions, flags, ostrides, dst, istrides, src); ifft2(D, dimensions, flags, ostrides, dst, ostrides, dst); ifftmod2(D, dimensions, flags, ostrides, dst, ostrides, dst); } void fftu(unsigned int D, const long dimensions[__VLA(D)], unsigned long flags, complex float* dst, const complex float* src) { fft(D, dimensions, flags, dst, src); fftscale(D, dimensions, flags, dst, dst); } void ifftu(unsigned int D, const long dimensions[__VLA(D)], unsigned long flags, complex float* dst, const complex float* src) { ifft(D, dimensions, flags, dst, src); fftscale(D, dimensions, flags, dst, dst); } void fftu2(unsigned int D, const long dimensions[D], unsigned long flags, const long ostrides[D], complex float* dst, const long istrides[D], const complex float* src) { fft2(D, dimensions, flags, ostrides, dst, istrides, src); fftscale2(D, dimensions, flags, ostrides, dst, ostrides, dst); } void ifftu2(unsigned int D, const long dimensions[D], unsigned long flags, const long ostrides[D], complex float* dst, const long istrides[D], const complex float* src) { ifft2(D, dimensions, flags, ostrides, dst, istrides, src); fftscale2(D, dimensions, flags, ostrides, dst, ostrides, dst); } void fftuc(unsigned int D, const long dimensions[__VLA(D)], unsigned long flags, complex float* dst, const complex float* src) { fftc(D, dimensions, flags, dst, src); fftscale(D, dimensions, flags, dst, dst); } void ifftuc(unsigned int D, const long dimensions[__VLA(D)], unsigned long flags, complex float* dst, const complex float* src) { ifftc(D, dimensions, flags, dst, src); fftscale(D, dimensions, flags, dst, dst); } void fftuc2(unsigned int D, const long dimensions[D], unsigned long flags, const long ostrides[D], complex float* dst, const long istrides[D], const complex float* src) { fftc2(D, dimensions, flags, ostrides, dst, istrides, src); fftscale2(D, dimensions, flags, ostrides, dst, ostrides, dst); } void ifftuc2(unsigned int D, const long dimensions[D], unsigned long flags, const long ostrides[D], complex float* dst, const long istrides[D], const complex float* src) { ifftc2(D, dimensions, flags, ostrides, dst, istrides, src); fftscale2(D, dimensions, flags, ostrides, dst, ostrides, dst); } bool fft_threads_init = false; void fft_set_num_threads(unsigned int n) { #ifdef FFTWTHREADS #pragma omp critical if (!fft_threads_init) { fft_threads_init = true; 
fftwf_init_threads(); } #pragma omp critical fftwf_plan_with_nthreads(n); #else UNUSED(n); #endif }
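/*
 * Hedged usage sketch (not part of fft.c): a 1-D centered, unitary FFT over a
 * small stack buffer using only helpers defined above, assuming fft.h is
 * included.  fftuc() applies fftmod / fft / fftmod / fftscale, i.e. a DFT
 * with the zero-frequency sample in the centre of the array and 1/sqrt(N)
 * scaling; ifftuc() is its inverse, so the round trip recovers the input up
 * to float rounding.
 */
#include <complex.h>
#include "fft.h"

void fft_usage_sketch(void)
{
	enum { N = 8 };
	long dims[1] = { N };
	complex float x[N];
	complex float k[N];

	for (int i = 0; i < N; i++)
		x[i] = (i == N / 2) ? 1.f : 0.f;	/* centred impulse */

	fftuc(1, dims, 1ul, k, x);	/* forward centered unitary FFT along dim 0 */
	ifftuc(1, dims, 1ul, x, k);	/* inverse: x is recovered up to rounding */
}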
DRB058-jacobikernel-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* Two parallel for loops within one single parallel region, combined with private() and reduction(). 
*/ #include <stdio.h> #include <math.h> #define MSIZE 200 #include <omp.h> int n = 200; int m = 200; int mits = 1000; double tol = 0.0000000001; double relax = 1.0; double alpha = 0.0543; double u[200][200]; double f[200][200]; double uold[200][200]; double dx; double dy; void initialize() { int i; int j; int xx; int yy; dx = 2.0 / (n - 1); dy = 2.0 / (m - 1); /* Initialize initial condition and RHS */ #pragma omp parallel for private (xx,yy,i,j) firstprivate (n,m) for (i = 0; i <= n - 1; i += 1) { #pragma omp parallel for private (xx,yy,j) firstprivate (alpha,dx,dy) for (j = 0; j <= m - 1; j += 1) { /* -1 < x < 1 */ xx = ((int )(- 1.0 + dx * (i - 1))); /* -1 < y < 1 */ yy = ((int )(- 1.0 + dy * (j - 1))); u[i][j] = 0.0; f[i][j] = - 1.0 * alpha * (1.0 - (xx * xx)) * (1.0 - (yy * yy)) - 2.0 * (1.0 - (xx * xx)) - 2.0 * (1.0 - (yy * yy)); } } } void jacobi() { double omega; int i; int j; int k; double error; double resid; double ax; double ay; double b; omega = relax; /* Initialize coefficients */ dx = 2.0 / (n - 1); dy = 2.0 / (m - 1); /* X-direction coef */ ax = 1.0 / (dx * dx); /* Y-direction coef */ ay = 1.0 / (dy * dy); /* Central coeff */ b = - 2.0 / (dx * dx) - 2.0 / (dy * dy) - alpha; error = 10.0 * tol; k = 1; while(k <= mits){ error = 0.0; /* Copy new solution into old */ #pragma omp parallel for private (i,j) for (i = 0; i <= n - 1; i += 1) { #pragma omp parallel for private (j) for (j = 0; j <= m - 1; j += 1) { uold[i][j] = u[i][j]; } } #pragma omp parallel for private (resid,i,j) reduction (+:error) for (i = 1; i <= n - 1 - 1; i += 1) { #pragma omp parallel for private (resid,j) reduction (+:error) firstprivate (omega,ax,ay,b) for (j = 1; j <= m - 1 - 1; j += 1) { resid = (ax * (uold[i - 1][j] + uold[i + 1][j]) + ay * (uold[i][j - 1] + uold[i][j + 1]) + b * uold[i][j] - f[i][j]) / b; u[i][j] = uold[i][j] - omega * resid; error = error + resid * resid; } } /* Error check */ k = k + 1; error = sqrt(error) / (n * m); /* End iteration loop */ } printf("Total Number of Iterations:%d\n",k); printf("Residual:%E\n",error); } int main() { initialize(); jacobi(); return 0; }
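/*
 * Hedged minimal sketch (not part of the benchmark above): the core race-free
 * pattern of the jacobi kernel in isolation - the per-iteration temporary is
 * privatized and the accumulated error is combined with reduction(+), so no
 * two threads ever write the same shared scalar.
 */
#include <stdio.h>

int main(void) {
   double a[100], error = 0.0, resid;
   int i;
   for (i = 0; i < 100; i++) a[i] = i * 0.5;
#pragma omp parallel for private(resid) reduction(+:error)
   for (i = 0; i < 100; i++) {
      resid = a[i] - 25.0;            /* private per-iteration temporary */
      error = error + resid * resid;  /* safely combined across threads  */
   }
   printf("%f\n", error);
   return 0;
}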
learn.c
/**************************************************************/ /*! \File learn.c \brief This is the file containing the fundamental functions to learn the model, used by GLSLIM and GLSLIMr0. \author Evangelia Christakopoulou \version 1.0 \date 2016 */ /**************************************************************/ #include<slim.h> /*****************************************************************************/ /*! \brief This function creates a new training matrix A'' and then calls the main function local_learn, for computing the model. Original training matrix A is of size n * m (rows * cols) An intermediate training matrix A' is created which has: nrows = rows of the original training matrix A ncols = (number_of_clusters + 1) * cols of the original matrix A Every user has then exactly the nnzs of A plus the same nnzs copied for the cluster in which he belongs. For all the other clusters he has 0 entries. In order to be able to regularize properly, we add underneath A' a diagonal matrix which is of size: ncols * ncols. This diagonal matrix contains the regularization params across the diagonal. For the submatrix m * m (cols * cols) it has the global regularization (lg) across the diagonal and for the rest, it has the local regularization (ll). The final matrix is A''. Example: for 3 clusters: m m m m n [ ] [ ] [ b ] n m [ lg ] [ w ] [ 0 ] m m [ ll ] * [ ] = [ 0 ] m m [ ll ] [ ] [ 0 ] m m [ ll ] [ 0 ] m w is of size A'' 4m * 1. \param[in] ctrl The ctrl structure. \param[in] train The training data A. \param[in] participation The assignment of users to clusters. \param[in] g The vector with the weights of the users \param[in] prev_model The previous model (used to expedite the learning) \return model The model learnt. */ /*****************************************************************************/ gk_csr_t *learn(ctrl_t * ctrl, gk_csr_t * train, int *participation, double *g, gk_csr_t * prev_model) { int i, pos, j, offset, global_nnz; gk_csr_t *mat, *model = NULL; global_nnz = train->rowptr[train->nrows] - train->rowptr[0]; mat = gk_csr_Create(); mat->ncols = (ctrl->num_clusters + 1) * train->ncols; mat->nrows = train->nrows + mat->ncols; mat->rowptr = gk_zmalloc(mat->nrows + 1, "gk_csr_Dep: local_rowptr"); mat->rowind = gk_imalloc(2 * global_nnz + mat->ncols, "gk_csr_Dep: local_rowind"); mat->rowval = gk_fmalloc(2 * global_nnz + mat->ncols, "gk_csr_Dep: local_rowval"); mat->rowptr[0] = 0; for (i = 0, pos = 0; i < train->nrows; i++) { /* Copying the original training matrix A */ for (j = train->rowptr[i]; j < train->rowptr[i + 1]; j++) { mat->rowind[pos] = train->rowind[j]; mat->rowval[pos] = train->rowval[j] * g[i]; pos++; } /* Copying the nnzs of the user to the cluster to which he belongs */ offset = (participation[i] + 1) * train->ncols; for (j = train->rowptr[i]; j < train->rowptr[i + 1]; j++) { mat->rowind[pos] = train->rowind[j] + offset; mat->rowval[pos] = train->rowval[j] * (1 - g[i]); pos++; } mat->rowptr[i + 1] = pos; } /* Adding the diagonal matrix underneath A' */ /* Global regularization */ for (i = train->nrows; i < train->nrows + train->ncols; i++, pos++) { mat->rowind[pos] = i - train->nrows; mat->rowval[pos] = ctrl->beta; mat->rowptr[i] = pos; } /* Local regularization */ for (i = train->nrows + train->ncols; i < mat->nrows; i++, pos++) { mat->rowind[pos] = i - train->nrows; mat->rowval[pos] = ctrl->local_beta; mat->rowptr[i] = pos; } mat->rowptr[mat->nrows] = pos; gk_csr_CreateIndex(mat, GK_CSR_COL); /* Learning the model */ model = local_learn(ctrl, mat, 
prev_model); gk_csr_Free(&mat); return model; } /**************************************************************/ /*! \brief Learning \details This routine contains the learning algorithm used by GLSLIM and GLSLIMr0. \param[in] ctrl A ctrl structure which contains all the parameters \param[in] train The training data A'' \param[in] prev_model The previous model- used to speed up learning. It is optional argument. \return model The model returned. */ /**************************************************************/ gk_csr_t *local_learn(ctrl_t * ctrl, gk_csr_t * train, gk_csr_t * prev_model) { int i, nr, nc, ni, pos, j, ii; int global_nnz; int basestart, baseend, datasize, step, starti, endi; gk_csr_t *mat = NULL; double *bl, *bu, *c; int *iinds, *jinds; float *vals; int *nnzs = NULL; int *rinds1 = NULL; int *rinds = NULL; int *rjinds = NULL; float *rvals = NULL; int rank = 0; int max_nnzs = 0; double tmr; int *rrowcnt = NULL; /* set up timers */ gk_clearwctimer(tmr); gk_startwctimer(tmr); /* constants used across all problems */ nr = train->nrows; nc = train->ncols; ni = train->ncols / (ctrl->num_clusters + 1); /* mallocing */ bl = gk_dsmalloc(nc, ctrl->bl, "malloc bl"); /*lower bound */ bu = gk_dsmalloc(nc, ctrl->bu, "malloc bu"); /*upper bound */ c = gk_dmalloc(nc, "malloc c"); /*linear vector */ gk_dset(ni, ctrl->lambda, c); gk_dset(nc - ni, ctrl->local_lambda, c + ni); /*starting and ending columns */ basestart = (ctrl->starti >= 0) ? ctrl->starti : 0; baseend = (ctrl->endi >= 0) ? ctrl->endi : ni; datasize = baseend - basestart; step = (datasize / ctrl->num_procs) + (ctrl->id < (datasize % ctrl->num_procs) ? 1 : 0); starti = ((datasize / ctrl->num_procs) * ctrl->id) + gk_min(ctrl->id, datasize % ctrl->num_procs); endi = starti + step; if ((endi < datasize) && (ctrl->id == ctrl->num_procs - 1)) { endi = datasize; step = datasize - starti; } pos = 0; iinds = gk_ismalloc(step * nc, 0, "malloc iinds"); jinds = gk_ismalloc(step * nc, 0, "malloc jinds"); vals = gk_fsmalloc(step * nc, 0, "malloc vals"); /* go through all columns */ #pragma omp parallel num_threads(ctrl->num_threads) { int mypos; double *w, *b; wspace_t *myws; BCLS *ls; myws = (wspace_t *) gk_malloc(sizeof(wspace_t), "myws"); myws->mat = train; myws->ncols = ni; ls = bcls_create_prob(nr, nc); w = gk_dsmalloc(nc, 0, "malloc w"); b = gk_dsmalloc(nr, 0, "malloc b"); #pragma omp for private(i, j) schedule(dynamic) for (i = starti; i < endi; i++) { // this column is totally empty if (train->colptr[i + 1] - train->colptr[i] == 0) { continue; } /**********************************************************/ /* BCLS learning */ /**********************************************************/ /* get the i-th column from A */ for (j = train->colptr[i]; j < train->colptr[i + 1] - 1; j++) { ii = train->colind[j]; b[ii] = 1; } myws->max_bcls_niters = gk_min(ctrl->max_bcls_niters, 50 * (train->colptr[i + 1] - train->colptr[i])); gk_dset(nc, 0, w); // disable myws->acol = i; if (prev_model != NULL) { get_row(prev_model, i, w); } bcsol(ctrl, b, w, myws, bl, bu, 0, c, ls); for (j = train->colptr[i]; j < train->colptr[i + 1] - 1; j++) { ii = train->colind[j]; b[ii] = 0; } /**********************************************************/ /* dump the data */ /**********************************************************/ /* compute the triplets */ #pragma omp critical { for (j = 0; j < nc; j++) { if (w[j] > EPSILON) { mypos = pos++; iinds[mypos] = i; jinds[mypos] = j; vals[mypos] = w[j]; } } } } // end of starti - endi bcls_free_prob(ls); gk_free((void **) 
&myws, (void **) &b, (void **) &w, LTERM); } gk_stopwctimer(tmr); /* if(ctrl->id == 0){ printf("time passed is %f\n", gk_getwctimer(tmr)); }*/ /**********************************************************/ /* Combine all the mat of the different processes to the total_mat with MPI */ /**********************************************************/ if (ctrl->id == 0) { nnzs = gk_imalloc(ctrl->num_procs, "malloc nnzs"); gk_iset(ctrl->num_procs, 0, nnzs); } MPI_Gather(&pos, 1, MPI_INT, nnzs, 1, MPI_INT, 0, MPI_COMM_WORLD); if (ctrl->id == 0) { global_nnz = 0; for (i = 0; i < ctrl->num_procs; i++) { global_nnz += nnzs[i]; } } MPI_Bcast(&global_nnz, 1, MPI_INT, 0, MPI_COMM_WORLD); /* Finding the max nnzs between all nodes. This covers the case when a node which is not 0 has a bigger iinds/jinds/vals matrix than the one in node 0 */ if (ctrl->id == 0) { max_nnzs = nnzs[0]; for (rank = 1; rank < ctrl->num_procs; rank++) if (nnzs[rank] > max_nnzs) max_nnzs = nnzs[rank]; } /* Each node creates its own row count. This gets sent to node 0, in order to create the total rowptr. */ if (global_nnz / ctrl->num_procs >= ni) { rrowcnt = gk_ismalloc(ni, 0, "rrowcnt"); for (i = 0; i < pos; i++) { rrowcnt[iinds[i]]++; } } /* Every node sends its own iinds, jinds and vals. */ if (ctrl->id != 0) { if (global_nnz / ctrl->num_procs < ni) { MPI_Send(iinds, pos, MPI_INT, 0, 0, MPI_COMM_WORLD); } else { MPI_Send(rrowcnt, ni, MPI_INT, 0, 0, MPI_COMM_WORLD); } MPI_Send(iinds, pos, MPI_INT, 0, 0, MPI_COMM_WORLD); MPI_Send(jinds, pos, MPI_INT, 0, 0, MPI_COMM_WORLD); MPI_Send(vals, pos, MPI_FLOAT, 0, 0, MPI_COMM_WORLD); } if (ctrl->id == 0) { if (global_nnz / ctrl->num_procs < ni) { rinds1 = gk_icopy(nnzs[0], iinds, gk_imalloc(max_nnzs, "rinds1")); } rinds = gk_icopy(nnzs[0], iinds, gk_imalloc(max_nnzs, "rinds")); rjinds = gk_icopy(nnzs[0], jinds, gk_imalloc(max_nnzs, "rjinds")); rvals = gk_fcopy(nnzs[0], vals, gk_fmalloc(max_nnzs, "rvals")); } gk_free((void **) &iinds, &jinds, &vals, &bl, &bu, &c, LTERM); /* Allocate and populate matrix */ mat = gk_csr_Create(); mat->nrows = ni; mat->ncols = nc; mat->rowptr = gk_zsmalloc(ni + 1, 0, "rowptr"); mat->rowind = gk_imalloc(global_nnz, "rowind"); mat->rowval = gk_fmalloc(global_nnz, "rowval"); if (ctrl->id == 0) { if (global_nnz / ctrl->num_procs < ni) { for (rank = 0; rank < ctrl->num_procs; rank++) { if (rank != 0) { MPI_Recv(rinds1, nnzs[rank], MPI_INT, rank, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE); } for (i = 0; i < nnzs[rank]; i++) { mat->rowptr[rinds1[i]]++; } } } else { for (rank = 0; rank < ctrl->num_procs; rank++) { if (rank != 0) { MPI_Recv(rrowcnt, ni, MPI_INT, rank, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE); } for (i = 0; i < ni; i++) { mat->rowptr[i] += rrowcnt[i]; } } } MAKECSR(i, mat->nrows, mat->rowptr); for (rank = 0; rank < ctrl->num_procs; rank++) { if (rank != 0) { MPI_Recv(rinds, nnzs[rank], MPI_INT, rank, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE); MPI_Recv(rjinds, nnzs[rank], MPI_INT, rank, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE); MPI_Recv(rvals, nnzs[rank], MPI_FLOAT, rank, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE); } for (i = 0; i < nnzs[rank]; i++) { mat->rowind[mat->rowptr[rinds[i]]] = rjinds[i]; mat->rowval[mat->rowptr[rinds[i]]] = rvals[i]; mat->rowptr[rinds[i]]++; } } SHIFTCSR(i, mat->nrows, mat->rowptr); gk_free((void **) &rinds, &rjinds, &rvals, &nnzs, LTERM); if (global_nnz / ctrl->num_procs < ni) { gk_free((void **) &rinds1, LTERM); } } if (rrowcnt != NULL) { gk_free((void **) &rrowcnt, LTERM); } /* Broadcast the matrix to the other nodes */ MPI_Bcast(mat->rowptr, ni 
+ 1, MPI_LONG, 0, MPI_COMM_WORLD); MPI_Bcast(mat->rowind, global_nnz, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast(mat->rowval, global_nnz, MPI_FLOAT, 0, MPI_COMM_WORLD); return mat; }
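/*
 * Hedged illustration (not part of learn.c): the diagonal block appended
 * under A'' is the classic augmented-matrix trick for folding ridge-style
 * regularization into an unmodified least-squares solver.  Minimizing
 * ||A w - b||^2 + lambda ||w||^2 is the same ordinary least-squares problem
 * on the stacked system
 *
 *      [       A        ]        [ b ]
 *      [ sqrt(lambda)*I ]  w  =  [ 0 ]
 *
 * Toy dense sketch of the stacking (the real code builds the sparse analogue
 * with separate global/local regularization blocks):
 */
#include <math.h>
#include <string.h>

/* Aaug must hold (m + n) * n entries, row-major; baug must hold m + n. */
static void stack_ridge(int m, int n, const double *A, const double *b,
                        double lambda, double *Aaug, double *baug)
{
   memset(Aaug, 0, (size_t)(m + n) * n * sizeof(double));
   memcpy(Aaug, A, (size_t)m * n * sizeof(double));  /* top block: A            */
   for (int j = 0; j < n; j++)                       /* bottom: sqrt(lambda) * I */
      Aaug[(size_t)(m + j) * n + j] = sqrt(lambda);
   memcpy(baug, b, (size_t)m * sizeof(double));      /* right-hand side: [b; 0] */
   memset(baug + m, 0, (size_t)n * sizeof(double));
}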
normalize_ref.c
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * License); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (c) 2020, OPEN AI LAB * Author: jxyang@openailab.com */ #include <math.h> #include <unistd.h> #include "sys_port.h" #include "module.h" #include "tengine_errno.h" #include "tengine_log.h" #include "tengine_ir.h" #include "../../cpu_node_ops.h" #include "tengine_op.h" #include "normalize_param.h" static void norm_channel(float* input, float* output, float* buffer, float* scale, int hw, int channel, int num_thread) { memset(buffer, 0, hw * sizeof(float)); //#pragma omp parallel for num_threads(num_thread) for (int i = 0; i < channel; i++) { for (int j = 0; j < hw; j++) { float data = *(input + i * hw + j); buffer[j] += (data * data); } } //#pragma omp parallel for num_threads(num_thread) for (int j = 0; j < hw; j++) { buffer[j] = 1.f / sqrt(buffer[j]); } //#pragma omp parallel for num_threads(num_thread) for (int i = 0; i < channel; i++) { for (int j = 0; j < hw; j++) { float data = *(input + i * hw + j); *(output + i * hw + j) = data * buffer[j] * scale[i]; } } } static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { return 0; } static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { return 0; } static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { struct ir_node* ir_node = exec_node->ir_node; struct ir_graph* ir_graph = ir_node->graph; struct ir_tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]); struct ir_tensor* output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]); struct ir_tensor* scale_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[1]); normalize_param_t* param = ( normalize_param_t* )(ir_node->op.param_mem); float* input_org = ( float* )input_tensor->data; float* output_org = ( float* )output_tensor->data; float* sclae_org = ( float* )scale_tensor->data; int batch_number = input_tensor->dims[0]; int channel_num = input_tensor->dims[1]; int channel_size = (input_tensor->dims[2]) * (input_tensor->dims[3]); int img_size = channel_num * channel_size; float* buffer = ( float* )sys_malloc(channel_size * sizeof(float)); if (param->channel_shared == 0 && param->across_spatial == 0) { for (int i = 0; i < batch_number; i++) { norm_channel(input_org, output_org, buffer, sclae_org, channel_size, channel_num, exec_graph->num_thread); input_org += img_size; output_org += img_size; } } sys_free(buffer); return 0; } static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct ir_node* exec_node) { return OPS_SCORE_BEST; } static struct node_ops normalize_node_ops = {.prerun = NULL, .run = run, .reshape = NULL, .postrun = NULL, .init_node = init_node, .release_node = 
release_node, .score = score}; static int ret_normalize_node_ops(void* arg) { return register_builtin_node_ops(OP_NORMALIZE, &normalize_node_ops); } static int unret_normalize_node_ops(void* arg) { return unregister_builtin_node_ops(OP_NORMALIZE, &normalize_node_ops); } AUTO_REGISTER_OPS(ret_normalize_node_ops); AUTO_UNREGISTER_OPS(unret_normalize_node_ops);
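/*
 * Hedged reference sketch (not part of normalize_ref.c): for every spatial
 * position j the kernel above computes, across channels c,
 *     out[c][j] = in[c][j] * scale[c] / sqrt( sum_c in[c][j]^2 )
 * i.e. an L2 normalization over the channel axis followed by a per-channel
 * scale, on a CHW layout.  Tiny standalone check for 2 channels x 2 pixels:
 */
#include <math.h>
#include <stdio.h>

int main(void) {
   const int channel = 2, hw = 2;
   float in[4] = {3.f, 0.f, 4.f, 2.f};   /* c0: {3,0}, c1: {4,2} */
   float scale[2] = {1.f, 1.f};
   float out[4];
   for (int j = 0; j < hw; j++) {
      float ss = 0.f;
      for (int c = 0; c < channel; c++)
         ss += in[c * hw + j] * in[c * hw + j];
      float inv = 1.f / sqrtf(ss);
      for (int c = 0; c < channel; c++)
         out[c * hw + j] = in[c * hw + j] * inv * scale[c];
   }
   printf("%f %f %f %f\n", out[0], out[1], out[2], out[3]);  /* 0.6 0 0.8 1 */
   return 0;
}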
DRB052-indirectaccesssharebase-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* This example is to mimic a memory access pattern extracted from an LLNL proxy app. Two pointers have distance of 12. They are used as base addresses of two arrays, indexed through an index set. The index set has no two indices with distance of 12. So there is no loop carried dependence. */ #include <assert.h> #include <stdio.h> #include <stdlib.h> #define N 180 int indexSet[N] = { 521, 523, 525, 527, 529, 531, 547, 549, 551, 553, 555, 557, 573, 575, 577, 579, 581, 583, 599, 601, 603, 605, 607, 609, 625, 627, 629, 631, 633, 635, 651, 653, 655, 657, 659, 661, 859, 861, 863, 865, 867, 869, 885, 887, 889, 891, 893, 895, 911, 913, 915, 917, 919, 921, 937, 939, 941, 943, 945, 947, 963, 965, 967, 969, 971, 973, 989, 991, 993, 995, 997, 999, 1197, 1199, 1201, 1203, 1205, 1207, 1223, 1225, 1227, 1229, 1231, 1233, 1249, 1251, 1253, 1255, 1257, 1259, 1275, 1277, 1279, 1281, 1283, 1285, 1301, 1303, 1305, 1307, 1309, 1311, 1327, 1329, 1331, 1333, 1335, 1337, 1535, 1537, 1539, 1541, 1543, 1545, 1561, 1563, 1565, 1567, 1569, 1571, 1587, 1589, 1591, 1593, 1595, 1597, 1613, 1615, 1617, 1619, 1621, 1623, 1639, 1641, 1643, 1645, 1647, 1649, 1665, 1667, 1669, 1671, 1673, 1675, 1873, 1875, 1877, 1879, 1881, 1883, 1899, 1901, 1903, 1905, 1907, 1909, 1925, 1927, 1929, 1931, 1933, 1935, 1951, 1953, 1955, 1957, 1959, 1961, 1977, 1979, 1981, 1983, 1985, 1987, 2003, 2005, 2007, 2009, 2011, 2013}; int main (int argc, char* argv[]) { double * base = (double*) malloc(sizeof(double)* (2013+12+1)); if (base == 0) { printf("Error, malloc() returns NULL. End execution. 
\n"); return 1; } double * xa1 = base; double * xa2 = base + 12; int i; for (i =521; i<= 2025; ++i) { base[i]=0.0; } #pragma omp parallel for for (i =0; i< N; ++i) // this level of loop has no loop carried dependence { int idx = indexSet[i]; xa1[idx]+= 1.0; xa2[idx]+= 3.0; } // verify the results, no overlapping of xa1 vs. xa2, no addition happens to the same element twice for (i =521; i<= 2025; ++i) { //printf ("%f ", base[i]); assert (base[i]!=4.0); } free (base); return 0; }
spectralnorm.gcc-4.c
/* * The Computer Language Benchmarks Game * http://shootout.alioth.debian.org/ * * Original C contributed by Sebastien Loisel * Conversion to C++ by Jon Harrop * OpenMP parallelize by The Anh Tran * Add SSE by The Anh Tran * Reconversion into C by Dan Farina */ #define _GNU_SOURCE #include <omp.h> #include <math.h> #include <sched.h> #include <stdio.h> #include <stdlib.h> #define false 0 #define true 1 /* define SIMD data type. 2 doubles encapsulated in one XMM register */ typedef double v2dt __attribute__((vector_size(16))); static const v2dt v1 = {1.0, 1.0}; /* parameter for evaluate functions */ struct Param { double* u; /* source vector */ double* tmp; /* temporary */ double* v; /* destination vector */ int N; /* source/destination vector length */ int N2; /* = N/2 */ int r_begin; /* working range of each thread */ int r_end; }; /* Return: 1.0 / (i + j) * (i + j +1) / 2 + i + 1; */ static double eval_A(int i, int j) { /* * 1.0 / (i + j) * (i + j +1) / 2 + i + 1; * n * (n+1) is even number. Therefore, just (>> 1) for (/2) */ int d = (((i+j) * (i+j+1)) >> 1) + i+1; return 1.0 / d; } /* * Return type: 2 doubles in xmm register [double1, double2] * double1 = 1.0 / (i + j) * (i + j +1) / 2 + i + 1; * double2 = 1.0 / (i+1 + j) * (i+1 + j +1) / 2 + i+1 + 1; */ static v2dt eval_A_i(int i, int j) { int d1 = (((i+j) * (i+j+1)) >> 1) + i+1; int d2 = (((i+1 +j) * (i+1 +j+1)) >> 1) + (i+1) +1; v2dt r = {d1, d2}; return v1 / r; } /* * Return type: 2 doubles in xmm register [double1, double2] * double1 = 1.0 / (i + j) * (i + j +1) / 2 + i + 1; * double2 = 1.0 / (i + j+1) * (i + j+1 +1) / 2 + i + 1; */ static v2dt eval_A_j(int i, int j) { int d1 = (((i+j) * (i+j+1)) >> 1) + i+1; int d2 = (((i+ j+1) * (i+ j+1 +1)) >> 1) + i+1; v2dt r = {d1, d2}; return v1 / r; } /* This function is called by many threads */ static void eval_A_times_u(struct Param *p) { /* alias of source vector */ const v2dt *pU = (void *) p->u; int i; int ie; for (i = p->r_begin, ie = p->r_end; i < ie; i++) { v2dt sum = {0, 0}; /* xmm = 2 doubles. This loop run from [0 .. N/2) */ int j; for (j = 0; j < p->N2; j++) sum += pU[j] * eval_A_j(i, j*2); /* write result */ { double *mem = (void *) &sum; p->tmp[i] = mem[0] + mem[1]; } /* If source vector is odd size. This should be called <= 1 time */ for (j = j*2; __builtin_expect(j < p->N, false); j++) p->tmp[i] += eval_A(i, j) * p->u[j]; } } static void eval_At_times_u(struct Param *p) { const v2dt *pT = (void *) p->tmp; int i; int ie; for (i = p->r_begin, ie = p->r_end; i < ie; i++) { v2dt sum = {0, 0}; int j; for (j = 0; j < p->N2; j++) sum += pT[j] * eval_A_i(j*2, i); { double *mem = (void *) &sum; p->v[i] = mem[0] + mem[1]; } /* odd size array */ for (j = j*2; __builtin_expect(j < p->N, false); j++) p->v[i] += eval_A(j, i) * p->tmp[j]; } } /* * Called by N threads. * * Each thread modifies its portion in destination vector -> barrier needed to * sync access */ static void eval_AtA_times_u(struct Param *p) { eval_A_times_u(p); #pragma omp barrier eval_At_times_u(p); #pragma omp barrier } /* * Shootout bench uses affinity to emulate single core processor. This * function searches for appropriate number of threads to spawn. 
*/ static int GetThreadCount() { cpu_set_t cs; int i; int count = 0; CPU_ZERO(&cs); sched_getaffinity(0, sizeof(cs), &cs); for (i = 0; i < 16; i++) if (CPU_ISSET(i, &cs)) count++; return count; } static double spectral_game(int N) { /* Align 64 byte for L2 cache line */ __attribute__((aligned(64))) double u[N]; __attribute__((aligned(64))) double tmp[N]; __attribute__((aligned(64))) double v[N]; double vBv = 0.0; double vv = 0.0; #pragma omp parallel default(shared) num_threads(GetThreadCount()) { int i; #pragma omp for schedule(static) for (i = 0; i < N; i++) u[i] = 1.0; /* * this block will be executed by NUM_THREADS variable declared in this * block is private for each thread */ int threadid = omp_get_thread_num(); int threadcount = omp_get_num_threads(); int chunk = N / threadcount; int ite; struct Param my_param; my_param.tmp = tmp; my_param.N = N; my_param.N2 = N/2; /* * calculate each thread's working range [range1 .. range2) => static * schedule here */ my_param.r_begin = threadid * chunk; my_param.r_end = (threadid < (threadcount -1)) ? (my_param.r_begin + chunk) : N; for (ite = 0; ite < 10; ite++) { my_param.u = u; /* source vec is u */ my_param.v = v; /* destination vec is v */ eval_AtA_times_u(&my_param); my_param.u = v; /* source is v */ my_param.v = u; /* destination is u */ eval_AtA_times_u(&my_param); } /* multi thread adding */ { int i; #pragma omp for schedule(static) reduction( + : vBv, vv ) nowait for (i = 0; i < N; i++) { vv += v[i] * v[i]; vBv += u[i] * v[i]; } } } /* end parallel region */ return sqrt(vBv/vv); } int main(int argc, char *argv[]) { int N = ((argc >= 2) ? atoi(argv[1]) : 2000); printf("%.9f\n", spectral_game(N)); return 0; }
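/*
 * Hedged scalar reference (not part of the benchmark above): the same power
 * iteration without SSE or OpenMP, to make explicit what the vectorized
 * kernels compute.  A[i][j] = 1 / ((i+j)(i+j+1)/2 + i + 1); after 10 rounds
 * of applying B = A^T A, the result is sqrt(vBv / vv) exactly as in the
 * benchmark, where u and v are the last two iteration vectors.
 */
#include <math.h>
#include <stdio.h>
#include <stdlib.h>

static double A(int i, int j) { return 1.0 / (((i + j) * (i + j + 1)) / 2 + i + 1); }

static void AtA_u(int n, const double *u, double *v, double *tmp) {
   for (int i = 0; i < n; i++) {            /* tmp = A u   */
      tmp[i] = 0.0;
      for (int j = 0; j < n; j++) tmp[i] += A(i, j) * u[j];
   }
   for (int i = 0; i < n; i++) {            /* v = A^T tmp */
      v[i] = 0.0;
      for (int j = 0; j < n; j++) v[i] += A(j, i) * tmp[j];
   }
}

int main(void) {
   int n = 100;
   double *u = malloc(n * sizeof(double));
   double *v = malloc(n * sizeof(double));
   double *tmp = malloc(n * sizeof(double));
   for (int i = 0; i < n; i++) u[i] = 1.0;
   for (int it = 0; it < 10; it++) {
      AtA_u(n, u, v, tmp);   /* v = B u */
      AtA_u(n, v, u, tmp);   /* u = B v */
   }
   double vBv = 0.0, vv = 0.0;
   for (int i = 0; i < n; i++) { vBv += u[i] * v[i]; vv += v[i] * v[i]; }
   printf("%.9f\n", sqrt(vBv / vv));
   free(u); free(v); free(tmp);
   return 0;
}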
GB_unop__identity_fc64_int32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_fc64_int32) // op(A') function: GB (_unop_tran__identity_fc64_int32) // C type: GxB_FC64_t // A type: int32_t // cast: GxB_FC64_t cij = GxB_CMPLX ((double) (aij), 0) // unaryop: cij = aij #define GB_ATYPE \ int32_t #define GB_CTYPE \ GxB_FC64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ int32_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_FC64 || GxB_NO_INT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_fc64_int32) ( GxB_FC64_t *Cx, // Cx and Ax may be aliased const int32_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int32_t aij = Ax [p] ; GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ; Cx [p] = z ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; int32_t aij = Ax [p] ; GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_fc64_int32) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
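/*
 * Hedged usage sketch (not part of the generated kernel above): this file is
 * what SuiteSparse:GraphBLAS dispatches to when the GxB_IDENTITY_FC64 unary
 * operator is applied to an INT32 matrix, i.e. every stored entry is typecast
 * to double complex.  Approximate user-level call:
 */
#include "GraphBLAS.h"

GrB_Info apply_identity_fc64(GrB_Matrix *C, GrB_Matrix A) {
   GrB_Index nrows, ncols;
   GrB_Matrix_nrows(&nrows, A);
   GrB_Matrix_ncols(&ncols, A);
   GrB_Matrix_new(C, GxB_FC64, nrows, ncols);          /* complex output */
   /* cij = (double complex) aij for every stored entry of the INT32 A */
   return GrB_Matrix_apply(*C, NULL, NULL, GxB_IDENTITY_FC64, A, NULL);
}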
GB_unaryop__abs_int32_uint64.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__abs_int32_uint64 // op(A') function: GB_tran__abs_int32_uint64 // C type: int32_t // A type: uint64_t // cast: int32_t cij = (int32_t) aij // unaryop: cij = GB_IABS (aij) #define GB_ATYPE \ uint64_t #define GB_CTYPE \ int32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IABS (x) ; // casting #define GB_CASTING(z, x) \ int32_t z = (int32_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ABS || GxB_NO_INT32 || GxB_NO_UINT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__abs_int32_uint64 ( int32_t *restrict Cx, const uint64_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__abs_int32_uint64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
GB_binop__lor_uint8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__lor_uint8) // A.*B function (eWiseMult): GB (_AemultB_08__lor_uint8) // A.*B function (eWiseMult): GB (_AemultB_02__lor_uint8) // A.*B function (eWiseMult): GB (_AemultB_04__lor_uint8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__lor_uint8) // A*D function (colscale): GB (_AxD__lor_uint8) // D*A function (rowscale): GB (_DxB__lor_uint8) // C+=B function (dense accum): GB (_Cdense_accumB__lor_uint8) // C+=b function (dense accum): GB (_Cdense_accumb__lor_uint8) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lor_uint8) // C=scalar+B GB (_bind1st__lor_uint8) // C=scalar+B' GB (_bind1st_tran__lor_uint8) // C=A+scalar GB (_bind2nd__lor_uint8) // C=A'+scalar GB (_bind2nd_tran__lor_uint8) // C type: uint8_t // A type: uint8_t // A pattern? 0 // B type: uint8_t // B pattern? 0 // BinaryOp: cij = ((aij != 0) || (bij != 0)) #define GB_ATYPE \ uint8_t #define GB_BTYPE \ uint8_t #define GB_CTYPE \ uint8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint8_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint8_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = ((x != 0) || (y != 0)) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LOR || GxB_NO_UINT8 || GxB_NO_LOR_UINT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__lor_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__lor_uint8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__lor_uint8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint8_t uint8_t bwork = (*((uint8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__lor_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__lor_uint8) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__lor_uint8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint8_t alpha_scalar ; uint8_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint8_t *) alpha_scalar_in)) ; beta_scalar = 
(*((uint8_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__lor_uint8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__lor_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__lor_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__lor_uint8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__lor_uint8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t x = (*((uint8_t *) x_input)) ; uint8_t *Bx = (uint8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint8_t bij = GBX (Bx, p, false) ; Cx [p] = ((x != 0) || (bij != 0)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__lor_uint8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t *Ax = (uint8_t *) Ax_input ; uint8_t y = (*((uint8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint8_t aij = GBX (Ax, p, false) ; Cx [p] = ((aij != 0) || (y != 0)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = ((x != 0) || (aij != 0)) ; \ } GrB_Info GB (_bind1st_tran__lor_uint8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t 
*restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t x = (*((const uint8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = ((aij != 0) || (y != 0)) ; \ } GrB_Info GB (_bind2nd_tran__lor_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t y = (*((const uint8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
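/*
 * Hedged sketch of the bind1st kernel above: the scalar x is bound as the
 * first argument of LOR and combined with every present entry of B,
 * producing 0 or 1 stored as uint8_t.  This mirrors the loop body of
 * GB (_bind1st__lor_uint8) without the GBB/GBX macros; treating a NULL
 * bitmap as "all entries present" is an assumption about what GBB does.
 */
#include <stdint.h>
#include <stddef.h>

void bind1st_lor_uint8_sketch (uint8_t *Cx, uint8_t x,
    const uint8_t *Bx, const int8_t *Bb, int64_t bnz)
{
    for (int64_t p = 0 ; p < bnz ; p++)
    {
        if (Bb != NULL && !Bb [p]) continue ;   /* skip absent bitmap entries   */
        uint8_t bij = Bx [p] ;
        Cx [p] = ((x != 0) || (bij != 0)) ;     /* BinaryOp: cij = (x || bij)   */
    }
}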
2018-collapsemissing-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* two dimensional array computation. 2nd level loop's index variable will cause data race without collapse (2) or private(j) */ int a[100][100], b[100][100], c[100][100]; int main() { int i,j; #pragma omp parallel for for (i=0;i<100;i++) for (j=0;j<100;j++) a[i][j]=b[i][j]*c[i][j]; return 0; }
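/*
 * Hedged sketch of the two repairs named in the header comment above: either
 * privatize j explicitly, or collapse both loops so every (i,j) iteration is
 * owned by a single thread (collapse(2) also privatizes both loop indices).
 * This is an illustrative variant, not part of the DataRaceBench suite.
 */
int a2[100][100], b2[100][100], c2[100][100];

void fixed_private(void) {
  int i, j;
#pragma omp parallel for private(j)
  for (i = 0; i < 100; i++)
    for (j = 0; j < 100; j++)
      a2[i][j] = b2[i][j] * c2[i][j];
}

void fixed_collapse(void) {
  int i, j;
#pragma omp parallel for collapse(2)
  for (i = 0; i < 100; i++)
    for (j = 0; j < 100; j++)
      a2[i][j] = b2[i][j] * c2[i][j];
}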
GB_unop__identity_uint32_int16.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_uint32_int16) // op(A') function: GB (_unop_tran__identity_uint32_int16) // C type: uint32_t // A type: int16_t // cast: uint32_t cij = (uint32_t) aij // unaryop: cij = aij #define GB_ATYPE \ int16_t #define GB_CTYPE \ uint32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int16_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ uint32_t z = (uint32_t) aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ int16_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ uint32_t z = (uint32_t) aij ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_UINT32 || GxB_NO_INT16) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_uint32_int16) ( uint32_t *Cx, // Cx and Ax may be aliased const int16_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int16_t aij = Ax [p] ; uint32_t z = (uint32_t) aij ; Cx [p] = z ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; int16_t aij = Ax [p] ; uint32_t z = (uint32_t) aij ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_uint32_int16) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
image.h
#ifndef SEGM_SEGMIMAGE_H #define SEGM_SEGMIMAGE_H #include <stdexcept> #include <limits> #include "utils/color.h" namespace segm { template<typename T> class Image { public: struct Pixel { int x; int y; Pixel(int _x, int _y) : x(_x), y(_y) { } }; typedef enum : unsigned int { rgb = 0x00, xyz = 0x01, lab = 0x02, ypbpr = 0x03, hsv = 0x04, gray = 0x05, rgchroma = 0x10 } ColorSpace; Image(int width, int height); Image(int width, int height, int bands); Image(int width, int height, int bands, const T *_feat); Image(int width, int height, const T *_feat); Image(int width, int height, int bands, T *_feat, bool alloc = true); Image(int width, int height, T *_feat, bool alloc = true); Image(const Image<T> &image); virtual ~Image(); int getHeight() const { return h; } int getWidth() const { return w; } int getSize() const { return w * h; } int getBands() const { return b; } T *getFeats(int x, int y) const { return &feat[row[y] + pos[x]]; } T *getFeats(int p) const { return &feat[pos[p]]; } T *getFeats() const { return feat; } void setFeats(const T *array); Pixel coord(int p) const { return Pixel(p % w, p / w); } int index(int x, int y) const { return row_index[y] + x; } T max(T a, T b) { return ((a > b) ? a : b); } Image<T> &operator=(const Image<T> &image); T &operator()(int x, int y, int b) { return feat[row[y] + pos[x] + b]; } T operator()(int x, int y, int b) const { return feat[row[y] + pos[x] + b]; } T &operator()(const Pixel &p, int b) { return (*this)(p.x, p.y, b); } T operator()(const Pixel &p, int b) const { return (*this)(p.x, p.y, b); } /* only for 1 band images */ T &operator()(int x, int y) { return feat[row[y] + x]; } T operator()(int x, int y) const { return feat[row[y] + x]; } T &operator()(const Pixel &p) { return T (*this)(p.x, p.y); } T operator()(const Pixel &p) const { return T (*this)(p.x, p.y); } T &operator()(int p) { return feat[p]; } T operator()(int p) const { return feat[p]; } inline bool valid(int x, int y) const { return ((x >= 0 && x < w) && (y >= 0 && y < h)); } inline bool valid(const Pixel &p) const { return valid(p.x, p.y); } Image<T> copy() const; void fill(T value); T max() const; T min() const; int argmax() const; int argmin() const; template<typename U> Image<U> convert() const; Image<double> convert(ColorSpace from, ColorSpace to, double normalization = 1) const; template<typename U> Image<U> rescale() const; bool isChromatic(ColorSpace space) { return (space & 0x10); } protected: int h = 0; /* h */ int w = 0; /* w */ int b = 0; /* bands */ T *feat = nullptr; /* lookup table */ int *row = nullptr; /* w plus band length padding */ int *pos = nullptr; /* band padding only */ int *row_index = nullptr; /* w padding only */ bool allocated = false; /* option to not alloc features, just point to it */ }; template<typename T> Image<T>::Image(int width, int height, int bands) { w = width; h = height; b = bands; allocated = true; feat = new T[w * h * b](); row = new int[h]; row_index = new int[h]; pos = new int[w * h]; for (int i = 0, r = 0; i < h; i++, r += w) { row[i] = r * b; row_index[i] = r; } for (int i = 0, c = 0; i < w * h; i++, c += b) pos[i] = c; } template<typename T> Image<T>::Image(int width, int height, int bands, const T *_feat) { w = width; h = height; b = bands; allocated = true; feat = new T[w * h * b]; row = new int[h]; row_index = new int[h]; pos = new int[w * h]; for (int i = 0, r = 0; i < h; i++, r += w) { row[i] = r * b; row_index[i] = r; } for (int i = 0, c = 0; i < w * h; i++, c += b) pos[i] = c; setFeats(_feat); } template<typename T> 
Image<T>::Image(int width, int height, int bands, T *_feat, bool alloc) { w = width; h = height; b = bands; if (alloc) { allocated = true; feat = new T[w * h * b]; setFeats(_feat); } else { allocated = false; feat = _feat; } row = new int[h]; row_index = new int[h]; pos = new int[w * h]; for (int i = 0, r = 0; i < h; i++, r += w) { row[i] = r * b; row_index[i] = r; } for (int i = 0, c = 0; i < w * h; i++, c += b) pos[i] = c; } template<typename T> Image<T>::Image(int width, int height) : Image<T>(width, height, 1) { } template<typename T> Image<T>::Image(int width, int height, const T *_feat) : Image<T>(width, height, 1, _feat) { } template<typename T> Image<T>::Image(int width, int height, T *_feat, bool alloc) : Image<T>(width, height, 1, _feat, alloc) { } template<typename T> Image<T>::Image(const Image<T> &image) : Image<T>(image.getWidth(), image.getHeight(), image.getBands(), image.getFeats()) { } template<typename T> Image<T>::~Image() { if (allocated) delete[] feat; delete[] row; delete[] pos; delete[] row_index; }; template<typename T> void Image<T>::setFeats(const T *array) { for (int i = 0; i < w * h * b; i++) feat[i] = array[i]; } template<typename T> Image<T> &Image<T>::operator=(const Image<T> &image) { if (w != image.getWidth() || h != image.getHeight() || b != image.getBands() || !allocated) { if (allocated) delete[] feat; delete[] row; delete[] pos; delete[] row_index; w = image.getWidth(); h = image.getHeight(); b = image.getBands(); allocated = true; feat = new T[w * h * b]; row = new int[h]; row_index = new int[h]; pos = new int[w * h]; for (int i = 0, r = 0; i < h; i++, r += w) { row[i] = r * b; row_index[i] = r; } for (int i = 0, c = 0; i < w * h; i++, c += b) pos[i] = c; } setFeats(image.getFeats()); return (*this); } template<typename T> Image<T> Image<T>::copy() const { Image<T> out(w, h, b, feat); return out; } template<typename T> void Image<T>::fill(T value) { for (int i = 0; i < w * h * b; i++) feat[i] = value; } template<typename T> T Image<T>::max() const { T maximum = std::numeric_limits<T>::min(); for (int i = 0; i < w * h * b; i++) { if (feat[i] > maximum) maximum = feat[i]; } return maximum; } template<typename T> T Image<T>::min() const { T minimum = std::numeric_limits<T>::max(); for (int i = 0; i < w * h * b; i++) { if (feat[i] < minimum) minimum = feat[i]; } return minimum; } template<typename T> int Image<T>::argmax() const { T maximum = std::numeric_limits<T>::min(); int p = -1; for (int i = 0; i < w * h * b; i++) { if (feat[i] > maximum) { maximum = feat[i]; p = i; } } return p; } template<typename T> int Image<T>::argmin() const { T minimum = std::numeric_limits<T>::max(); int p = -1; for (int i = 0; i < w * h * b; i++) { if (feat[i] < minimum) { minimum = feat[i]; p = i; } } return p; } template<typename T> template<typename U> Image<U> Image<T>::convert() const { Image<U> out(w, h, b); for (int p = 0; p < w * h * b; p++) { out(p) = static_cast<U>(feat[p]); } return out; } template<typename T> Image<double> Image<T>::convert(Image<T>::ColorSpace from, Image<T>::ColorSpace to, double normalization) const { if (from == to) return convert<double>(); void (*convFun)(const double *, double*) = nullptr; unsigned int conversion = (from << 16) | to; switch (conversion) { case (rgb << 16) | xyz: convFun = rgb2xyz; break; case (xyz << 16) | rgb: convFun = xyz2rgb; break; case (xyz << 16) | lab: convFun = xyz2lab; break; case (lab << 16) | xyz: convFun = lab2xyz; break; case (rgb << 16) | lab: convFun = rgb2lab; break; case (lab << 16) | rgb: convFun = 
lab2rgb; break; case (rgb << 16) | ypbpr: convFun = rgb2ypbpr; break; case (ypbpr << 16) | rgb: convFun = ypbpr2rgb; break; case (rgb << 16) | hsv: convFun = rgb2hsv; break; case (hsv << 16) | rgb: convFun = hsv2rgb; break; case (rgb << 16) | gray: convFun = rgb2gray; break; case (gray << 16) | rgb: convFun = gray2rgb; break; case (rgb << 16) | rgchroma: convFun = rgb2rgchroma; break; default: throw std::invalid_argument("Color conversion requested not found"); } double dbl_feat[3]; Image<double> out(w, h, ((to != gray) ? 3 : 1)); for (int i = 0, p = 0; i < w * h; i++) { switch (from) { case gray: dbl_feat[0] = feat[p] / normalization; convFun(dbl_feat, out.getFeats(p)); p++; break; default: dbl_feat[0] = feat[p] / normalization; dbl_feat[1] = feat[p + 1] / normalization; dbl_feat[2] = feat[p + 2] / normalization; convFun(dbl_feat, out.getFeats(i)); p += 3; break; } } return out; } template<typename T> template<typename U> Image<U> Image<T>::rescale() const { Image<U> out(w, h, b); #ifdef _OPENMP #pragma omp parallel for #endif for (int bb = 0; bb < b; bb++) { T max = std::numeric_limits<T>::min(); T min = std::numeric_limits<T>::max(); for (int i = 0; i < w; i++) { for (int j = 0; j < h; j++) { T val = (*this)(i, j, bb); if (val > max) max = val; if (val < min) min = val; } } for (int i = 0; i < w; i++) { for (int j = 0; j < h; j++) { out(i, j, bb) = static_cast<U>((((*this)(i, j, bb) - min) / max)); } } } return out; } } #endif //SEGM_SEGMIMAGE_H
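/*
 * Hedged usage sketch for segm::Image<T> as declared above: build a 2x2
 * interleaved RGB image, convert it to CIELAB (normalizing 8-bit values to
 * [0,1]), and read one channel back.  Assumes utils/color.h supplies rgb2lab
 * and the other conversion functions referenced by Image<T>::convert();
 * the pixel values are made up for illustration.
 */
#include "image.h"

int main() {
    unsigned char rgb_data[2 * 2 * 3] = {
        255, 0, 0,   0, 255, 0,     // red, green
        0, 0, 255,   255, 255, 255  // blue, white
    };

    segm::Image<unsigned char> img(2, 2, 3, rgb_data);

    // from/to are values of the nested ColorSpace enum; normalization maps
    // 8-bit channels into [0,1] before calling the conversion function.
    segm::Image<double> lab = img.convert(
        segm::Image<unsigned char>::rgb,
        segm::Image<unsigned char>::lab,
        255.0);

    double L = lab(0, 0, 0);   // L channel of the top-left (red) pixel
    return (L > 0.0) ? 0 : 1;
}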
GB_dense_subassign_06d_template.c
//------------------------------------------------------------------------------ // GB_dense_subassign_06d_template: C<A> = A where C is dense or bitmap //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ { //-------------------------------------------------------------------------- // get C and A //-------------------------------------------------------------------------- ASSERT (!GB_ZOMBIES (A)) ; ASSERT (GB_JUMBLED_OK (A)) ; ASSERT (!GB_PENDING (A)) ; const int64_t *GB_RESTRICT Ap = A->p ; const int64_t *GB_RESTRICT Ah = A->h ; const int64_t *GB_RESTRICT Ai = A->i ; const int8_t *GB_RESTRICT Ab = A->b ; const GB_ATYPE *GB_RESTRICT Ax = (GB_ATYPE *) A->x ; const int64_t avlen = A->vlen ; const bool A_is_bitmap = GB_IS_BITMAP (A) ; const bool A_is_dense = GB_as_if_full (A) ; const int64_t anz = GB_NNZ_HELD (A) ; GB_CTYPE *GB_RESTRICT Cx = (GB_CTYPE *) C->x ; int8_t *GB_RESTRICT Cb = C->b ; const int64_t cvlen = C->vlen ; const bool C_is_bitmap = GB_IS_BITMAP (C) ; //-------------------------------------------------------------------------- // C<A> = A //-------------------------------------------------------------------------- int64_t cnvals = C->nvals ; // for C bitmap if (A_is_dense) { //---------------------------------------------------------------------- // A is dense: all entries present //---------------------------------------------------------------------- if (C_is_bitmap) { //------------------------------------------------------------------ // C is bitmap, A is dense //------------------------------------------------------------------ if (Mask_struct) { // C<A,struct>=A with C bitmap, A dense int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { // Cx [p] = Ax [p] GB_COPY_A_TO_C (Cx, p, Ax, p) ; } GB_memset (Cb, 1, anz, nthreads) ; cnvals = anz ; } else { // C<A>=A with C bitmap, A dense int tid ; #pragma omp parallel for num_threads(nthreads) schedule(static)\ reduction(+:cnvals) for (tid = 0 ; tid < nthreads ; tid++) { int64_t pA_start, pA_end, task_cnvals = 0 ; GB_PARTITION (pA_start, pA_end, anz, tid, nthreads) ; for (int64_t p = pA_start ; p < pA_end ; p++) { if (GB_AX_MASK (Ax, p, asize)) { // Cx [p] = Ax [p] GB_COPY_A_TO_C (Cx, p, Ax, p) ; task_cnvals += (Cb [p] == 0) ; Cb [p] = 1 ; } } cnvals += task_cnvals ; } } } else { //------------------------------------------------------------------ // C is hypersparse, sparse, or full, with all entries present //------------------------------------------------------------------ if (Mask_struct) { // C<A,struct>=A with C sparse/hyper/full int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { // Cx [p] = Ax [p] GB_COPY_A_TO_C (Cx, p, Ax, p) ; } } else { // C<A>=A with C sparse/hyper/full int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (GB_AX_MASK (Ax, p, asize)) { // Cx [p] = Ax [p] GB_COPY_A_TO_C (Cx, p, Ax, p) ; } } } } } else if (A_is_bitmap) { //---------------------------------------------------------------------- // A is bitmap //---------------------------------------------------------------------- if (C_is_bitmap) { //------------------------------------------------------------------ // C is bitmap, A is bitmap 
//------------------------------------------------------------------ if (Mask_struct) { // C<A,struct>=A with A and C bitmap int tid ; #pragma omp parallel for num_threads(nthreads) schedule(static)\ reduction(+:cnvals) for (tid = 0 ; tid < nthreads ; tid++) { int64_t pA_start, pA_end, task_cnvals = 0 ; GB_PARTITION (pA_start, pA_end, anz, tid, nthreads) ; for (int64_t p = pA_start ; p < pA_end ; p++) { if (Ab [p]) { // Cx [p] = Ax [p] GB_COPY_A_TO_C (Cx, p, Ax, p) ; task_cnvals += (Cb [p] == 0) ; Cb [p] = 1 ; } } cnvals += task_cnvals ; } } else { // C<A>=A with A and C bitmap int tid ; #pragma omp parallel for num_threads(nthreads) schedule(static)\ reduction(+:cnvals) for (tid = 0 ; tid < nthreads ; tid++) { int64_t pA_start, pA_end, task_cnvals = 0 ; GB_PARTITION (pA_start, pA_end, anz, tid, nthreads) ; for (int64_t p = pA_start ; p < pA_end ; p++) { if (Ab [p] && GB_AX_MASK (Ax, p, asize)) { // Cx [p] = Ax [p] GB_COPY_A_TO_C (Cx, p, Ax, p) ; task_cnvals += (Cb [p] == 0) ; Cb [p] = 1 ; } } cnvals += task_cnvals ; } } } else { //------------------------------------------------------------------ // C is hypersparse, sparse, or full, with all entries present //------------------------------------------------------------------ if (Mask_struct) { // C<A,struct>=A with A bitmap, and C hyper/sparse/full // this method is used by LAGraph_bfs_parent when q is // a bitmap and pi is full. int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { // Cx [p] = Ax [p] if (Ab [p]) { GB_COPY_A_TO_C (Cx, p, Ax, p) ; } } } else { // C<A>=A with A bitmap, and C hyper/sparse/full int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (Ab [p] && GB_AX_MASK (Ax, p, asize)) { // Cx [p] = Ax [p] GB_COPY_A_TO_C (Cx, p, Ax, p) ; } } } } } else { //---------------------------------------------------------------------- // A is hypersparse or sparse; C is dense or a bitmap //---------------------------------------------------------------------- int taskid ; #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \ reduction(+:cnvals) for (taskid = 0 ; taskid < ntasks ; taskid++) { // if kfirst > klast then taskid does no work at all int64_t kfirst = kfirst_slice [taskid] ; int64_t klast = klast_slice [taskid] ; int64_t task_cnvals = 0 ; //------------------------------------------------------------------ // C<A(:,kfirst:klast)> = A(:,kfirst:klast) //------------------------------------------------------------------ for (int64_t k = kfirst ; k <= klast ; k++) { //-------------------------------------------------------------- // find the part of A(:,k) to be operated on by this task //-------------------------------------------------------------- int64_t j = GBH (Ah, k) ; int64_t pA_start, pA_end ; GB_get_pA (&pA_start, &pA_end, taskid, k, kfirst, klast, pstart_slice, Ap, avlen) ; // pC points to the start of C(:,j) if C is dense or bitmap int64_t pC = j * cvlen ; //-------------------------------------------------------------- // C<A(:,j)> = A(:,j) //-------------------------------------------------------------- if (Mask_struct) { if (C_is_bitmap) { // C<A,struct>=A with C bitmap, A sparse GB_PRAGMA_SIMD_VECTORIZE for (int64_t pA = pA_start ; pA < pA_end ; pA++) { int64_t p = pC + GBI (Ai, pA, avlen) ; // Cx [p] = Ax [pA] GB_COPY_A_TO_C (Cx, p, Ax, pA) ; task_cnvals += (Cb [p] == 0) ; Cb [p] = 1 ; } } else { // C<A,struct>=A with C full, A sparse GB_PRAGMA_SIMD_VECTORIZE for (int64_t pA = pA_start ; pA < 
pA_end ; pA++) { int64_t p = pC + GBI (Ai, pA, avlen) ; // Cx [p] = Ax [pA] GB_COPY_A_TO_C (Cx, p, Ax, pA) ; } } } else { if (C_is_bitmap) { // C<A,struct>=A with C bitmap, A sparse GB_PRAGMA_SIMD_VECTORIZE for (int64_t pA = pA_start ; pA < pA_end ; pA++) { if (GB_AX_MASK (Ax, pA, asize)) { int64_t p = pC + GBI (Ai, pA, avlen) ; // Cx [p] = Ax [pA] GB_COPY_A_TO_C (Cx, p, Ax, pA) ; task_cnvals += (Cb [p] == 0) ; Cb [p] = 1 ; } } } else { // C<A,struct>=A with C dense, A sparse GB_PRAGMA_SIMD_VECTORIZE for (int64_t pA = pA_start ; pA < pA_end ; pA++) { if (GB_AX_MASK (Ax, pA, asize)) { int64_t p = pC + GBI (Ai, pA, avlen) ; // Cx [p] = Ax [pA] GB_COPY_A_TO_C (Cx, p, Ax, pA) ; } } } } } cnvals += task_cnvals ; } } //-------------------------------------------------------------------------- // log the number of entries in the C bitmap //-------------------------------------------------------------------------- if (C_is_bitmap) { C->nvals = cnvals ; } }
GB_binop__second_fp64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__second_fp64 // A.*B function (eWiseMult): GB_AemultB__second_fp64 // A*D function (colscale): GB_AxD__second_fp64 // D*A function (rowscale): GB_DxB__second_fp64 // C+=B function (dense accum): GB_Cdense_accumB__second_fp64 // C+=b function (dense accum): GB_Cdense_accumb__second_fp64 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__second_fp64 // C=scalar+B (none) // C=scalar+B' (none) // C=A+scalar GB_bind2nd__second_fp64 // C=A'+scalar GB_bind2nd_tran__second_fp64 // C type: double // A type: double // B,b type: double // BinaryOp: cij = bij #define GB_ATYPE \ double #define GB_BTYPE \ double #define GB_CTYPE \ double // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ ; // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ double bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ double t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = y ; // op is second #define GB_OP_IS_SECOND \ 1 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_SECOND || GxB_NO_FP64 || GxB_NO_SECOND_FP64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__second_fp64 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__second_fp64 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__second_fp64 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type double double bwork = (*((double *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__second_fp64 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *GB_RESTRICT Cx = (double *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__second_fp64 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *GB_RESTRICT Cx = (double *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__second_fp64 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, 
const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__second_fp64 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *Cx = (double *) Cx_output ; double x = (*((double *) x_input)) ; double *Bx = (double *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; double bij = Bx [p] ; Cx [p] = bij ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__second_fp64 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; double *Cx = (double *) Cx_output ; double *Ax = (double *) Ax_input ; double y = (*((double *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; ; ; Cx [p] = y ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = Ax [pA] ; \ Cx [pC] = aij ; \ } GrB_Info (none) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT 
A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ double #if GB_DISABLE return (GrB_NO_VALUE) ; #else double x = (*((const double *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ double } #endif //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = y ; \ } GrB_Info GB_bind2nd_tran__second_fp64 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double y = (*((const double *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
deconv_2d.h
// Copyright 2018 The MACE Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef MACE_OPS_DECONV_2D_H_ #define MACE_OPS_DECONV_2D_H_ #include <algorithm> #include <string> #include <vector> #include "mace/core/operator.h" #include "mace/core/types.h" #include "mace/ops/activation.h" #include "mace/ops/common/conv_pool_2d_util.h" namespace mace { namespace ops { enum FrameworkType { TENSORFLOW = 0, CAFFE = 1, }; class Deconv2dOpBase : public Operation { public: explicit Deconv2dOpBase(OpConstructContext *context) : Operation(context), strides_(Operation::GetRepeatedArgs<int>("strides")), padding_type_(static_cast<Padding>(Operation::GetOptionalArg<int>( "padding", static_cast<int>(SAME)))), paddings_(Operation::GetRepeatedArgs<int>("padding_values")), group_(Operation::GetOptionalArg<int>("group", 1)), model_type_(static_cast<ops::FrameworkType>( Operation::GetOptionalArg<int>("framework_type", 0))), activation_(ops::StringToActivationType( Operation::GetOptionalArg<std::string>("activation", "NOOP"))), relux_max_limit_( Operation::GetOptionalArg<float>("max_limit", 0.0f)), leakyrelu_coefficient_( Operation::GetOptionalArg<float>("leakyrelu_coefficient", 0.0f)) {} static void CalcDeconvShape_Caffe( const index_t *input_shape, // NHWC const index_t *filter_shape, // OIHW const int *strides, const int *out_paddings, const int group, int *in_paddings, index_t *out_shape, index_t *padded_out_shape, const bool isNCHW = false) { MACE_CHECK_NOTNULL(out_paddings); MACE_CHECK_NOTNULL(input_shape); MACE_CHECK_NOTNULL(filter_shape); MACE_CHECK_NOTNULL(strides); const index_t in_height = isNCHW ? input_shape[2] : input_shape[1]; const index_t in_width = isNCHW ? input_shape[3] : input_shape[2]; const index_t output_channel = filter_shape[0] * group; const index_t kernel_h = filter_shape[2]; const index_t kernel_w = filter_shape[3]; index_t padded_out_height = (in_height - 1) * strides[0] + kernel_h; index_t padded_out_width = (in_width - 1) * strides[1] + kernel_w; if (in_paddings != nullptr) { in_paddings[0] = static_cast<int>((kernel_h - 1) * 2 - out_paddings[0]); in_paddings[1] = static_cast<int>((kernel_w - 1) * 2 - out_paddings[1]); in_paddings[0] = std::max<int>(0, in_paddings[0]); in_paddings[1] = std::max<int>(0, in_paddings[1]); } if (padded_out_shape != nullptr) { padded_out_shape[0] = input_shape[0]; padded_out_shape[1] = isNCHW ? output_channel : padded_out_height; padded_out_shape[2] = isNCHW ? padded_out_height : padded_out_width; padded_out_shape[3] = isNCHW ? padded_out_width : output_channel; } if (out_shape != nullptr) { index_t out_height = padded_out_height - out_paddings[0]; index_t out_width = padded_out_width - out_paddings[1]; out_shape[0] = input_shape[0]; out_shape[1] = isNCHW ? output_channel : out_height; out_shape[2] = isNCHW ? out_height : out_width; out_shape[3] = isNCHW ? 
out_width : output_channel; } } static void CalcDeconvShape_TF( const index_t *input_shape, // NHWC const index_t *filter_shape, // OIHW const index_t *output_shape, const int *strides, const int group, Padding padding_type, int *in_paddings, int *out_paddings, index_t *padded_out_shape, const bool isNCHW = false) { MACE_CHECK_NOTNULL(output_shape); MACE_CHECK_NOTNULL(input_shape); MACE_CHECK_NOTNULL(filter_shape); MACE_CHECK_NOTNULL(strides); const index_t in_height = isNCHW ? input_shape[2] : input_shape[1]; const index_t in_width = isNCHW ? input_shape[3] : input_shape[2]; const index_t out_height = isNCHW ? output_shape[2] : output_shape[1]; const index_t out_width = isNCHW ? output_shape[3] : output_shape[2]; const index_t extended_in_height = (in_height - 1) * strides[0] + 1; const index_t extended_in_width = (in_width - 1) * strides[1] + 1; const index_t kernel_h = filter_shape[2]; const index_t kernel_w = filter_shape[3]; index_t expected_input_height = 0, expected_input_width = 0; switch (padding_type) { case VALID: expected_input_height = (out_height - kernel_h + strides[0]) / strides[0]; expected_input_width = (out_width - kernel_w + strides[1]) / strides[1]; break; case SAME: expected_input_height = (out_height + strides[0] - 1) / strides[0]; expected_input_width = (out_width + strides[1] - 1) / strides[1]; break; default: MACE_CHECK(false, "Unsupported padding type: ", padding_type); } MACE_CHECK(expected_input_height == in_height, expected_input_height, "!=", in_height); MACE_CHECK(expected_input_width == in_width, expected_input_width, "!=", in_width); const index_t padded_out_height = (in_height - 1) * strides[0] + kernel_h; const index_t padded_out_width = (in_width - 1) * strides[1] + kernel_w; if (in_paddings != nullptr) { const int p_h = static_cast<int>(out_height + kernel_h - 1 - extended_in_height); const int p_w = static_cast<int>(out_width + kernel_w - 1 - extended_in_width); in_paddings[0] = std::max<int>(0, p_h); in_paddings[1] = std::max<int>(0, p_w); } if (out_paddings != nullptr) { const int o_p_h = static_cast<int>(padded_out_height - out_height); const int o_p_w = static_cast<int>(padded_out_width - out_width); out_paddings[0] = std::max<int>(0, o_p_h); out_paddings[1] = std::max<int>(0, o_p_w); } if (padded_out_shape != nullptr) { index_t output_channel = filter_shape[0] * group; padded_out_shape[0] = output_shape[0]; padded_out_shape[1] = isNCHW ? output_channel : padded_out_height; padded_out_shape[2] = isNCHW ? padded_out_height : padded_out_width; padded_out_shape[3] = isNCHW ? 
padded_out_width : output_channel; } } protected: std::vector<int> strides_; // [stride_h, stride_w] const Padding padding_type_; std::vector<int> paddings_; const int group_; const FrameworkType model_type_; const ActivationType activation_; const float relux_max_limit_; const float leakyrelu_coefficient_; }; template <typename T> void CropPadOut(const T *input, const index_t *in_shape, const index_t *out_shape, const index_t pad_h, const index_t pad_w, T *output) { const index_t batch = in_shape[0]; const index_t channel = in_shape[1]; const index_t in_height = in_shape[2]; const index_t in_width = in_shape[3]; const index_t out_height = out_shape[2]; const index_t out_width = out_shape[3]; #pragma omp parallel for collapse(3) for (int i = 0; i < batch; ++i) { for (int j = 0; j < channel; ++j) { for (int k = 0; k < out_height; ++k) { const T *input_base = input + ((i * channel + j) * in_height + (k + pad_h)) * in_width; T *output_base = output + ((i * channel + j) * out_height + k)* out_width; memcpy(output_base, input_base + pad_w, out_width * sizeof(T)); } } } } } // namespace ops } // namespace mace #endif // MACE_OPS_DECONV_2D_H_
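/*
 * Hedged standalone walk-through of the shape arithmetic used by
 * Deconv2dOpBase::CalcDeconvShape_TF above (the numbers are illustrative):
 * for SAME padding, expected_input = ceil(out / stride); the padded output is
 * (in - 1) * stride + kernel; out_paddings then crop it back to the requested
 * output size.
 */
#include <cstdio>

int main() {
  const int in_h = 8, stride = 2, kernel = 3;
  const int out_h = 16;                                    // SAME: ceil(16/2) == 8 == in_h

  const int extended_in_h = (in_h - 1) * stride + 1;       // 15
  const int padded_out_h  = (in_h - 1) * stride + kernel;  // 17

  const int expected_in_h = (out_h + stride - 1) / stride;      // SAME branch: 8
  const int in_pad_h  = out_h + kernel - 1 - extended_in_h;     // 16 + 3 - 1 - 15 = 3
  const int out_pad_h = padded_out_h - out_h;                   // 17 - 16 = 1

  std::printf("expected_in=%d padded_out=%d in_pad=%d out_pad=%d\n",
              expected_in_h, padded_out_h, in_pad_h, out_pad_h);
  return (expected_in_h == in_h) ? 0 : 1;
}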
MicroMlpAffine.h
// -------------------------------------------------------------------------- // Binary Brain -- binary neural net framework // // Copyright (C) 2018 by Ryuji Fuchikami // https://github.com/ryuz // ryuji.fuchikami@nifty.com // -------------------------------------------------------------------------- #pragma once #include <cstdint> #include <random> #include "bb/Manager.h" #include "bb/SparseModel.h" #include "bb/ShuffleSet.h" namespace bb { // Mini-MLP Affine template <int N = 6, int M = 16, typename FXT = float, typename T = float> class MicroMlpAffine : public SparseModel { using _super = SparseModel; public: static inline std::string ModelName(void) { return "MicroMlpAffine" + std::to_string(N) + "_" + std::to_string(M); } static inline std::string ObjectName(void){ return ModelName() + "_" + DataType<FXT>::Name() + "_" + DataType<T>::Name(); } std::string GetModelName(void) const override { return ModelName(); } std::string GetObjectName(void) const override { return ObjectName(); } protected: public: // debug bool m_binary_mode = false; bool m_host_only = false; bool m_host_simd = false; std::string m_connection; T m_initialize_std = (T)0.01; std::string m_initializer = "he"; std::mt19937_64 m_mt; index_t m_input_node_size = 0; index_t m_output_node_size = 0; indices_t m_input_shape; indices_t m_output_shape; Tensor_<std::int32_t> m_input_index; std::shared_ptr<Tensor> m_W0; std::shared_ptr<Tensor> m_b0; std::shared_ptr<Tensor> m_dW0; std::shared_ptr<Tensor> m_db0; std::shared_ptr<Tensor> m_W1; std::shared_ptr<Tensor> m_b1; std::shared_ptr<Tensor> m_dW1; std::shared_ptr<Tensor> m_db1; public: FrameBuffer m_x_buf; protected: MicroMlpAffine() { m_W0 = std::make_shared<Tensor>(); m_b0 = std::make_shared<Tensor>(); m_dW0 = std::make_shared<Tensor>(); m_db0 = std::make_shared<Tensor>(); m_W1 = std::make_shared<Tensor>(); m_b1 = std::make_shared<Tensor>(); m_dW1 = std::make_shared<Tensor>(); m_db1 = std::make_shared<Tensor>(); } void CommandProc(std::vector<std::string> args) { // バイナリモード設定 if ( args.size() == 2 && args[0] == "binary" ) { m_binary_mode = EvalBool(args[1]); } // HostOnlyモード設定 if (args.size() == 2 && args[0] == "host_only") { m_host_only = EvalBool(args[1]); } // Host SIMDモード設定 if (args.size() == 2 && args[0] == "host_simd") { m_host_simd = EvalBool(args[1]); } } public: ~MicroMlpAffine() {} struct create_t { indices_t output_shape; std::string connection; T initialize_std = (T)0.01; std::string initializer = ""; std::uint64_t seed = 1; }; static std::shared_ptr<MicroMlpAffine> Create(create_t const &create) { auto self = std::shared_ptr<MicroMlpAffine>(new MicroMlpAffine); BB_ASSERT(!create.output_shape.empty()); self->m_initialize_std = create.initialize_std; self->m_initializer = create.initializer; self->m_mt.seed(create.seed); self->m_output_shape = create.output_shape; self->m_output_node_size = CalcShapeSize(self->m_output_shape); self->m_connection = create.connection; return self; } static std::shared_ptr<MicroMlpAffine> Create(indices_t const &output_shape, std::string connection = "", std::uint64_t seed = 1) { create_t create; create.output_shape = output_shape; create.connection = connection; create.seed = seed; return Create(create); } static std::shared_ptr<MicroMlpAffine> Create(index_t output_node_size, std::string connection = "", std::uint64_t seed = 1) { create_t create; create.output_shape.resize(1); create.output_shape[0] = output_node_size; create.connection = connection; create.seed = seed; return Create(create); } static 
std::shared_ptr<MicroMlpAffine> Create(void) { return Create(create_t()); } // シリアライズ protected: void DumpObjectData(std::ostream &os) const override { // バージョン std::int64_t ver = 1; bb::SaveValue(os, ver); // 親クラス _super::DumpObjectData(os); // メンバ bb::SaveValue(os, m_binary_mode); bb::SaveValue(os, m_host_simd); bb::SaveValue(os, m_host_only); bb::SaveValue(os, m_connection); bb::SaveValue(os, m_initialize_std); bb::SaveValue(os, m_initializer); bb::SaveValue(os, m_input_shape); bb::SaveValue(os, m_output_shape); m_input_index.DumpObject(os); m_W0->DumpObject(os); m_b0->DumpObject(os); m_W1->DumpObject(os); m_b1->DumpObject(os); } void LoadObjectData(std::istream &is) override { // バージョン std::int64_t ver; bb::LoadValue(is, ver); BB_ASSERT(ver == 1); // 親クラス _super::LoadObjectData(is); // メンバ bb::LoadValue(is, m_binary_mode); bb::LoadValue(is, m_host_simd); bb::LoadValue(is, m_host_only); bb::LoadValue(is, m_connection); bb::LoadValue(is, m_initialize_std); bb::LoadValue(is, m_initializer); bb::LoadValue(is, m_input_shape); bb::LoadValue(is, m_output_shape); m_input_index.LoadObject(is); m_W0->LoadObject(is); m_b0->LoadObject(is); m_W1->LoadObject(is); m_b1->LoadObject(is); // 再構築 m_input_node_size = CalcShapeSize(m_input_shape); m_output_node_size = CalcShapeSize(m_output_shape); m_dW0->Resize(m_W0->GetShape(), m_W0->GetType()); m_db0->Resize(m_b0->GetShape(), m_b0->GetType()); m_dW1->Resize(m_W1->GetShape(), m_W1->GetType()); m_db1->Resize(m_b1->GetShape(), m_b1->GetType()); } public: // Serialize(旧) void Save(std::ostream &os) const { SaveIndex(os, m_input_node_size); SaveIndex(os, m_output_node_size); SaveIndices(os, m_input_shape); SaveIndices(os, m_output_shape); m_input_index.Save(os); m_W0->Save(os); m_b0->Save(os); m_W1->Save(os); m_b1->Save(os); } void Load(std::istream &is) { m_input_node_size = LoadIndex(is); m_output_node_size = LoadIndex(is); m_input_shape = LoadIndices(is); m_output_shape = LoadIndices(is); m_input_index.Load(is); m_W0->Load(is); m_b0->Load(is); m_W1->Load(is); m_b1->Load(is); m_dW0->Resize(m_W0->GetShape(), m_W0->GetType()); m_db0->Resize(m_b0->GetShape(), m_b0->GetType()); m_dW1->Resize(m_W1->GetShape(), m_W1->GetType()); m_db1->Resize(m_b1->GetShape(), m_b1->GetType()); } #ifdef BB_WITH_CEREAL template <class Archive> void save(Archive& archive, std::uint32_t const version) const { _super::save(archive, version); archive(cereal::make_nvp("input_node_size", m_input_node_size)); archive(cereal::make_nvp("output_node_size", m_output_node_size)); archive(cereal::make_nvp("input_shape", m_input_shape)); archive(cereal::make_nvp("output_shape", m_output_shape)); archive(cereal::make_nvp("input_index", m_input_index)); archive(cereal::make_nvp("W0", *m_W0)); archive(cereal::make_nvp("b0", *m_b0)); archive(cereal::make_nvp("W1", *m_W1)); archive(cereal::make_nvp("b1", *m_b1)); // archive(cereal::make_nvp("dW0", *m_dW0)); // archive(cereal::make_nvp("db0", *m_db0)); // archive(cereal::make_nvp("dW1", *m_dW1)); // archive(cereal::make_nvp("db1", *m_db1)); } template <class Archive> void load(Archive& archive, std::uint32_t const version) { _super::load(archive, version); archive(cereal::make_nvp("input_node_size", m_input_node_size)); archive(cereal::make_nvp("output_node_size", m_output_node_size)); archive(cereal::make_nvp("input_shape", m_input_shape)); archive(cereal::make_nvp("output_shape", m_output_shape)); archive(cereal::make_nvp("input_index", m_input_index)); archive(cereal::make_nvp("W0", *m_W0)); archive(cereal::make_nvp("b0", *m_b0)); 
archive(cereal::make_nvp("W1", *m_W1)); archive(cereal::make_nvp("b1", *m_b1)); // archive(cereal::make_nvp("dW0", *m_dW0)); // archive(cereal::make_nvp("db0", *m_db0)); // archive(cereal::make_nvp("dW1", *m_dW1)); // archive(cereal::make_nvp("db1", *m_db1)); } void Save(cereal::JSONOutputArchive& archive) const { archive(cereal::make_nvp("MicroMlpAffine", *this)); } void Load(cereal::JSONInputArchive& archive) { archive(cereal::make_nvp("MicroMlpAffine", *this)); } #endif Tensor &W0(void) { return *m_W0; } Tensor const &W0(void) const { return *m_W0; } Tensor &b0(void) { return *m_b0; } Tensor const &b0(void) const { return *m_b0; } Tensor &W1(void) { return *m_W1; } Tensor const &W1(void) const { return *m_W1; } Tensor &b1(void) { return *m_b1; } Tensor const &b1(void) const { return *m_b1; } Tensor &dW0(void) { return *m_dW0; } Tensor const &dW0(void) const { return *m_dW0; } Tensor &db0(void) { return *m_db0; } Tensor const &db0(void) const { return *m_db0; } Tensor &dW1(void) { return *m_dW1; } Tensor const &dW1(void) const { return *m_dW1; } Tensor &db1(void) { return *m_db1; } Tensor const &db1(void) const { return *m_db1; } auto lock_InputIndex(void) { return m_input_index.Lock(); } auto lock_InputIndex_const(void) const { return m_input_index.LockConst(); } auto lock_W0(void) { return m_W0->Lock<T>(); } auto lock_W0_const(void) const { return m_W0->LockConst<T>(); } auto lock_b0(void) { return m_b0->Lock<T>(); } auto lock_b0_const(void) const { return m_b0->LockConst<T>(); } auto lock_W1(void) { return m_W1->Lock<T>(); } auto lock_W1_const(void) const { return m_W1->LockConst<T>(); } auto lock_b1(void) { return m_b1->Lock<T>(); } auto lock_b1_const(void) const { return m_b1->LockConst<T>(); } auto lock_dW0(void) { return m_dW0->Lock<T>(); } auto lock_dW0_const(void) const { return m_dW0->LockConst<T>(); } auto lock_db0(void) { return m_db0->Lock<T>(); } auto lock_db0_const(void) const { return m_db0->LockConst<T>(); } auto lock_dW1(void) { return m_dW1->Lock<T>(); } auto lock_dW1_const(void) const { return m_dW1->LockConst<T>(); } auto lock_db1(void) { return m_db1->Lock<T>(); } auto lock_db1_const(void) const { return m_db1->LockConst<T>(); } index_t GetNodeConnectionSize(index_t node) const { return N; } void SetNodeConnectionIndex(index_t node, index_t input_index, index_t input_node) { auto ptr = lock_InputIndex(); ptr(node, input_index) = (std::int32_t)input_node; } index_t GetNodeConnectionIndex(index_t node, index_t input_index) const { auto ptr = lock_InputIndex_const(); return (index_t)ptr(node, input_index); } /** * @brief 入力のshape設定 * @detail 入力のshape設定 * @param shape 新しいshape * @return なし */ indices_t SetInputShape(indices_t shape) { // 設定済みなら何もしない if ( shape == this->GetInputShape() ) { return this->GetOutputShape(); } // 形状設定 m_input_shape = shape; m_input_node_size = CalcShapeSize(shape); // 接続初期化 m_input_index.Resize(m_output_node_size, N); this->InitializeNodeInput(m_mt(), m_connection); // パラメータ初期化 m_W0->Resize({m_output_node_size, M, N}, DataType<T>::type); m_b0->Resize({m_output_node_size, M}, DataType<T>::type); m_W1->Resize({m_output_node_size, M}, DataType<T>::type); m_b1->Resize({m_output_node_size}, DataType<T>::type); m_dW0->Resize({m_output_node_size, M, N}, DataType<T>::type); m_db0->Resize({m_output_node_size, M}, DataType<T>::type); m_dW1->Resize({m_output_node_size, M}, DataType<T>::type); m_db1->Resize({m_output_node_size}, DataType<T>::type); if (m_initializer == "he" || m_initializer == "He") { m_initialize_std = (T)std::sqrt((double)2.0 / 
(double)N); m_W0->InitNormalDistribution(0.0, m_initialize_std, m_mt()); m_b0->InitNormalDistribution(0.0, m_initialize_std, m_mt()); m_W1->InitNormalDistribution(0.0, m_initialize_std, m_mt()); m_b1->InitNormalDistribution(0.0, m_initialize_std, m_mt()); } else if (m_initializer == "xavier" || m_initializer == "Xavier" ) { m_initialize_std = (T)std::sqrt((double)1.0 / (double)N); m_W0->InitNormalDistribution(0.0, m_initialize_std, m_mt()); m_b0->InitNormalDistribution(0.0, m_initialize_std, m_mt()); m_W1->InitNormalDistribution(0.0, m_initialize_std, m_mt()); m_b1->InitNormalDistribution(0.0, m_initialize_std, m_mt()); } else if (m_initializer == "normal" || m_initializer == "Normal" ) { m_W0->InitNormalDistribution(0.0, m_initialize_std, m_mt()); m_b0->InitNormalDistribution(0.0, m_initialize_std, m_mt()); m_W1->InitNormalDistribution(0.0, m_initialize_std, m_mt()); m_b1->InitNormalDistribution(0.0, m_initialize_std, m_mt()); } else if (m_initializer == "uniform" || m_initializer == "Uniform" ) { double k = (double)m_initialize_std * std::sqrt(3.0); m_W0->InitUniformDistribution(-k, +k, m_mt()); m_b0->InitUniformDistribution(-k, +k, m_mt()); m_W1->InitUniformDistribution(-k, +k, m_mt()); m_b1->InitUniformDistribution(-k, +k, m_mt()); } else { double k = std::sqrt(1.0 / (double)N); m_W0->InitUniformDistribution(-k, +k, m_mt()); m_b0->InitUniformDistribution(-k, +k, m_mt()); m_W1->InitUniformDistribution(-k, +k, m_mt()); m_b1->InitUniformDistribution(-k, +k, m_mt()); } m_dW0->FillZero(); m_db0->FillZero(); m_dW1->FillZero(); m_db1->FillZero(); return m_output_shape; } /** * @brief 出力のshape設定 * @detail 出力のshape設定 * 出力ノード数が変わらない限りshpeは自由 * @param shape 新しいshape * @return なし */ void SetOutputShape(indices_t const &shape) { BB_ASSERT(CalcShapeSize(shape) == m_output_node_size); m_output_shape = shape; } /** * @brief 入力形状取得 * @detail 入力形状を取得する * @return 入力形状を返す */ indices_t GetInputShape(void) const { return m_input_shape; } /** * @brief 出力形状取得 * @detail 出力形状を取得する * @return 出力形状を返す */ indices_t GetOutputShape(void) const { return m_output_shape; } Variables GetParameters(void) { Variables parameters; parameters.PushBack(m_W0); parameters.PushBack(m_b0); parameters.PushBack(m_W1); parameters.PushBack(m_b1); return parameters; } Variables GetGradients(void) { Variables gradients; gradients.PushBack(m_dW0); gradients.PushBack(m_db0); gradients.PushBack(m_dW1); gradients.PushBack(m_db1); return gradients; } void SetFrameBufferX(FrameBuffer x) { m_x_buf = x; } FrameBuffer GetFrameBufferX(void) { return m_x_buf; } // ノード単位でのForward計算 std::vector<double> ForwardNode(index_t node, std::vector<double> input_value) const { auto W0 = lock_W0_const(); auto b0 = lock_b0_const(); auto W1 = lock_W1_const(); auto b1 = lock_b1_const(); // affine0 std::vector<T> value0(M); for (std::size_t i = 0; i < M; ++i) { value0[i] = b0(node, i); for (std::size_t j = 0; j < N; ++j) { value0[i] += (T)input_value[j] * W0(node, i, j); } } // ReLU for (std::size_t i = 0; i < M; ++i) { value0[i] = std::max(value0[i], (T)0.0); } // affine1 std::vector<T> value1(1); value1[0] = b1(node); for (std::size_t i = 0; i < M; ++i) { value1[0] = value1[0] + value0[i] * W1(node, i); } // 型変換 std::vector<double> value2(M); for (std::size_t i = 0; i < M; ++i) { value2[i] = (double)value1[i]; } return value2; } FrameBuffer Forward(FrameBuffer x_buf, bool train = true) { BB_ASSERT(x_buf.GetType() == DataType<FXT>::type); // SetInputShpaeされていなければ初回に設定 if ( x_buf.GetNodeSize() != m_input_node_size) { SetInputShape(x_buf.GetShape()); } // 
backwardの為に保存 if ( train ) { m_x_buf = x_buf; } // 出力を設定 FrameBuffer y_buf(x_buf.GetFrameSize(), m_output_shape, DataType<T>::type); // バイナリモードならパラメータクリップ if (m_binary_mode) { m_W0->Clamp(-1.0, +1.0); m_b0->Clamp(-1.0, +1.0); m_W1->Clamp(-1.0, +1.0); m_b1->Clamp(-1.0, +1.0); } #ifdef BB_WITH_CUDA // FP32 CUDA版 if ( N == 6 && M == 16 && DataType<FXT>::type == BB_TYPE_FP32 && DataType<T>::type == BB_TYPE_FP32 && !m_host_only && x_buf.IsDeviceAvailable() && y_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable() ) { auto input_index_ptr = m_input_index.LockDeviceMemoryConst(); auto x_ptr = x_buf.LockDeviceMemoryConst(); auto y_ptr = y_buf.LockDeviceMemory(); auto W0_ptr = m_W0->LockDeviceMemoryConst(); auto b0_ptr = m_b0->LockDeviceMemoryConst(); auto W1_ptr = m_W1->LockDeviceMemoryConst(); auto b1_ptr = m_b1->LockDeviceMemoryConst(); bbcu_fp32_MicroMlp6x16_Forward ( (float const *)x_ptr.GetAddr(), (float *)y_ptr.GetAddr(), (int const *)input_index_ptr.GetAddr(), (float const *)W0_ptr.GetAddr(), (float const *)b0_ptr.GetAddr(), (float const *)W1_ptr.GetAddr(), (float const *)b1_ptr.GetAddr(), (int )m_input_node_size, (int )m_output_node_size, (int )x_buf.GetFrameSize(), (int )(x_buf.GetFrameStride() / sizeof(float)) ); return y_buf; } #endif #ifdef BB_WITH_CUDA // Bit CUDA版 if ( N == 6 && M == 16 && DataType<FXT>::type == BB_TYPE_BIT && DataType<T>::type == BB_TYPE_FP32 && !m_host_only && x_buf.IsDeviceAvailable() && y_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable() ) { auto input_index_ptr = m_input_index.LockDeviceMemoryConst(); auto x_ptr = x_buf.LockDeviceMemoryConst(); auto y_ptr = y_buf.LockDeviceMemory(); auto W0_ptr = m_W0->LockDeviceMemoryConst(); auto b0_ptr = m_b0->LockDeviceMemoryConst(); auto W1_ptr = m_W1->LockDeviceMemoryConst(); auto b1_ptr = m_b1->LockDeviceMemoryConst(); bbcu_bit_fp32_MicroMlp6x16_Forward ( (int const *)x_ptr.GetAddr(), (float *)y_ptr.GetAddr(), (int const *)input_index_ptr.GetAddr(), (float const *)W0_ptr.GetAddr(), (float const *)b0_ptr.GetAddr(), (float const *)W1_ptr.GetAddr(), (float const *)b1_ptr.GetAddr(), (int )m_input_node_size, (int )m_output_node_size, (int )x_buf.GetFrameSize(), (int )(x_buf.GetFrameStride() / sizeof(float)), (int )(y_buf.GetFrameStride() / sizeof(float)) ); return y_buf; } #endif // AVX版 if ( DataType<FXT>::type == BB_TYPE_FP32 && DataType<T>::type == BB_TYPE_FP32 && m_host_simd ) { const index_t frame_size = x_buf.GetFrameStride() / sizeof(float); const __m256 zero = _mm256_set1_ps(0); auto x_ptr = x_buf.LockMemoryConst(); auto y_ptr = y_buf.LockMemory(); auto input_index_ptr = m_input_index.LockConst(); auto W0_ptr = lock_W0_const(); auto b0_ptr = lock_b0_const(); auto W1_ptr = lock_W1_const(); auto b1_ptr = lock_b1_const(); auto in_sig_buf = (float const *)x_ptr.GetAddr(); auto out_sig_buf = (float *)y_ptr.GetAddr(); #pragma omp parallel for for (index_t node = 0; node < m_output_node_size; ++node) { __m256 W0[M][N]; __m256 b0[M]; __m256 W1[M]; __m256 b1; for (int i = 0; i < M; ++i) { for (int j = 0; j < N; ++j) { W0[i][j] = _mm256_set1_ps(W0_ptr(node, i, j)); } b0[i] = _mm256_set1_ps(b0_ptr(node, i)); W1[i] = _mm256_set1_ps(W1_ptr(node, i)); } b1 = _mm256_set1_ps(b1_ptr(node)); float const *in_sig_ptr[N]; float *out_sig_ptr; for (int i = 0; i < N; ++i) { in_sig_ptr[i] = &in_sig_buf[input_index_ptr(node, i) * frame_size]; } out_sig_ptr = &out_sig_buf[node * frame_size]; for (index_t frame = 0; frame < frame_size; frame += 8) { __m256 in_sig[N]; for (int i = 0; i < N; ++i) { in_sig[i] = 
_mm256_load_ps(&in_sig_ptr[i][frame]); } __m256 sum1 = b1; for (int i = 0; i < M; ++i) { // sub-layer0 __m256 sum0 = b0[i]; for (int j = 0; j < N; ++j) { sum0 = _mm256_fmadd_ps(in_sig[j], W0[i][j], sum0); } // ReLU sum0 = _mm256_max_ps(sum0, zero); // sub-layer1 sum1 = _mm256_fmadd_ps(sum0, W1[i], sum1); } _mm256_store_ps(&out_sig_ptr[frame], sum1); } } return y_buf; } { // 汎用版 auto frame_size = x_buf.GetFrameSize(); auto x_ptr = x_buf.LockConst<FXT>(); auto y_ptr = y_buf.Lock<T>(); auto input_index_ptr = m_input_index.LockConst(); auto W0_ptr = lock_W0_const(); auto b0_ptr = lock_b0_const(); auto W1_ptr = lock_W1_const(); auto b1_ptr = lock_b1_const(); #pragma omp parallel for for ( index_t node = 0; node < m_output_node_size; ++node ) { index_t in_idx[N]; for ( int i = 0; i < N; ++i) { in_idx[i] = input_index_ptr(node, i); } for (index_t frame = 0; frame < frame_size; ++frame ) { T in_sig[N]; for ( int i = 0; i < N; ++i) { in_sig[i] = (T)x_ptr.Get(frame, in_idx[i]); } T sum1 = b1_ptr(node); for (int i = 0; i < M; ++i) { // sub-layer0 T sum0 = b0_ptr(node, i); for (int j = 0; j < N; ++j) { sum0 += in_sig[j] * W0_ptr(node, i, j); } // ReLU sum0 = sum0 > (T)0 ? sum0 : (T)0; // sub-layer1 sum1 += sum0 * W1_ptr(node, i); } y_ptr.Set(frame, node, sum1); } } return y_buf; } } FrameBuffer Backward(FrameBuffer dy_buf) { if (dy_buf.Empty()) { return FrameBuffer(); } BB_ASSERT(dy_buf.GetType() == DataType<T>::type); // forward時データ取り出し FrameBuffer x_buf = m_x_buf; m_x_buf = FrameBuffer(); BB_ASSERT(x_buf.GetType() == DataType<FXT>::type); // 出力設定 FrameBuffer dx_buf(dy_buf.GetFrameSize(), m_input_shape, DataType<T>::type); // CUDA版 #ifdef BB_WITH_CUDA if ( N == 6 && M == 16 && DataType<FXT>::type == BB_TYPE_FP32 && DataType<T>::type == BB_TYPE_FP32 && !m_host_only && x_buf.IsDeviceAvailable() && dx_buf.IsDeviceAvailable() && dy_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable() ) { // CUDA版 auto input_index_ptr = m_input_index.LockDeviceMemoryConst(); auto x_ptr = x_buf.LockDeviceMemoryConst(); auto dy_ptr = dy_buf.LockDeviceMemoryConst(); auto dx_ptr = dx_buf.LockDeviceMemory(); auto W0_ptr = m_W0->LockDeviceMemoryConst(); auto b0_ptr = m_b0->LockDeviceMemoryConst(); auto W1_ptr = m_W1->LockDeviceMemoryConst(); auto b1_ptr = m_b1->LockDeviceMemoryConst(); auto dW0_ptr = m_dW0->LockDeviceMemory(); auto db0_ptr = m_db0->LockDeviceMemory(); auto dW1_ptr = m_dW1->LockDeviceMemory(); auto db1_ptr = m_db1->LockDeviceMemory(); FrameBuffer dx_tmp(dy_buf.GetFrameSize(), {m_output_node_size * N}, BB_TYPE_FP32); auto dx_tmp_ptr = dx_tmp.LockDeviceMemory(); bbcu_fp32_MicroMlp6x16_Backward ( (float const *)x_ptr.GetAddr(), (float *)dy_ptr.GetAddr(), (float *)dx_ptr.GetAddr(), (float *)dx_tmp_ptr.GetAddr(), (int const *)input_index_ptr.GetAddr(), (float const *)W0_ptr.GetAddr(), (float const *)b0_ptr.GetAddr(), (float *)dW0_ptr.GetAddr(), (float *)db0_ptr.GetAddr(), (float const *)W1_ptr.GetAddr(), (float const *)b1_ptr.GetAddr(), (float *)dW1_ptr.GetAddr(), (float *)db1_ptr.GetAddr(), (int )m_input_node_size, (int )m_output_node_size, (int )dy_buf.GetFrameSize(), (int )dy_buf.GetFrameStride() / sizeof(float) ); return dx_buf; } #endif #ifdef BB_WITH_CUDA if ( N == 6 && M == 16 && DataType<FXT>::type == BB_TYPE_BIT && DataType<T>::type == BB_TYPE_FP32 && !m_host_only && x_buf.IsDeviceAvailable() && dx_buf.IsDeviceAvailable() && dy_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable() ) { // CUDA版 auto input_index_ptr = m_input_index.LockDeviceMemoryConst(); auto x_ptr = x_buf.LockDeviceMemoryConst(); auto 
dy_ptr = dy_buf.LockDeviceMemoryConst(); auto dx_ptr = dx_buf.LockDeviceMemory(); auto W0_ptr = m_W0->LockDeviceMemoryConst(); auto b0_ptr = m_b0->LockDeviceMemoryConst(); auto W1_ptr = m_W1->LockDeviceMemoryConst(); auto b1_ptr = m_b1->LockDeviceMemoryConst(); auto dW0_ptr = m_dW0->LockDeviceMemory(); auto db0_ptr = m_db0->LockDeviceMemory(); auto dW1_ptr = m_dW1->LockDeviceMemory(); auto db1_ptr = m_db1->LockDeviceMemory(); FrameBuffer dx_tmp(dy_buf.GetFrameSize(), {m_output_node_size * N}, BB_TYPE_FP32); auto dx_tmp_ptr = dx_tmp.LockDeviceMemory(); bbcu_bit_fp32_MicroMlp6x16_Backward ( (int const *)x_ptr.GetAddr(), (float *)dy_ptr.GetAddr(), (float *)dx_ptr.GetAddr(), (float *)dx_tmp_ptr.GetAddr(), (int const *)input_index_ptr.GetAddr(), (float const *)W0_ptr.GetAddr(), (float const *)b0_ptr.GetAddr(), (float *)dW0_ptr.GetAddr(), (float *)db0_ptr.GetAddr(), (float const *)W1_ptr.GetAddr(), (float const *)b1_ptr.GetAddr(), (float *)dW1_ptr.GetAddr(), (float *)db1_ptr.GetAddr(), (int )m_input_node_size, (int )m_output_node_size, (int )dy_buf.GetFrameSize(), (int )x_buf.GetFrameStride() / sizeof(int), (int )dy_buf.GetFrameStride() / sizeof(float) ); return dx_buf; } #endif // m_dW0->FillZero(); // m_db0->FillZero(); // m_dW1->FillZero(); // m_db1->FillZero(); // AVX版 if ( DataType<FXT>::type == BB_TYPE_FP32 && DataType<T>::type == BB_TYPE_FP32 ) { index_t frame_size = dy_buf.GetFrameStride() / sizeof(float); index_t node_size = m_output_node_size; dx_buf.FillZero(); auto dy_ptr = dy_buf.LockMemoryConst(); auto dx_ptr = dx_buf.LockMemory(); auto x_ptr = x_buf.LockMemoryConst(); auto input_index_ptr = m_input_index.LockConst(); auto W0_ptr = lock_W0_const(); auto b0_ptr = lock_b0_const(); auto W1_ptr = lock_W1_const(); auto b1_ptr = lock_b1_const(); auto dW0_ptr = lock_dW0(); auto db0_ptr = lock_db0(); auto dW1_ptr = lock_dW1(); auto db1_ptr = lock_db1(); auto dy_addr = (float const *)dy_ptr.GetAddr(); auto dx_addr = (float *)dx_ptr.GetAddr(); auto x_addr = (float const *)x_ptr.GetAddr(); const __m256 zero = _mm256_set1_ps(0); FrameBuffer dx_tmp(dy_buf.GetFrameSize(), {m_output_node_size * N}, BB_TYPE_FP32); auto dx_tmp_ptr = dx_tmp.Lock<float>(); #pragma omp parallel for for (int node = 0; node < (int)node_size; ++node) { __m256 W0[M][N]; __m256 b0[M]; __m256 dW0[M][N]; __m256 db0[M]; __m256 W1[M]; __m256 dW1[M]; __m256 db1; for (int i = 0; i < M; ++i) { for (int j = 0; j < N; ++j) { W0[i][j] = _mm256_set1_ps(W0_ptr (node, i, j)); dW0[i][j] = _mm256_set1_ps(0.0f); } b0[i] = _mm256_set1_ps(b0_ptr(node, i)); db0[i] = _mm256_set1_ps(0.0f); W1[i] = _mm256_set1_ps(W1_ptr(node, i)); dW1[i] = _mm256_set1_ps(0.0f); } db1 = _mm256_set1_ps(0.0f); float const *out_err_ptr; float const *in_sig_ptr[N]; out_err_ptr = &dy_addr[frame_size * node]; for (int i = 0; i < N; ++i) { in_sig_ptr[i] = &x_addr[frame_size * input_index_ptr(node, i)]; } for (int frame = 0; frame < frame_size; frame += 8) { __m256 in_sig[N]; for (int i = 0; i < N; ++i) { in_sig[i] = _mm256_load_ps(&in_sig_ptr[i][frame]); } // 一層目の信号を再構成 __m256 sig0[M]; for (int i = 0; i < M; ++i) { // sub-layer0 __m256 sum0 = b0[i]; for (int j = 0; j < N; ++j) { sum0 = _mm256_fmadd_ps(in_sig[j], W0[i][j], sum0); } // ReLU sum0 = _mm256_max_ps(sum0, zero); sig0[i] = sum0; } // 逆伝播 __m256 in_err[N]; for (int i = 0; i < N; ++i) { in_err[i] = zero; } __m256 out_err = _mm256_load_ps(&out_err_ptr[frame]); db1 = _mm256_add_ps(db1, out_err); for (int i = 0; i < M; ++i) { __m256 err0 = _mm256_mul_ps(W1[i], out_err); __m256 mask = _mm256_cmp_ps(sig0[i], zero, 
_CMP_GT_OS); dW1[i] = _mm256_fmadd_ps(sig0[i], out_err, dW1[i]); err0 = _mm256_and_ps(err0, mask); // ReLU db0[i] = _mm256_add_ps(db0[i], err0); for (int j = 0; j < N; ++j) { in_err[j] = _mm256_fmadd_ps(err0, W0[i][j], in_err[j]); dW0[i][j] = _mm256_fmadd_ps(err0, in_sig[j], dW0[i][j]); } } for (int i = 0; i < N; ++i) { float* tmp_dx_addr = dx_tmp_ptr.GetAddr(node * N + i); _mm256_store_ps(&tmp_dx_addr[frame], in_err[i]); } } for (int i = 0; i < M; ++i) { for (int j = 0; j < N; ++j) { dW0_ptr(node, i, j) += bb_mm256_cvtss_f32(bb_mm256_hsum_ps(dW0[i][j])); } db0_ptr(node, i) += bb_mm256_cvtss_f32(bb_mm256_hsum_ps(db0[i])); dW1_ptr(node, i) += bb_mm256_cvtss_f32(bb_mm256_hsum_ps(dW1[i])); } db1_ptr(node) += bb_mm256_cvtss_f32(bb_mm256_hsum_ps(db1)); } // 足しこみ for (int node = 0; node < (int)node_size; ++node) { float* in_err_ptr[N]; for (int i = 0; i < N; ++i) { in_err_ptr[i] = &dx_addr[frame_size * input_index_ptr(node, i)]; } #pragma omp parallel for for (int frame = 0; frame < frame_size; frame += 8) { for (int i = 0; i < N; ++i) { __m256 in_err = _mm256_load_ps(&in_err_ptr[i][frame]); float* tmp_dx_addr = dx_tmp_ptr.GetAddr(node * N + i); __m256 tmp_err = _mm256_load_ps(&tmp_dx_addr[frame]); in_err = _mm256_add_ps(in_err, tmp_err); _mm256_store_ps(&in_err_ptr[i][frame], in_err); } } } return dx_buf; } { // 汎用版 index_t frame_size = dy_buf.GetFrameSize(); index_t node_size = m_output_node_size; dx_buf.FillZero(); auto dy_ptr = dy_buf.LockConst<T>(); auto dx_ptr = dx_buf.Lock<T>(); auto x_ptr = x_buf.LockConst<FXT>(); auto input_index_ptr = m_input_index.Lock(); auto W0_ptr = lock_W0_const(); auto b0_ptr = lock_b0_const(); auto W1_ptr = lock_W1_const(); auto b1_ptr = lock_b1_const(); auto dW0_ptr = lock_dW0(); auto db0_ptr = lock_db0(); auto dW1_ptr = lock_dW1(); auto db1_ptr = lock_db1(); // FrameBuffer dx_tmp(dy_buf.GetFrameSize(), m_output_node_size * N, BB_TYPE_FP32); // auto dx_tmp_ptr = dx_tmp.Lock<float>(); // #pragma omp parallel for for (int node = 0; node < (int)node_size; ++node) { float W0[M][N]; float b0[M]; float dW0[M][N]; float db0[M]; float W1[M]; float dW1[M]; float db1; for (int i = 0; i < M; ++i) { for (int j = 0; j < N; ++j) { W0[i][j] = W0_ptr(node, i, j); dW0[i][j] = (T)0.0; } b0[i] = b0_ptr(node, i); db0[i] = (T)0.0; W1[i] = W1_ptr(node, i); dW1[i] = (T)0.0; } db1 = (T)0.0; // 1つのSMで1nodeを全フレーム処理 for ( index_t frame = 0; frame < frame_size; ++frame ) { // 入力データ読み込み T x[N]; for ( int i = 0; i < N; ++i ) { x[i] = x_ptr.Get(frame, input_index_ptr(node, i)); } // 1段目再計算して2段目逆伝播 T grad1 = dy_ptr.Get(frame, node); T grad0[M]; db1 += grad1; for ( int i = 0; i < M; ++i ) { T sig0 = b0[i]; for ( int j = 0; j < N; ++j ) { sig0 += x[j] * W0[i][j]; } sig0 = std::max(sig0, (T)0); // ReLU dW1[i] += grad1 * sig0; if ( sig0 > 0 ) { // ReLU grad0[i] = grad1 * W1[i]; } else { grad0[i] = 0; } } // 1段目逆伝播 T dx[N]; for ( int i = 0; i < N; ++i ) { dx[i] = 0; // dx_ptr[frame_stride * i + frame]; } for ( int i = 0; i < M; ++i ) { db0[i] += grad0[i]; for ( int j = 0; j < N; ++j ) { dW0[i][j] += grad0[i] * x[j]; dx[j] += grad0[i] * W0[i][j]; } } // 誤差書き込み for ( int i = 0; i < N; ++i ) { dx_ptr.Add(frame, input_index_ptr(node, i), dx[i]); } } // パラメータ設定 for ( int i = 0; i < M; ++i ) { for ( int j = 0; j < N; ++j ) { dW0_ptr(node, i, j) += dW0[i][j]; } db0_ptr(node, i) += db0[i]; dW1_ptr(node, i) += dW1[i]; } db1_ptr(node) = db1; } return dx_buf; } } }; }
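A minimal usage sketch for the layer above, based only on the Create/Forward/Backward signatures visible in this header; the include paths, the 28x28x1 input shape, and the data fill are illustrative assumptions, not BinaryBrain documentation.

// Hedged sketch: standalone use of MicroMlpAffine<6,16> (float in, float out).
// Header paths and shapes are assumptions; only calls shown in the header are used.
#include "bb/MicroMlpAffine.h"

void micro_mlp_affine_demo(void)
{
    // 256 output nodes; each node reads N=6 randomly connected inputs
    auto layer = bb::MicroMlpAffine<6, 16>::Create(256);

    // 32 frames of a 28x28x1 FP32 input: (frame_size, shape, type) as used in Forward()
    bb::FrameBuffer x(32, {28, 28, 1}, BB_TYPE_FP32);
    // ... fill x with data here ...

    auto y  = layer->Forward(x);      // input shape is bound on the first call
    auto dy = y;                      // stand-in gradient with the output shape/type
    auto dx = layer->Backward(dy);    // accumulates into dW0/db0/dW1/db1
}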
declare-simd-fix.h
#ifndef LLVM_CLANG_TEST_OPENMP_INPUTS_DECLARE_SIMD_FIX_H
#define LLVM_CLANG_TEST_OPENMP_INPUTS_DECLARE_SIMD_FIX_H

#pragma omp declare simd
float foo(float a, float b, int c);

float bar(float a, float b, int c);

#endif
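A small hedged sketch of how a header like this is consumed: the declare-simd variant of foo becomes callable from a vectorized loop (the wrapper function and array names below are illustrative).

// Hedged sketch: calling the declare-simd function from an omp simd loop.
#include "declare-simd-fix.h"

void apply_foo(float *out, const float *a, const float *b, int c, int n)
{
    #pragma omp simd
    for (int i = 0; i < n; ++i)
        out[i] = foo(a[i], b[i], c);   // compiler may substitute the SIMD variant
}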
qsort_arg_mt.c
/* * Imported from PostgreSQL sources by Teodor Sigaev <teodor@sigaev.ru>, <sigaev@corp.mail.ru> */ /* * qsort_arg.c: qsort with a passthrough "void *" argument * * Modifications from vanilla NetBSD source: * Add do ... while() macro fix * Remove __inline, _DIAGASSERTs, __P * Remove ill-considered "swap_cnt" switch to insertion sort, * in favor of a simple check for presorted input. * * CAUTION: if you change this file, see also qsort.c * * $PostgreSQL: pgsql/src/port/qsort_arg.c,v 1.4 2007/03/18 05:36:50 neilc Exp $ */ /* $NetBSD: qsort.c,v 1.13 2003/08/07 16:43:42 agc Exp $ */ /*- * Copyright (c) 1992, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include <third_party/qsort_arg.h> #include <stdint.h> #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ #ifndef HAVE_OPENMP #error "HAVE_OPENMP macro is not defined" #endif #define min(a, b) (a) < (b) ? a : b static char *med3(char *a, char *b, char *c, int (*cmp)(const void *a, const void *b, void *arg), void *arg); static void swapfunc(char *, char *, size_t, int); /** * @brief Reduce the current number of threads in the thread pool to the * bare minimum. Doesn't prevent the pool from spawning new threads later * if demand mounts. */ static void thread_pool_trim() { /* * Trim OpenMP thread pool. * Though we lack the direct control the workaround below works for * GNU OpenMP library. The library stops surplus threads on entering * a parallel region. Can't go below 2 threads due to the * implementation quirk. */ #pragma omp parallel num_threads(2) ; } /* * Qsort routine based on J. L. Bentley and M. D. McIlroy, * "Engineering a sort function", * Software--Practice and Experience 23 (1993) 1249-1265. * We have modified their original by adding a check for already-sorted input, * which seems to be a win per discussions on pgsql-hackers around 2006-03-21. 
*/ #define swapcode(TYPE, parmi, parmj, n) \ do { \ size_t i = (n) / sizeof (TYPE); \ TYPE *pi = (TYPE *)(void *)(parmi); \ TYPE *pj = (TYPE *)(void *)(parmj); \ do { \ TYPE t = *pi; \ *pi++ = *pj; \ *pj++ = t; \ } while (--i > 0); \ } while (0) #define SWAPINIT(a, es) swaptype = ((char *)(a) - (char *)0) % sizeof(long) || \ (es) % sizeof(long) ? 2 : (es) == sizeof(long)? 0 : 1; static void swapfunc(char *a, char *b, size_t n, int swaptype) { if (swaptype <= 1) swapcode(long, a, b, n); else swapcode(char, a, b, n); } #define swap(a, b) \ if (swaptype == 0) { \ long t = *(long *)(void *)(a); \ *(long *)(void *)(a) = *(long *)(void *)(b); \ *(long *)(void *)(b) = t; \ } else \ swapfunc(a, b, es, swaptype) #define vecswap(a, b, n) if ((n) > 0) swapfunc((a), (b), (size_t)(n), swaptype) static char * med3(char *a, char *b, char *c, int (*cmp)(const void *a, const void *b, void *arg), void *arg) { return cmp(a, b, arg) < 0 ? (cmp(b, c, arg) < 0 ? b : (cmp(a, c, arg) < 0 ? c : a)) : (cmp(b, c, arg) > 0 ? b : (cmp(a, c, arg) < 0 ? a : c)); } static void qsort_arg_mt_internal(void *a, size_t n, intptr_t es, int (*cmp)(const void *a, const void *b, void *arg), void *arg) { char *pa, *pb, *pc, *pd, *pl, *pm, *pn; intptr_t d, r, swaptype, presorted; loop:SWAPINIT(a, es); if (n < 7) { for (pm = (char *) a + es; pm < (char *) a + n * es; pm += es) for (pl = pm; pl > (char *) a && cmp(pl - es, pl, arg) > 0; pl -= es) swap(pl, pl - es); return; } presorted = 1; for (pm = (char *) a + es; pm < (char *) a + n * es; pm += es) { if (cmp(pm - es, pm, arg) > 0) { presorted = 0; break; } } if (presorted) return; pm = (char *) a + (n / 2) * es; if (n > 7) { pl = (char *) a; pn = (char *) a + (n - 1) * es; if (n > 40) { d = (n / 8) * es; pl = med3(pl, pl + d, pl + 2 * d, cmp, arg); pm = med3(pm - d, pm, pm + d, cmp, arg); pn = med3(pn - 2 * d, pn - d, pn, cmp, arg); } pm = med3(pl, pm, pn, cmp, arg); } swap((char*)a, pm); pa = pb = (char *) a + es; pc = pd = (char *) a + (n - 1) * es; for (;;) { while (pb <= pc && (r = cmp(pb, a, arg)) <= 0) { if (r == 0) { swap(pa, pb); pa += es; } pb += es; } while (pb <= pc && (r = cmp(pc, a, arg)) >= 0) { if (r == 0) { swap(pc, pd); pd -= es; } pc -= es; } if (pb > pc) break; swap(pb, pc); pb += es; pc -= es; } pn = (char *) a + n * es; r = min(pa - (char *) a, pb - pa); vecswap((char*)a, pb - r, r); r = min(pd - pc, pn - pd - es); vecswap(pb, pn - r, r); if ((r = pb - pa) > es) { #pragma omp task qsort_arg_mt_internal(a, r / es, es, cmp, arg); } if ((r = pd - pc) > es) { /* Iterate rather than recurse to save stack space */ a = pn - r; n = r / es; goto loop; } } void qsort_arg_mt(void *a, size_t n, size_t es, int (*cmp)(const void *a, const void *b, void *arg), void *arg) { #pragma omp parallel { #pragma omp single qsort_arg_mt_internal(a, n, es, cmp, arg); } thread_pool_trim(); } #if defined(__cplusplus) } #endif /* defined(__cplusplus) */
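A hedged usage sketch for qsort_arg_mt: sorting an int array with a comparator that ignores the passthrough argument. It assumes the prototype is exposed by third_party/qsort_arg.h and that the translation unit is built with OpenMP enabled.

// Hedged sketch: minimal caller for qsort_arg_mt.
#include <stdio.h>
#include <third_party/qsort_arg.h>

static int cmp_int(const void *a, const void *b, void *arg)
{
    (void) arg;                       // passthrough argument unused here
    int x = *(const int *) a;
    int y = *(const int *) b;
    return (x > y) - (x < y);
}

int main(void)
{
    int v[] = { 5, 1, 4, 1, 5, 9, 2, 6 };
    size_t n = sizeof v / sizeof v[0];

    qsort_arg_mt(v, n, sizeof v[0], cmp_int, NULL);

    for (size_t i = 0; i < n; i++)
        printf("%d ", v[i]);
    printf("\n");
    return 0;
}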
GB_binop__band_uint32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__band_uint32) // A.*B function (eWiseMult): GB (_AemultB_01__band_uint32) // A.*B function (eWiseMult): GB (_AemultB_02__band_uint32) // A.*B function (eWiseMult): GB (_AemultB_03__band_uint32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__band_uint32) // A*D function (colscale): GB (_AxD__band_uint32) // D*A function (rowscale): GB (_DxB__band_uint32) // C+=B function (dense accum): GB (_Cdense_accumB__band_uint32) // C+=b function (dense accum): GB (_Cdense_accumb__band_uint32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__band_uint32) // C=scalar+B GB (_bind1st__band_uint32) // C=scalar+B' GB (_bind1st_tran__band_uint32) // C=A+scalar GB (_bind2nd__band_uint32) // C=A'+scalar GB (_bind2nd_tran__band_uint32) // C type: uint32_t // A type: uint32_t // B,b type: uint32_t // BinaryOp: cij = (aij) & (bij) #define GB_ATYPE \ uint32_t #define GB_BTYPE \ uint32_t #define GB_CTYPE \ uint32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint32_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint32_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x) & (y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BAND || GxB_NO_UINT32 || GxB_NO_BAND_UINT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__band_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__band_uint32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__band_uint32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint32_t uint32_t bwork = (*((uint32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__band_uint32) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *restrict Cx = (uint32_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__band_uint32) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *restrict Cx = (uint32_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__band_uint32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return 
(GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__band_uint32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__band_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__band_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__band_uint32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__band_uint32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t x = (*((uint32_t *) x_input)) ; uint32_t *Bx = (uint32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint32_t bij = GBX (Bx, p, false) ; Cx [p] = (x) & (bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__band_uint32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t *Ax = (uint32_t *) Ax_input ; uint32_t y = (*((uint32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint32_t aij = GBX (Ax, p, false) ; Cx [p] = (aij) & (y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x) & (aij) ; \ } GrB_Info GB (_bind1st_tran__band_uint32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int 
nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t x = (*((const uint32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij) & (y) ; \ } GrB_Info GB (_bind2nd_tran__band_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t y = (*((const uint32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
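The generated kernels above are thin wrappers: a handful of GB_* macros pin down the types and the cij = aij & bij operator, and shared template files supply the loops. A stripped-down, standalone sketch of that code-generation pattern (simplified macro bodies, no iso/bitmap handling, not the actual SuiteSparse templates):

// Hedged sketch of the macro-driven kernel pattern used by the Generated2/ files.
#include <stdint.h>

#define GB_GETA(aij,Ax,p)   uint32_t aij = Ax [p]
#define GB_GETB(bij,Bx,p)   uint32_t bij = Bx [p]
#define GB_BINOP(z,x,y)     z = (x) & (y)

static void band_uint32_kernel (uint32_t *Cx, const uint32_t *Ax,
                                const uint32_t *Bx, int64_t n, int nthreads)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < n ; p++)
    {
        GB_GETA (aij, Ax, p) ;          // aij = Ax [p]
        GB_GETB (bij, Bx, p) ;          // bij = Bx [p]
        GB_BINOP (Cx [p], aij, bij) ;   // Cx [p] = aij & bij
    }
}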
laplace_acc.c
#include <stdlib.h> #include <stdio.h> #include <math.h> #include <sys/time.h> #include <openacc.h> #include <omp.h> // grid size #define GRIDY 8192 #define GRIDX 8192 // smallest permitted change in temperature #define MAX_TEMP_ERROR 0.02 double T_new[GRIDX+2][GRIDY+2]; // temperature grid double T[GRIDX+2][GRIDY+2]; // temperature grid from last iteration // initialisation routine void init(); int main(int argc, char *argv[]) { int i, j; // grid indexes int max_iterations; // maximal number of iterations int iteration=1; // iteration double dt=100; // largest change in temperature struct timeval start_time, stop_time, elapsed_time; // timers int num_threads = 1; // number of OpenMP threads int thread_id = 0; // thread ID int num_devices = 1; // number of GPU devices int device_id = 0; // device ID int chunk_size = 0; // grid size per GPU (X direction) int i_start, i_end; // starting and ending index per GPU (X direction) double dt_global; if(argc!=2) { printf("Usage: %s number_of_iterations\n",argv[0]); exit(1); } else { max_iterations=atoi(argv[1]); } gettimeofday(&start_time,NULL); init(); #pragma omp parallel default(shared) firstprivate(num_threads, thread_id, num_devices, device_id, i_start, i_end, chunk_size,dt,iteration,i,j) { num_threads = omp_get_num_threads(); thread_id = omp_get_thread_num(); num_devices = acc_get_num_devices(acc_device_nvidia); device_id = thread_id % num_devices; acc_set_device_num(device_id, acc_device_nvidia); // calculate the chunk size based on number of threads chunk_size=ceil((1.0*GRIDX)/num_threads); // calculate boundaries and process only inner region i_start = thread_id * chunk_size + 1; i_end = i_start + chunk_size - 1; // simulation iterations #pragma acc data copy(T), create(T_new) while ( dt > MAX_TEMP_ERROR && iteration <= max_iterations ) { // main computational kernel, average over neighbours in the grid #pragma acc kernels for(i = i_start; i <= i_end; i++) for(j = 1; j <= GRIDY; j++) T_new[i][j] = 0.25 * (T[i+1][j] + T[i-1][j] + T[i][j+1] + T[i][j-1]); #pragma acc update self(T[i_start:1][1:GRIDY],T[i_end:1][1:GRIDY]) #pragma omp barrier #pragma acc update device(T[(i_start-1):1][1:GRIDY],T[(i_end+1):1][1:GRIDY]) // reset dt dt = 0.0; #pragma omp single dt_global = 0.0; #pragma omp barrier // compute the largest change and copy T_new to T #pragma acc kernels for(i = i_start; i <= i_end; i++){ for(j = 1; j <= GRIDY; j++){ dt = fmax( fabs(T_new[i][j]-T[i][j]), dt); T[i][j] = T_new[i][j]; } } #pragma omp critical dt_global = fmax(dt,dt_global); #pragma omp barrier dt=dt_global; // periodically print largest change #pragma omp master if((iteration % 100) == 0) printf("Iteration %4.0d, dt %f\n",iteration,dt); iteration++; } } gettimeofday(&stop_time,NULL); timersub(&stop_time, &start_time, &elapsed_time); // measure time printf("Total time was %f seconds.\n", elapsed_time.tv_sec+elapsed_time.tv_usec/1000000.0); return 0; } // initialize grid and boundary conditions void init(){ int i,j; for(i = 0; i <= GRIDX+1; i++){ for (j = 0; j <= GRIDY+1; j++){ T[i][j] = 0.0; } } // these boundary conditions never change throughout run // set left side to 0 and right to a linear increase for(i = 0; i <= GRIDX+1; i++) { T[i][0] = 0.0; T[i][GRIDY+1] = (128.0/GRIDX)*i; } // set top to 0 and bottom to linear increase for(j = 0; j <= GRIDY+1; j++) { T[0][j] = 0.0; T[GRIDX+1][j] = (128.0/GRIDY)*j; } }
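The key piece of the hybrid scheme above is the device binding: each OpenMP host thread claims one GPU before issuing its OpenACC regions. A distilled, hedged sketch of just that pattern (NVIDIA device type as in the code above; the actual work is omitted):

// Hedged sketch: one OpenMP host thread per OpenACC (NVIDIA) device.
#include <openacc.h>
#include <omp.h>

void bind_threads_to_devices(void)
{
    #pragma omp parallel
    {
        int ngpus = acc_get_num_devices(acc_device_nvidia);
        if (ngpus > 0) {
            int dev = omp_get_thread_num() % ngpus;
            acc_set_device_num(dev, acc_device_nvidia);
            // subsequent #pragma acc regions issued by this thread target 'dev'
        }
    }
}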
getStartLists.c
#include "defs.h" double getStartLists(graph* G, edge** maxIntWtListPtr, INT_T* maxIntWtListSizePtr) { LONG_T *local_max, maxWeight; edge *maxIntWtList; LONG_T maxIntWtListSize; LONG_T *p_start, *p_end; double elapsed_time; elapsed_time = get_seconds(); #ifdef _OPENMP omp_set_num_threads(NUM_THREADS); #pragma omp parallel { #endif LONG_T i, j, n; edge* pList; LONG_T pCount, tmpListSize; int tid, nthreads; #ifdef DIAGNOSTIC double elapsed_time_part; #endif #ifdef _OPENMP tid = omp_get_thread_num(); nthreads = omp_get_num_threads(); #else tid = 0; nthreads = 1; #endif n = G->n; /* Determine the maximum edge weight */ if (tid == 0) { local_max = (LONG_T *) malloc(nthreads*sizeof(LONG_T)); } /* Allocate memory for partial edge list on each thread */ tmpListSize = 1000; pList = (edge *) malloc(tmpListSize*sizeof(edge)); pCount = 0; #ifdef _OPENMP #pragma omp barrier #endif local_max[tid] = -1; #ifdef DIAGNOSTIC if (tid == 0) { elapsed_time_part = get_seconds(); } #endif #ifdef _OPENMP #pragma omp for #endif for (i=0; i<n; i++) { for (j=G->numEdges[i]; j<G->numEdges[i+1]; j++) { if (G->weight[j] > local_max[tid]) { local_max[tid] = G->weight[j]; pCount = 0; pList[pCount].startVertex = i; pList[pCount].endVertex = G->endV[j]; pList[pCount].w = local_max[tid]; pList[pCount].e = j; pCount++; } else if (G->weight[j] == local_max[tid]) { pList[pCount].startVertex = i; pList[pCount].endVertex = G->endV[j]; pList[pCount].w = local_max[tid]; pList[pCount].e = j; pCount++; } } } #ifdef _OPENMP #pragma omp barrier #endif if (tid == 0) { #ifdef DIAGNOSTIC if (tid == 0) { elapsed_time_part = get_seconds() - elapsed_time_part; fprintf(stderr, "Max. weight computation time: %lf seconds\n", elapsed_time_part); } #endif maxWeight = local_max[0]; for (i=1; i<nthreads; i++) { if (local_max[i] > maxWeight) maxWeight = local_max[i]; } // free(local_max); } #ifdef _OPENMP #pragma omp barrier #endif if (local_max[tid] != maxWeight) { pCount = 0; } /* Merge all te partial edge lists */ if (tid == 0) { p_start = (LONG_T *) malloc(nthreads*sizeof(LONG_T)); p_end = (LONG_T *) malloc(nthreads*sizeof(LONG_T)); } #ifdef _OPENMP #pragma omp barrier #endif p_end[tid] = pCount; p_start[tid] = 0; #ifdef _OPENMP #pragma omp barrier #endif if (tid == 0) { for (i=1; i<nthreads; i++) { p_end[i] = p_end[i-1] + p_end[i]; p_start[i] = p_end[i-1]; } maxIntWtListSize = p_end[nthreads-1]; free(*maxIntWtListPtr); maxIntWtList = (edge *) malloc((maxIntWtListSize)*sizeof(edge)); } #ifdef _OPENMP #pragma omp barrier #endif for (j=p_start[tid]; j<p_end[tid]; j++) { (maxIntWtList[j]).startVertex = pList[j-p_start[tid]].startVertex; (maxIntWtList[j]).endVertex = pList[j-p_start[tid]].endVertex; (maxIntWtList[j]).e = pList[j-p_start[tid]].e; (maxIntWtList[j]).w = pList[j-p_start[tid]].w; } #ifdef _OPENMP #pragma omp barrier #endif free(pList); if (tid == 0) { free(local_max); free(p_start); free(p_end); *maxIntWtListPtr = maxIntWtList; *maxIntWtListSizePtr = maxIntWtListSize; } #ifdef _OPENMP } #endif /* Verification */ #if 0 maxIntWtList = *maxIntWtListPtr; for (int i=0; i<*maxIntWtListSizePtr; i++) { fprintf(stderr, "[%ld %ld %ld %ld] ", maxIntWtList[i].startVertex, maxIntWtList[i].endVertex, maxIntWtList[i].e, maxIntWtList[i].w); } #endif elapsed_time = get_seconds() - elapsed_time; return elapsed_time; }
task-taskwait-nested.c
/* * task-taskwait-nested.c -- Archer testcase */ //===----------------------------------------------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // // See tools/archer/LICENSE.txt for details. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // RUN: %libarcher-compile-and-run-race | FileCheck %s // RUN: %libarcher-compile-and-run-race-noserial | FileCheck %s // REQUIRES: tsan #include "ompt/ompt-signal.h" #include <omp.h> #include <stdio.h> #include <unistd.h> int main(int argc, char *argv[]) { int var = 0, a = 0; #pragma omp parallel num_threads(8) shared(var, a) #pragma omp master { #pragma omp task shared(var, a) { #pragma omp task shared(var, a) { // wait for master to pass the taskwait OMPT_SIGNAL(a); OMPT_WAIT(a, 2); var++; } } // Give other thread time to steal the task and execute its child. OMPT_WAIT(a, 1); // Only directly generated children are guaranteed to be executed. #pragma omp taskwait OMPT_SIGNAL(a); var++; } int error = (var != 2); fprintf(stderr, "DONE\n"); return error; } // CHECK: WARNING: ThreadSanitizer: data race // CHECK-NEXT: {{(Write|Read)}} of size 4 // CHECK-NEXT: #0 {{.*}}task-taskwait-nested.c:34 // CHECK: Previous write of size 4 // CHECK-NEXT: #0 {{.*}}task-taskwait-nested.c:44 // CHECK: DONE // CHECK: ThreadSanitizer: reported 1 warnings
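The race in this test exists because taskwait only waits for directly generated children. A hedged sketch of a taskgroup-based structure that also covers the nested task (mirroring the test's shape, without the Archer signalling):

// Hedged sketch: taskgroup waits for all descendant tasks, so both increments
// of 'var' become ordered and the data race flagged above disappears.
#include <omp.h>

int taskgroup_variant(void)
{
    int var = 0;
    #pragma omp parallel num_threads(8) shared(var)
    #pragma omp master
    {
        #pragma omp taskgroup
        {
            #pragma omp task shared(var)
            {
                #pragma omp task shared(var)
                {
                    var++;          // nested task, still inside the taskgroup
                }
            }
        }
        var++;                      // runs only after all descendants finished
    }
    return var;                     // expected: 2
}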
declare_variant_ast_print.c
// RUN: %clang_cc1 -verify -fopenmp -x c -std=c99 -ast-print %s -o - -Wno-openmp-clauses | FileCheck %s // RUN: %clang_cc1 -verify -fopenmp-simd -x c -std=c99 -ast-print %s -o - -Wno-openmp-clauses | FileCheck %s // expected-no-diagnostics int foo(void); #pragma omp declare variant(foo) match(construct={target}) #pragma omp declare variant(foo) match(construct={teams}) #pragma omp declare variant(foo) match(construct={parallel}) #pragma omp declare variant(foo) match(construct={for}) #pragma omp declare variant(foo) match(construct={simd}) #pragma omp declare variant(foo) match(construct={target,teams,parallel,for,simd}) #pragma omp declare variant(foo) match(xxx={}, yyy={ccc}) #pragma omp declare variant(foo) match(xxx={vvv}) #pragma omp declare variant(foo) match(implementation={vendor(score(0):llvm)}, device={kind(fpga)}) #pragma omp declare variant(foo) match(implementation={vendor(llvm), xxx}) #pragma omp declare variant(foo) match(implementation={vendor(unknown)}, device={kind(gpu)}) #pragma omp declare variant(foo) match(implementation={vendor(score(5): ibm, xxx, ibm)}, device={kind(cpu, nohost)}) #pragma omp declare variant(foo) match(device={kind(host)}) #pragma omp declare variant(foo) match(device={kind(nohost), xxx}) #pragma omp declare variant(foo) match(implementation={extension(match_all)}) #pragma omp declare variant(foo) match(implementation={extension(match_any)}) #pragma omp declare variant(foo) match(implementation={extension(match_none)}) int bar(void); // CHECK: int foo(); // CHECK-NEXT: #pragma omp declare variant(foo) match(implementation={extension(match_none)}) // CHECK-NEXT: #pragma omp declare variant(foo) match(implementation={extension(match_any)}) // CHECK-NEXT: #pragma omp declare variant(foo) match(implementation={extension(match_all)}) // CHECK-NEXT: #pragma omp declare variant(foo) match(device={kind(nohost)}) // CHECK-NEXT: #pragma omp declare variant(foo) match(device={kind(host)}) // CHECK-NEXT: #pragma omp declare variant(foo) match(implementation={vendor(score(5): ibm)}, device={kind(cpu, nohost)}) // CHECK-NEXT: #pragma omp declare variant(foo) match(implementation={vendor(unknown)}, device={kind(gpu)}) // CHECK-NEXT: #pragma omp declare variant(foo) match(implementation={vendor(llvm)}) // CHECK-NEXT: #pragma omp declare variant(foo) match(implementation={vendor(score(0): llvm)}, device={kind(fpga)}) // CHECK-NEXT: #pragma omp declare variant(foo) match(construct={target, teams, parallel, for, simd}) // CHECK-NEXT: #pragma omp declare variant(foo) match(construct={simd}) // CHECK-NEXT: #pragma omp declare variant(foo) match(construct={for}) // CHECK-NEXT: #pragma omp declare variant(foo) match(construct={parallel}) // CHECK-NEXT: #pragma omp declare variant(foo) match(construct={teams}) // CHECK-NEXT: #pragma omp declare variant(foo) match(construct={target}) // CHECK-NEXT: int bar();
multisort-omp-tree-opcional.c
#include <malloc.h> #include <stdio.h> #include <stdlib.h> #include <omp.h> #include <sys/time.h> double getusec_() { struct timeval time; gettimeofday(&time, NULL); return ((double)time.tv_sec * (double)1e6 + (double)time.tv_usec); } #define START_COUNT_TIME stamp = getusec_(); #define STOP_COUNT_TIME(_m) stamp = getusec_() - stamp;\ stamp = stamp/1e6;\ printf ("%s: %0.6f\n",(_m), stamp); // N and MIN must be powers of 2 long N; long MIN_SORT_SIZE; long MIN_MERGE_SIZE; #define BLOCK_SIZE 1024L #define T int void basicsort(long n, T data[n]); void basicmerge(long n, T left[n], T right[n], T result[n*2], long start, long length); void merge(long n, T left[n], T right[n], T result[n*2], long start, long length) { if (length < MIN_MERGE_SIZE*2L) { // Base case basicmerge(n, left, right, result, start, length); } else { // Recursive decomposition #pragma omp task merge(n, left, right, result, start, length/2); #pragma omp task merge(n, left, right, result, start + length/2, length/2); #pragma omp taskwait } } void multisort(long n, T data[n], T tmp[n]) { if (n >= MIN_SORT_SIZE*4L) { // Recursive decomposition #pragma omp task multisort(n/4L, &data[0], &tmp[0]); #pragma omp task multisort(n/4L, &data[n/4L], &tmp[n/4L]); #pragma omp task multisort(n/4L, &data[n/2L], &tmp[n/2L]); #pragma omp task multisort(n/4L, &data[3L*n/4L], &tmp[3L*n/4L]); #pragma omp taskwait #pragma omp task merge(n/4L, &data[0], &data[n/4L], &tmp[0], 0, n/2L); #pragma omp task merge(n/4L, &data[n/2L], &data[3L*n/4L], &tmp[n/2L], 0, n/2L); #pragma omp taskwait #pragma omp task merge(n/2L, &tmp[0], &tmp[n/2L], &data[0], 0, n); #pragma omp taskwait } else { // Base case basicsort(n, data); } } static void initialize(long length, T data[length]) { long i; #pragma omp parallel for for (i = 0; i < length; i++) { if (i==0) { data[i] = rand(); } else { data[i] = ((data[i-1]+1) * i * 104723L) % N; } } } static void clear(long length, T data[length]) { long i; #pragma omp parallel for for (i = 0; i < length; i++) { data[i] = 0; } } void check_sorted(long n, T data[n]) { int unsorted=0; for (int i=1; i<n; i++) if (data[i-1] > data[i]) unsorted++; if (unsorted > 0) printf ("\nERROR: data is NOT properly sorted. There are %d unordered positions\n\n",unsorted); else { // printf ("data IS ordered; "); } } int main(int argc, char **argv) { if (argc != 4) { fprintf(stderr, "Usage: %s <vector size in K> <sort size in K> <merge size in K>\n", argv[0]); return 1; } N = atol(argv[1]) * BLOCK_SIZE; MIN_SORT_SIZE = atol(argv[2]) * BLOCK_SIZE; MIN_MERGE_SIZE = atol(argv[3]) * BLOCK_SIZE; T *data = malloc(N*sizeof(T)); T *tmp = malloc(N*sizeof(T)); double stamp; START_COUNT_TIME; initialize(N, data); clear(N, tmp); STOP_COUNT_TIME("Initialization time in seconds"); START_COUNT_TIME; #pragma omp parallel #pragma omp single multisort(N, data, tmp); STOP_COUNT_TIME("Multisort execution time"); START_COUNT_TIME; check_sorted (N, data); STOP_COUNT_TIME("Check sorted data execution time"); fprintf(stdout, "Multisort program finished\n"); return 0; }
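An optional refinement of the recursive decomposition above: tasks whose subproblem is already close to the cut-off can carry a final clause, so they and everything they spawn execute undeferred, trimming task-creation overhead near the leaves. A hedged fragment using the same names as the code above (drop-in for one of the recursive task lines, not a complete function):

// Hedged fragment: 'final' makes small subtasks (and their descendants) undeferred.
#pragma omp task final(n/4L < MIN_SORT_SIZE)
multisort(n/4L, &data[0], &tmp[0]);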
zero_length_array_section_exit.c
// RUN: %libomptarget-compile-generic -fopenmp-version=51 // RUN: %libomptarget-run-fail-generic 2>&1 \ // RUN: | %fcheck-generic #include <stdio.h> int main() { int arr[5]; // CHECK: addr=0x[[#%x,HOST_ADDR:]] fprintf(stderr, "addr=%p\n", arr); // CHECK-NOT: Libomptarget #pragma omp target enter data map(alloc: arr[0:5]) #pragma omp target exit data map(present, release: arr[0:0]) // CHECK: arr is present fprintf(stderr, "arr is present\n"); // arr[0:0] doesn't create an actual mapping in the first directive. // // CHECK: Libomptarget message: device mapping required by 'present' map type modifier does not exist for host address 0x{{0*}}[[#HOST_ADDR]] (0 bytes) // CHECK: Libomptarget fatal error 1: failure of target construct while offloading is mandatory #pragma omp target enter data map(alloc: arr[0:0]) #pragma omp target exit data map(present, release: arr[0:0]) // CHECK-NOT: arr is present fprintf(stderr, "arr is present\n"); return 0; }
kmp_set_dispatch_buf.c
// RUN: %libomp-compile && %libomp-run 7 // RUN: %libomp-run 0 && %libomp-run -1 // RUN: %libomp-run 1 && %libomp-run 2 && %libomp-run 5 // RUN: %libomp-compile -DMY_SCHEDULE=guided && %libomp-run 7 // RUN: %libomp-run 1 && %libomp-run 2 && %libomp-run 5 #include <stdio.h> #include <omp.h> #include <stdlib.h> #include <limits.h> #include "omp_testsuite.h" #define INCR 7 #define MY_MAX 200 #define MY_MIN -200 #ifndef MY_SCHEDULE # define MY_SCHEDULE dynamic #endif int num_disp_buffers, num_loops; int a, b, a_known_value, b_known_value; int test_kmp_set_disp_num_buffers() { int success = 1; a = 0; b = 0; // run many small dynamic loops to stress the dispatch buffer system #pragma omp parallel { int i,j; for (j = 0; j < num_loops; j++) { #pragma omp for schedule(MY_SCHEDULE) nowait for (i = MY_MIN; i < MY_MAX; i+=INCR) { #pragma omp atomic a++; } #pragma omp for schedule(MY_SCHEDULE) nowait for (i = MY_MAX; i >= MY_MIN; i-=INCR) { #pragma omp atomic b++; } } } // detect failure if (a != a_known_value || b != b_known_value) { success = 0; printf("a = %d (should be %d), b = %d (should be %d)\n", a, a_known_value, b, b_known_value); } return success; } int main(int argc, char** argv) { int i,j; int num_failed=0; if (argc != 2) { fprintf(stderr, "usage: %s num_disp_buffers\n", argv[0]); exit(1); } // set the number of dispatch buffers num_disp_buffers = atoi(argv[1]); kmp_set_disp_num_buffers(num_disp_buffers); // figure out the known values to compare with calculated result a_known_value = 0; b_known_value = 0; // if specified to use bad num_disp_buffers set num_loops // to something reasonable if (num_disp_buffers <= 0) num_loops = 10; else num_loops = num_disp_buffers*10; for (j = 0; j < num_loops; j++) { for (i = MY_MIN; i < MY_MAX; i+=INCR) a_known_value++; for (i = MY_MAX; i >= MY_MIN; i-=INCR) b_known_value++; } for(i = 0; i < REPETITIONS; i++) { if(!test_kmp_set_disp_num_buffers()) { num_failed++; } } return num_failed; }
nbody_brute_force.c
/* ** nbody_brute_force.c - nbody simulation using the brute-force algorithm (O(n*n)) ** **/ #include <stdio.h> #include <stdlib.h> #include <pthread.h> #include <math.h> #include <sys/time.h> #include <assert.h> #include <unistd.h> #include <mpi.h> #include <omp.h> #ifdef DISPLAY #include <X11/Xlib.h> #include <X11/Xutil.h> #endif #include "ui.h" #include "nbody.h" #include "nbody_tools.h" FILE* f_out=NULL; int nparticles=10; /* number of particles */ float T_FINAL=1.0; /* simulation end time */ particle_t*particles; double sum_speed_sq = 0; double max_acc = 0; double max_speed = 0; int ACC_TAG = 97; int SPEED_TAG = 96; double dt = 0.01; int step = 0; void cuda_compute_force(int i, int nparticles, particle_t * p); void init() { /* Nothing to do */ } #ifdef DISPLAY Display *theDisplay; /* These three variables are required to open the */ GC theGC; /* particle plotting window. They are externally */ Window theMain; /* declared in ui.h but are also required here. */ #endif /* compute the force that a particle with position (x_pos, y_pos) and mass 'mass' * applies to particle p */ void compute_force(particle_t*p, double x_pos, double y_pos, double mass) { double x_sep, y_sep, dist_sq, grav_base; x_sep = x_pos - p->x_pos; y_sep = y_pos - p->y_pos; dist_sq = MAX((x_sep*x_sep) + (y_sep*y_sep), 0.01); /* Use the 2-dimensional gravity rule: F = d * (GMm/d^2) */ grav_base = GRAV_CONSTANT*(p->mass)*(mass)/dist_sq; p->x_force += grav_base*x_sep; p->y_force += grav_base*y_sep; } /* compute the new position/velocity */ void move_particle(particle_t*p, double step) { p->x_pos += (p->x_vel)*step; p->y_pos += (p->y_vel)*step; double x_acc = p->x_force/p->mass; double y_acc = p->y_force/p->mass; p->x_vel += x_acc*step; p->y_vel += y_acc*step; /* compute statistics */ double cur_acc = (x_acc*x_acc + y_acc*y_acc); cur_acc = sqrt(cur_acc); double speed_sq = (p->x_vel)*(p->x_vel) + (p->y_vel)*(p->y_vel); double cur_speed = sqrt(speed_sq); sum_speed_sq += speed_sq; max_acc = MAX(max_acc, cur_acc); max_speed = MAX(max_speed, cur_speed); } /* display all the particles */ void draw_all_particles() { int i; for(i=0; i<nparticles; i++) { int x = POS_TO_SCREEN(particles[i].x_pos); int y = POS_TO_SCREEN(particles[i].y_pos); draw_point(x,y); } } void print_all_particles(FILE* f) { int i; for(i=0; i<nparticles; i++) { particle_t*p = &particles[i]; fprintf(f, "particle={pos=(%f,%f), vel=(%f,%f)}\n", p->x_pos, p->y_pos, p->x_vel, p->y_vel); } } /* Simulate the movement of nparticles particles. */ int main(int argc, char**argv) { if(argc >= 2) { nparticles = atoi(argv[1]); } if(argc == 3) { T_FINAL = atof(argv[2]); } init(); int rank, size; /* MPI Initialization */ MPI_Init(&argc, &argv); /* Get the rank of the current task and the number * of MPI processe */ MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &size); MPI_Status status; /* Allocate global shared arrays for the particles data set. 
*/ particles = malloc(sizeof(particle_t)*nparticles); all_init_particles(nparticles, particles); /* Initialize thread data structures */ #ifdef DISPLAY /* Open an X window to display the particles */ simple_init (100,100,DISPLAY_SIZE, DISPLAY_SIZE); #endif double t1, t2, duration; /* Start simulation */ double t = 0.0, dt = 0.01; int i, j; int nums_per_proc = nparticles/(size-1); int root_task = nparticles - nums_per_proc*(size-1); particle_t* par_per_proc; /* Create MPI type for collective communication */ MPI_Datatype particle_mpi_t; int blocklens[1] = {7}; MPI_Aint offsets[1] = {0}; MPI_Datatype types[1] = {MPI_DOUBLE}; MPI_Type_create_struct(1, blocklens, offsets, types, &particle_mpi_t); MPI_Type_commit(&particle_mpi_t); /* Pre-define the displacements, counts for gathering particles[..] from slave procs */ int *displs = NULL; int *counts = NULL; if (rank == 0){ displs = malloc(size * sizeof(int)); counts = malloc(size * sizeof(int)); displs[0] = 0; counts[0] = root_task; for (i = 1; i < size; i++) { displs[i] = root_task + nums_per_proc * (i-1); counts[i] = nums_per_proc; } } while (t < T_FINAL && nparticles > 0) { /* Update time. */ t += dt; /* Move particles with the current and compute rms velocity. */ /* 1. Computing task */ if (rank != 0) { // normal tasks for nums_per_proc in nparticles if(step==0) { par_per_proc = malloc(sizeof(particle_t)*nums_per_proc); if (par_per_proc == NULL) { fprintf(stderr, "Fatal: failed to allocate bytes.\n"); abort(); } } #pragma omp parallel for private(i,j) schedule(dynamic) for (i = root_task + nums_per_proc*(rank-1); i < root_task + nums_per_proc * rank; i++){ particles[i].x_force = 0; particles[i].y_force = 0; cuda_compute_force(i, nparticles, particles); par_per_proc[i-root_task-nums_per_proc*(rank-1)] = particles[i]; } MPI_Send(&max_acc, 1, MPI_DOUBLE, 0, ACC_TAG, MPI_COMM_WORLD); MPI_Send(&max_speed, 1, MPI_DOUBLE, 0, SPEED_TAG, MPI_COMM_WORLD); } else { // rank==0 nums_per_proc = root_task; /* Alloc particles arrays for current proc */ if(step==0) { t1 = MPI_Wtime(); printf("t1 = %f\n", t1); par_per_proc = malloc(sizeof(particle_t)*nums_per_proc); if (par_per_proc == NULL) { fprintf(stderr, "Fatal: failed to allocate bytes.\n"); abort(); } } /* Executing computing task of root */ #pragma omp parallel for private(i,j) schedule(dynamic) for (i = 0; i < nums_per_proc; i++){ particles[i].x_force = 0; particles[i].y_force = 0; for(j = 0; j < nparticles; j++) { particle_t*p = &particles[j]; compute_force(&particles[i], p->x_pos, p->y_pos, p->mass); } par_per_proc[i] = particles[i]; } /* Recv the max_acc and max_speed from other procs */ for (i = 1; i < size; i++) { double max_acc_recv, max_speed_recv; MPI_Recv(&max_acc_recv, 1, MPI_DOUBLE, i, ACC_TAG, MPI_COMM_WORLD, &status); MPI_Recv(&max_speed_recv, 1, MPI_DOUBLE, i, SPEED_TAG, MPI_COMM_WORLD, &status); if (max_acc_recv > max_acc) max_acc = max_acc_recv; if (max_speed_recv > max_speed) max_speed = max_speed_recv; } } MPI_Gatherv(par_per_proc, nums_per_proc, particle_mpi_t, particles, counts, displs, particle_mpi_t, 0, MPI_COMM_WORLD); /* 2. 
Move task (only in root) */ if(rank == 0) { #pragma omp parallel for private(i) schedule(dynamic) for(i = 0; i < nparticles; i++) { move_particle(&particles[i], dt); } } // send new positions, forces, acc MPI_Bcast(particles, nparticles, particle_mpi_t, 0, MPI_COMM_WORLD); MPI_Bcast(&max_speed, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&max_acc, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); /* Adjust dt based on maximum speed and acceleration--this simple rule tries to insure that no velocity will change by more than 10% */ dt = 0.1*max_speed/max_acc; step++; #if DISPLAY clear_display(); draw_all_particles(); flush_display(); #endif } if (rank == 0) { t2 = MPI_Wtime(); printf("t2 = %f\n", t2); duration = t2 - t1; } t2 = MPI_Wtime(); duration = t2 - t1; //double duration = (t2.tv_sec-t1.tv_sec)+((t2.tv_usec-t1.tv_usec)/1e6); #ifdef DUMP_RESULT FILE* f_out = fopen("particles.log", "w"); assert(f_out); print_all_particles(f_out); fclose(f_out); #endif free(par_per_proc); free(particles); if (rank == 0) { printf("-----------------------------\n"); printf("nparticles: %d\n", nparticles); printf("T_FINAL: %f\n", T_FINAL); printf("-----------------------------\n"); printf("Simulation took %lf s to complete\n", duration); } #ifdef DISPLAY clear_display(); draw_all_particles(); flush_display(); printf("Hit return to close the window."); getchar(); /* Close the X window used to display the particles */ XCloseDisplay(theDisplay); #endif MPI_Finalize(); return 0; }
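The particle_mpi_t datatype above is built as a single block of seven MPI_DOUBLEs at offset 0, which only works while particle_t is exactly seven contiguous doubles with no padding. As a hedged sketch (the field list below is an assumption inferred from the members this file touches, not the actual nbody.h definition), a more robust construction uses offsetof for each field and resizes the type to the true struct extent:

#include <stddef.h>
#include <mpi.h>

/* Assumed layout, mirroring the members used in nbody_brute_force.c. */
typedef struct {
  double x_pos, y_pos;
  double x_vel, y_vel;
  double x_force, y_force;
  double mass;
} particle_sketch_t;

static MPI_Datatype make_particle_type(void) {
  int          blocklens[7] = {1, 1, 1, 1, 1, 1, 1};
  MPI_Datatype types[7]     = {MPI_DOUBLE, MPI_DOUBLE, MPI_DOUBLE, MPI_DOUBLE,
                               MPI_DOUBLE, MPI_DOUBLE, MPI_DOUBLE};
  MPI_Aint offsets[7] = {
    offsetof(particle_sketch_t, x_pos),   offsetof(particle_sketch_t, y_pos),
    offsetof(particle_sketch_t, x_vel),   offsetof(particle_sketch_t, y_vel),
    offsetof(particle_sketch_t, x_force), offsetof(particle_sketch_t, y_force),
    offsetof(particle_sketch_t, mass)
  };

  MPI_Datatype tmp, particle_type;
  MPI_Type_create_struct(7, blocklens, offsets, types, &tmp);
  /* Pin the extent to sizeof(struct) so arrays of particles stride correctly. */
  MPI_Type_create_resized(tmp, 0, sizeof(particle_sketch_t), &particle_type);
  MPI_Type_commit(&particle_type);
  MPI_Type_free(&tmp);
  return particle_type;
}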
preGraphConstruction.c
/* Copyright 2007, 2008 Daniel Zerbino (zerbino@ebi.ac.uk) This file is part of Velvet. Velvet is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. Velvet is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with Velvet; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <ctype.h> #ifdef _OPENMP #include <omp.h> #endif #include "globals.h" #include "preGraph.h" #include "recycleBin.h" #include "roadMap.h" #include "readSet.h" #include "concatenatedPreGraph.h" #include "utility.h" #include "kmer.h" #define ADENINE 0 #define CYTOSINE 1 #define GUANINE 2 #define THYMINE 3 #ifdef _OPENMP Coordinate *annotationOffset = NULL; static omp_lock_t *nodeLocks = NULL; static void createNodeLocks(PreGraph *preGraph) { IDnum nbNodes; IDnum nodeIndex; nbNodes = preNodeCount_pg(preGraph) + 1; if (nodeLocks) free (nodeLocks); nodeLocks = mallocOrExit(nbNodes, omp_lock_t); #pragma omp parallel for for (nodeIndex = 0; nodeIndex < nbNodes; nodeIndex++) omp_init_lock(nodeLocks + nodeIndex); } static void lockNode(IDnum preNodeID) { omp_set_lock(nodeLocks + preNodeID); } static void unLockNode(IDnum preNodeID) { omp_unset_lock(nodeLocks + preNodeID); } static void lockTwoNodes(IDnum preNodeID, IDnum preNode2ID) { if (preNodeID < 0) preNodeID = -preNodeID; if (preNode2ID < 0) preNode2ID = -preNode2ID; /* Lock lowest ID first to avoid deadlocks */ if (preNodeID == preNode2ID) omp_set_lock (nodeLocks + preNodeID); else if (preNodeID < preNode2ID) { omp_set_lock (nodeLocks + preNodeID); omp_set_lock (nodeLocks + preNode2ID); } else { omp_set_lock (nodeLocks + preNode2ID); omp_set_lock (nodeLocks + preNodeID); } } static void unLockTwoNodes(IDnum preNodeID, IDnum preNode2ID) { if (preNodeID < 0) preNodeID = -preNodeID; if (preNode2ID < 0) preNode2ID = -preNode2ID; omp_unset_lock (nodeLocks + preNodeID); if (preNodeID != preNode2ID) omp_unset_lock (nodeLocks + preNode2ID); } #endif // Internal structure used to mark the ends of an Annotation struct insertionMarker_st { Annotation *annot; boolean isStart; } ATTRIBUTE_PACKED; Coordinate getInsertionMarkerPosition(InsertionMarker * marker) { if (marker->isStart) return getStart(marker->annot); else return getFinish(marker->annot); } int compareInsertionMarkers(const void *A, const void *B) { Coordinate Apos = getInsertionMarkerPosition((InsertionMarker *) A); Coordinate Bpos = getInsertionMarkerPosition((InsertionMarker *) B); if (Apos < Bpos) return -1; else if (Apos == Bpos) return 0; else return 1; } // Applies mergeSort to each insertion marker list (in order of position) static void orderInsertionMarkers(InsertionMarker ** insMarkers, IDnum * markerCounters, RoadMapArray * rdmaps) { IDnum sequenceIndex; IDnum sequenceCounter = rdmaps->length; velvetLog("Ordering insertion markers\n"); #ifdef _OPENMP #pragma omp parallel for #endif for (sequenceIndex = 1; sequenceIndex <= sequenceCounter; sequenceIndex++) { qsort(insMarkers[sequenceIndex], markerCounters[sequenceIndex], sizeof(InsertionMarker), compareInsertionMarkers); } } // Creates 
insertion marker lists static void setInsertionMarkers(RoadMapArray * rdmaps, IDnum * markerCounters, InsertionMarker ** veryLastMarker, InsertionMarker ** insertionMarkers) { IDnum sequenceCounter = rdmaps->length; IDnum sequenceIndex, sequenceIndex2; Coordinate totalCount = 0; RoadMap *rdmap; Annotation *annot = rdmaps->annotations; InsertionMarker *nextMarker, *newMarker; IDnum annotIndex, lastAnnotIndex; InsertionMarker **insMarkers = callocOrExit(rdmaps->length + 1, InsertionMarker *); // Counting insertion markers for (sequenceIndex = 1; sequenceIndex < sequenceCounter + 1; sequenceIndex++) { //velvetLog("Going through sequence %d\n", sequenceIndex); rdmap = getRoadMapInArray(rdmaps, sequenceIndex - 1); lastAnnotIndex = getAnnotationCount(rdmap); // Set insertion markers in previous sequences : for (annotIndex = 0; annotIndex < lastAnnotIndex; annotIndex++) { if (getAnnotSequenceID(annot) > 0) { markerCounters[getAnnotSequenceID(annot)] += 2; } else { markerCounters[-getAnnotSequenceID(annot)] += 2; } totalCount += 2; annot = getNextAnnotation(annot); } } // Allocating space *insertionMarkers = callocOrExit(totalCount, InsertionMarker); *veryLastMarker = *insertionMarkers + totalCount; // Pointing each node to its space nextMarker = *insertionMarkers; for (sequenceIndex = 1; sequenceIndex < sequenceCounter + 1; sequenceIndex++) { insMarkers[sequenceIndex] = nextMarker; nextMarker = nextMarker + markerCounters[sequenceIndex]; markerCounters[sequenceIndex] = 0; } // Filling up space with data annot = rdmaps->annotations; for (sequenceIndex = 1; sequenceIndex < sequenceCounter + 1; sequenceIndex++) { //velvetLog("Going through sequence %d\n", sequenceIndex); rdmap = getRoadMapInArray(rdmaps, sequenceIndex - 1); lastAnnotIndex = getAnnotationCount(rdmap); // Set insertion markers in previous sequences : for (annotIndex = 0; annotIndex < lastAnnotIndex; annotIndex++) { sequenceIndex2 = getAnnotSequenceID(annot); if (sequenceIndex2 > 0) { newMarker = insMarkers[sequenceIndex2] + (markerCounters[sequenceIndex2])++; newMarker->annot = annot; newMarker->isStart = true; newMarker = insMarkers[sequenceIndex2] + (markerCounters[sequenceIndex2])++; newMarker->annot = annot; newMarker->isStart = false; } else { incrementAnnotationCoordinates(annot); newMarker = insMarkers[-sequenceIndex2] + (markerCounters[-sequenceIndex2])++; newMarker->annot = annot; newMarker->isStart = true; newMarker = insMarkers[-sequenceIndex2] + (markerCounters[-sequenceIndex2])++; newMarker->annot = annot; newMarker->isStart = false; } annot = getNextAnnotation(annot); } } orderInsertionMarkers(insMarkers, markerCounters, rdmaps); free(insMarkers); } // Counts how many preNodes are to be created to allocate appropriate memory static void countPreNodes(RoadMapArray * rdmaps, PreGraph * preGraph, IDnum * markerCounters, InsertionMarker * insertionMarkers, InsertionMarker * veryLastMarker) { Annotation *annot = rdmaps->annotations; InsertionMarker *currentMarker = insertionMarkers; IDnum markerIndex, lastMarkerIndex; IDnum sequenceIndex; Coordinate currentPosition, nextStop; IDnum preNodeCounter = 0; RoadMap *rdmap; IDnum annotIndex, lastAnnotIndex; // Now that we have read all of the annotations, we go on to create the preNodes and tie them up for (sequenceIndex = 1; sequenceIndex <= sequenceCount_pg(preGraph); sequenceIndex++) { rdmap = getRoadMapInArray(rdmaps, sequenceIndex - 1); annotIndex = 0; lastAnnotIndex = getAnnotationCount(rdmap); markerIndex = 0; lastMarkerIndex = markerCounters[sequenceIndex]; currentPosition = 
0; while (annotIndex < lastAnnotIndex) { if (markerIndex == lastMarkerIndex || getPosition(annot) <= getInsertionMarkerPosition(currentMarker)) nextStop = getPosition(annot); else nextStop = getInsertionMarkerPosition (currentMarker); if (currentPosition != nextStop) { preNodeCounter++; currentPosition = nextStop; } while (markerIndex < lastMarkerIndex && getInsertionMarkerPosition(currentMarker) == currentPosition) { currentMarker++; markerIndex++; } while (annotIndex < lastAnnotIndex && getPosition(annot) == currentPosition) { annot = getNextAnnotation(annot); annotIndex++; } } while (markerIndex < lastMarkerIndex) { if (currentPosition == getInsertionMarkerPosition(currentMarker)) { currentMarker++; markerIndex++; } else { preNodeCounter++; currentPosition = getInsertionMarkerPosition (currentMarker); } } } allocatePreNodeSpace_pg(preGraph, preNodeCounter); } static void convertInsertionMarkers(InsertionMarker * insertionMarkers, InsertionMarker * veryLastMarker, IDnum * chains) { InsertionMarker *marker; Annotation *annot; for (marker = insertionMarkers; marker != veryLastMarker; marker++) { annot = marker->annot; if (getAnnotSequenceID(annot) > 0) { if (marker->isStart) { if (getStartID(annot) == 0) setStartID(annot, chains [getAnnotSequenceID (annot)]); else setStartID(annot, getStartID(annot) + 1); } } else { if (marker->isStart) setStartID(annot, -getStartID(annot)); else { if (getFinishID(annot) == 0) setFinishID(annot, -chains [-getAnnotSequenceID (annot)]); else setFinishID(annot, -getFinishID(annot) - 1); } } } free(insertionMarkers); } static void convertMarker(InsertionMarker * marker, IDnum nodeID) { if (marker->isStart) setStartID(marker->annot, nodeID); else setFinishID(marker->annot, nodeID); } // Creates the preNode using insertion marker and annotation lists for each sequence static void createPreNodes(RoadMapArray * rdmaps, PreGraph * preGraph, IDnum * markerCounters, InsertionMarker * insertionMarkers, InsertionMarker * veryLastMarker, IDnum * chains, char *sequenceFilename, int WORDLENGTH) { Annotation *annot = rdmaps->annotations; IDnum latestPreNodeID; InsertionMarker *currentMarker = insertionMarkers; IDnum sequenceIndex; Coordinate currentPosition, nextStop; IDnum preNodeCounter = 1; FILE *file = fopen(sequenceFilename, "r"); char line[50000]; int lineLength = 50000; Coordinate readIndex; boolean tooShort; Kmer initialKmer; char c; RoadMap *rdmap; IDnum annotIndex, lastAnnotIndex; IDnum markerIndex, lastMarkerIndex; if (file == NULL) exitErrorf(EXIT_FAILURE, true, "Could not read %s", sequenceFilename); // Reading sequence descriptor in first line if (!fgets(line, lineLength, file)) exitErrorf(EXIT_FAILURE, true, "%s incomplete.", sequenceFilename); // Now that we have read all of the annotations, we go on to create the preNodes and tie them up for (sequenceIndex = 1; sequenceIndex <= sequenceCount_pg(preGraph); sequenceIndex++) { if (sequenceIndex % 1000000 == 0) velvetLog("Sequence %li / %li\n", (long) sequenceIndex, (long) sequenceCount_pg(preGraph)); while (line[0] != '>') if (!fgets(line, lineLength, file)) exitErrorf(EXIT_FAILURE, true, "%s incomplete.", sequenceFilename); rdmap = getRoadMapInArray(rdmaps, sequenceIndex - 1); annotIndex = 0; lastAnnotIndex = getAnnotationCount(rdmap); markerIndex = 0; lastMarkerIndex = markerCounters[sequenceIndex]; currentPosition = 0; // Reading first (k-1) nucleotides tooShort = false; clearKmer(&initialKmer); //velvetLog("Initial kmer: "); for (readIndex = 0; readIndex < WORDLENGTH - 1; readIndex++) { c = getc(file); 
while (c == '\n' || c == '\r') c = getc(file); if (c == '>' || c == 'M' || c == EOF) { ungetc(c, file); tooShort = true; break; } switch (c) { case 'A': case 'N': pushNucleotide(&initialKmer, ADENINE); break; case 'C': pushNucleotide(&initialKmer, CYTOSINE); break; case 'G': pushNucleotide(&initialKmer, GUANINE); break; case 'T': pushNucleotide(&initialKmer, THYMINE); break; default: velvetLog ("Irregular sequence file: are you sure your Sequence and Roadmap file come from the same source?\n"); fflush(stdout); abort(); } } if (tooShort) { //velvetLog("Skipping short read.. %d\n", sequenceIndex); chains[sequenceIndex] = preNodeCounter; if (!fgets(line, lineLength, file) && sequenceIndex < sequenceCount_pg(preGraph)) exitErrorf(EXIT_FAILURE, true, "%s incomplete.", sequenceFilename); continue; } latestPreNodeID = 0; while (annotIndex < lastAnnotIndex) { if (markerIndex == lastMarkerIndex || getPosition(annot) <= getInsertionMarkerPosition(currentMarker)) nextStop = getPosition(annot); else { nextStop = getInsertionMarkerPosition (currentMarker); } if (currentPosition != nextStop) { //if (sequenceIndex == 481) // velvetLog("Adding pre nodes from %lli to %lli\n", (long long) currentPosition, (long long) nextStop); addPreNodeToPreGraph_pg(preGraph, currentPosition, nextStop, file, &initialKmer, preNodeCounter); if (latestPreNodeID == 0) { chains[sequenceIndex] = preNodeCounter; } latestPreNodeID = preNodeCounter++; currentPosition = nextStop; } while (markerIndex < lastMarkerIndex && getInsertionMarkerPosition(currentMarker) == nextStop) { convertMarker(currentMarker, latestPreNodeID); currentMarker++; markerIndex++; } while (annotIndex < lastAnnotIndex && getPosition(annot) == nextStop) { for (readIndex = 0; readIndex < getAnnotationLength(annot); readIndex++) { c = getc(file); while (!isalpha(c)) c = getc(file); //if (sequenceIndex == 481) // velvetLog("(%c)", c); switch (c) { case 'A': case 'N': pushNucleotide(&initialKmer, ADENINE); break; case 'C': pushNucleotide(&initialKmer, CYTOSINE); break; case 'G': pushNucleotide(&initialKmer, GUANINE); break; case 'T': pushNucleotide(&initialKmer, THYMINE); break; default: velvetLog ("Irregular sequence file: are you sure your Sequence and Roadmap file come from the same source?\n"); fflush(stdout); #ifdef DEBUG abort(); #endif exit(1); } } annot = getNextAnnotation(annot); annotIndex++; } } while (markerIndex < lastMarkerIndex) { if (currentPosition == getInsertionMarkerPosition(currentMarker)) { convertMarker(currentMarker, latestPreNodeID); currentMarker++; markerIndex++; } else { nextStop = getInsertionMarkerPosition (currentMarker); //if (sequenceIndex == 481) // velvetLog("Adding pre nodes from %lli to %lli\n", (long long) currentPosition, (long long) nextStop); addPreNodeToPreGraph_pg(preGraph, currentPosition, nextStop, file, &initialKmer, preNodeCounter); if (latestPreNodeID == 0) chains[sequenceIndex] = preNodeCounter; latestPreNodeID = preNodeCounter++; currentPosition = getInsertionMarkerPosition (currentMarker); } } // End of sequence if (!fgets(line, lineLength, file) && sequenceIndex < sequenceCount_pg(preGraph)) exitErrorf(EXIT_FAILURE, true, "%s incomplete.", sequenceFilename); //velvetLog(" \n"); if (latestPreNodeID == 0) chains[sequenceIndex] = preNodeCounter; } free(markerCounters); fclose(file); } static void connectPreNodeToTheNext(IDnum * currentPreNodeID, IDnum nextPreNodeID, Coordinate * currentPosition, IDnum sequenceIndex, boolean isReference, PreGraph * preGraph) { if (nextPreNodeID == 0) return; #ifdef _OPENMP 
lockTwoNodes(*currentPreNodeID, nextPreNodeID); #endif if (isReference) incrementNodeReferenceMarkerCount_pg(preGraph, nextPreNodeID); if (*currentPreNodeID != 0) createPreArc_pg(*currentPreNodeID, nextPreNodeID, preGraph); #ifdef _OPENMP unLockTwoNodes(*currentPreNodeID, nextPreNodeID); #endif *currentPreNodeID = nextPreNodeID; *currentPosition += getPreNodeLength_pg(*currentPreNodeID, preGraph); } static IDnum chooseNextInternalPreNode(IDnum currentPreNodeID, IDnum sequenceIndex, PreGraph * preGraph, IDnum * chains) { if (currentPreNodeID >= preNodeCount_pg(preGraph)) return 0; if (sequenceIndex >= sequenceCount_pg(preGraph)) return currentPreNodeID + 1; if (currentPreNodeID + 1 < chains[sequenceIndex + 1]) return currentPreNodeID + 1; return 0; } static void connectAnnotation(IDnum * currentPreNodeID, Annotation * annot, Coordinate * currentPosition, IDnum sequenceIndex, boolean isReference, PreGraph * preGraph) { IDnum nextPreNodeID = getStartID(annot); connectPreNodeToTheNext(currentPreNodeID, nextPreNodeID, currentPosition, sequenceIndex, isReference, preGraph); while (*currentPreNodeID != getFinishID(annot)) { nextPreNodeID = (*currentPreNodeID) + 1; connectPreNodeToTheNext(currentPreNodeID, nextPreNodeID, currentPosition, sequenceIndex, isReference, preGraph); } } static void reConnectAnnotation(IDnum * currentPreNodeID, Annotation * annot, Coordinate * currentPosition, IDnum sequenceIndex, PreGraph * preGraph, PreMarker ** previous) { IDnum nextPreNodeID = getStartID(annot); #ifdef _OPENMP lockNode(nextPreNodeID); #endif *previous = addPreMarker_pg(preGraph, nextPreNodeID, sequenceIndex, currentPosition, *previous); #ifdef _OPENMP unLockNode(nextPreNodeID); #endif while (*currentPreNodeID != getFinishID(annot)) { nextPreNodeID = (*currentPreNodeID) + 1; #ifdef _OPENMP lockNode(nextPreNodeID); #endif *previous = addPreMarker_pg(preGraph, nextPreNodeID, sequenceIndex, currentPosition, *previous); #ifdef _OPENMP unLockNode(nextPreNodeID); #endif *currentPreNodeID = nextPreNodeID; } } static void createPreMarkers(RoadMapArray * rdmaps, PreGraph * preGraph, IDnum * chains) { IDnum sequenceIndex; IDnum referenceCount = rdmaps->referenceCount; #ifndef _OPENMP Annotation *annot = rdmaps->annotations; #endif #ifdef _OPENMP int threads = omp_get_max_threads(); if (threads > 8) threads = 8; #pragma omp parallel for num_threads(threads) #endif for (sequenceIndex = 1; sequenceIndex <= referenceCount; sequenceIndex++) { #ifdef _OPENMP Annotation *annot = getAnnotationInArray(rdmaps->annotations, annotationOffset[sequenceIndex - 1]); #endif RoadMap *rdmap; Coordinate currentPosition, currentInternalPosition; IDnum currentPreNodeID, nextInternalPreNodeID; IDnum annotIndex, lastAnnotIndex; PreMarker * previous; if (sequenceIndex % 1000000 == 0) velvetLog("Connecting %li / %li\n", (long) sequenceIndex, (long) sequenceCount_pg(preGraph)); rdmap = getRoadMapInArray(rdmaps, sequenceIndex - 1); annotIndex = 0; lastAnnotIndex = getAnnotationCount(rdmap); nextInternalPreNodeID = chooseNextInternalPreNode (chains[sequenceIndex] - 1, sequenceIndex, preGraph, chains); previous = NULL; currentPosition = 0; currentInternalPosition = 0; currentPreNodeID = 0; // Recursion up to last annotation while (annotIndex < lastAnnotIndex || nextInternalPreNodeID != 0) { if (annotIndex == lastAnnotIndex || (nextInternalPreNodeID != 0 && currentInternalPosition < getPosition(annot))) { #ifdef _OPENMP lockNode(nextInternalPreNodeID); #endif previous = addPreMarker_pg(preGraph, nextInternalPreNodeID, sequenceIndex, 
&currentPosition, previous); #ifdef _OPENMP unLockNode(nextInternalPreNodeID); #endif currentPreNodeID = nextInternalPreNodeID; nextInternalPreNodeID = chooseNextInternalPreNode (currentPreNodeID, sequenceIndex, preGraph, chains); currentInternalPosition += getPreNodeLength_pg(currentPreNodeID, preGraph); } else { reConnectAnnotation(&currentPreNodeID, annot, &currentPosition, sequenceIndex, preGraph, &previous); annot = getNextAnnotation(annot); annotIndex++; } } } } // Threads each sequences and creates preArcs according to road map indications static void connectPreNodes(RoadMapArray * rdmaps, PreGraph * preGraph, IDnum * chains) { IDnum sequenceIndex; IDnum referenceCount = rdmaps->referenceCount; #ifdef _OPENMP annotationOffset = mallocOrExit(rdmaps->length + 1, Coordinate); annotationOffset[0] = 0; for (sequenceIndex = 1; sequenceIndex <= rdmaps->length; sequenceIndex++) annotationOffset[sequenceIndex] = annotationOffset[sequenceIndex - 1] + getAnnotationCount(getRoadMapInArray(rdmaps, sequenceIndex - 1)); #else Annotation *annot = rdmaps->annotations; #endif if (rdmaps->referenceCount > 0) allocatePreMarkerCountSpace_pg(preGraph); #ifdef _OPENMP int threads = omp_get_max_threads(); if (threads > 8) threads = 8; #pragma omp parallel for num_threads(threads) #endif for (sequenceIndex = 1; sequenceIndex <= sequenceCount_pg(preGraph); sequenceIndex++) { #ifdef _OPENMP Annotation *annot = getAnnotationInArray(rdmaps->annotations, annotationOffset[sequenceIndex - 1]); #endif RoadMap *rdmap; Coordinate currentPosition, currentInternalPosition; IDnum currentPreNodeID, nextInternalPreNodeID; IDnum annotIndex, lastAnnotIndex; boolean isReference; if (sequenceIndex % 1000000 == 0) velvetLog("Connecting %li / %li\n", (long) sequenceIndex, (long) sequenceCount_pg(preGraph)); rdmap = getRoadMapInArray(rdmaps, sequenceIndex - 1); annotIndex = 0; lastAnnotIndex = getAnnotationCount(rdmap); nextInternalPreNodeID = chooseNextInternalPreNode (chains[sequenceIndex] - 1, sequenceIndex, preGraph, chains); isReference = (sequenceIndex <= referenceCount); currentPosition = 0; currentInternalPosition = 0; currentPreNodeID = 0; // Recursion up to last annotation while (annotIndex < lastAnnotIndex || nextInternalPreNodeID != 0) { if (annotIndex == lastAnnotIndex || (nextInternalPreNodeID != 0 && currentInternalPosition < getPosition(annot))) { connectPreNodeToTheNext(&currentPreNodeID, nextInternalPreNodeID, &currentPosition, sequenceIndex, isReference, preGraph); nextInternalPreNodeID = chooseNextInternalPreNode (currentPreNodeID, sequenceIndex, preGraph, chains); currentInternalPosition += getPreNodeLength_pg(currentPreNodeID, preGraph); } else { connectAnnotation(&currentPreNodeID, annot, &currentPosition, sequenceIndex, isReference, preGraph); annot = getNextAnnotation(annot); annotIndex++; } } } if (rdmaps->referenceCount > 0) { allocatePreMarkerSpace_pg(preGraph); createPreMarkers(rdmaps, preGraph, chains); } #ifdef _OPENMP free(annotationOffset); annotationOffset = NULL; #endif } // Post construction memory deallocation routine (of sorts, could certainly be optimized) static void cleanUpMemory(PreGraph * preGraph, RoadMapArray * rdmaps, IDnum * chains) { // Killing off roadmaps destroyRoadMapArray(rdmaps); // Finishing off the chain markers free(chains); } // The full monty, wrapped up in one function PreGraph *newPreGraph_pg(RoadMapArray * rdmapArray, char *sequenceFilename) { int WORDLENGTH = rdmapArray->WORDLENGTH; IDnum sequenceCount = rdmapArray->length; IDnum *markerCounters = 
callocOrExit(sequenceCount + 1, IDnum); IDnum *chains = callocOrExit(sequenceCount + 1, IDnum); InsertionMarker *insertionMarkers; InsertionMarker *veryLastMarker; PreGraph *preGraph = emptyPreGraph_pg(sequenceCount, rdmapArray->referenceCount, rdmapArray->WORDLENGTH, rdmapArray->double_strand); velvetLog("Creating insertion markers\n"); setInsertionMarkers(rdmapArray, markerCounters, &veryLastMarker, &insertionMarkers); velvetLog("Counting preNodes\n"); countPreNodes(rdmapArray, preGraph, markerCounters, insertionMarkers, veryLastMarker); velvetLog("%li preNodes counted, creating them now\n", (long) preNodeCount_pg(preGraph)); createPreNodes(rdmapArray, preGraph, markerCounters, insertionMarkers, veryLastMarker, chains, sequenceFilename, WORDLENGTH); velvetLog("Adjusting marker info...\n"); convertInsertionMarkers(insertionMarkers, veryLastMarker, chains); #ifdef _OPENMP createNodeLocks(preGraph); #endif velvetLog("Connecting preNodes\n"); connectPreNodes(rdmapArray, preGraph, chains); velvetLog("Cleaning up memory\n"); cleanUpMemory(preGraph, rdmapArray, chains); #ifdef _OPENMP free(nodeLocks); nodeLocks = NULL; #endif velvetLog("Done creating preGraph\n"); return preGraph; }
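The lockTwoNodes()/unLockTwoNodes() helpers above avoid deadlock by always acquiring the lower-numbered node lock first. A stripped-down sketch of that ordering idea, independent of Velvet's types, for any pair of slots guarded by an array of omp_lock_t:

#include <omp.h>

/* Sketch: acquire two locks from an array in a fixed (ascending) order
 * so two threads locking the same pair can never deadlock. */
static void lock_pair(omp_lock_t *locks, long a, long b) {
  if (a == b) {
    omp_set_lock(&locks[a]);
  } else if (a < b) {
    omp_set_lock(&locks[a]);
    omp_set_lock(&locks[b]);
  } else {
    omp_set_lock(&locks[b]);
    omp_set_lock(&locks[a]);
  }
}

static void unlock_pair(omp_lock_t *locks, long a, long b) {
  omp_unset_lock(&locks[a]);
  if (a != b)
    omp_unset_lock(&locks[b]);
}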
dftcommon.c
// Copyright Naoki Shibata 2010 - 2017. // Distributed under the Boost Software License, Version 1.0. // (See accompanying file LICENSE.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <string.h> #include <ctype.h> #include <inttypes.h> #include <math.h> #include <assert.h> #ifdef _OPENMP #include <omp.h> #endif #include "misc.h" #include "sleef.h" #define IMPORT_IS_EXPORT #include "sleefdft.h" #include "dispatchparam.h" #include "dftcommon.h" #include "common.h" #include "arraymap.h" #define MAGIC_FLOAT 0x31415926 #define MAGIC_DOUBLE 0x27182818 #define MAGIC_LONGDOUBLE 0x14142135 #define MAGIC_QUAD 0x33166247 #define MAGIC2D_FLOAT 0x22360679 #define MAGIC2D_DOUBLE 0x17320508 #define MAGIC2D_LONGDOUBLE 0x26457513 #define MAGIC2D_QUAD 0x36055512 const char *configStr[] = { "ST", "ST stream", "MT", "MT stream" }; static int parsePathStr(char *p, int *path, int *config, int pathLenMax, int log2len) { int pathLen = 0, l2l = 0; for(;;) { while(*p == ' ') p++; if (*p == '\0') break; if (!isdigit(*p)) return -1; pathLen++; if (pathLen >= pathLenMax) return -2; int n = 0; while(isdigit(*p)) n = n * 10 + *p++ - '0'; if (n > MAXBUTWIDTH) return -6; path[pathLen-1] = n; l2l += n; config[pathLen-1] = 0; if (*p != '(') continue; int c; for(c=3;c>=0;c--) if (strncmp(p+1, configStr[c], strlen(configStr[c])) == 0) break; if (c == -1) return -3; p += strlen(configStr[c]) + 1; if (*p != ')') return -4; p++; config[pathLen-1] = c; } if (l2l != log2len) return -5; return pathLen; } EXPORT void SleefDFT_setPath(SleefDFT *p, char *pathStr) { assert(p != NULL && (p->magic == MAGIC_FLOAT || p->magic == MAGIC_DOUBLE || p->magic == MAGIC_LONGDOUBLE || p->magic == MAGIC_QUAD)); int path[32], config[32]; int pathLen = parsePathStr(pathStr, path, config, 31, p->log2len); if (pathLen < 0) { if ((p->mode & SLEEF_MODE_VERBOSE) != 0) printf("Error %d in parsing path string : %s\n", pathLen, pathStr); return; } for(uint32_t j = 0;j <= p->log2len;j++) p->bestPath[j] = 0; for(int level = p->log2len, j=0;level > 0 && j < pathLen;) { p->bestPath[level] = path[j]; p->bestPathConfig[level] = config[j]; level -= path[j]; j++; } p->pathLen = 0; for(int j = p->log2len;j >= 0;j--) if (p->bestPath[j] != 0) p->pathLen++; if ((p->mode & SLEEF_MODE_VERBOSE) != 0) { printf("Set path : "); for(int j = p->log2len;j >= 0;j--) if (p->bestPath[j] != 0) printf("%d(%s) ", p->bestPath[j], configStr[p->bestPathConfig[j]]); printf("\n"); } } void freeTables(SleefDFT *p) { for(int N=1;N<=MAXBUTWIDTH;N++) { for(uint32_t level=N;level<=p->log2len;level++) { Sleef_free(p->tbl[N][level]); } free(p->tbl[N]); p->tbl[N] = NULL; } } EXPORT void SleefDFT_dispose(SleefDFT *p) { if (p != NULL && (p->magic == MAGIC2D_FLOAT || p->magic == MAGIC2D_DOUBLE || p->magic == MAGIC2D_LONGDOUBLE || p->magic == MAGIC2D_QUAD)) { Sleef_free(p->tBuf); SleefDFT_dispose(p->instH); if (p->hlen != p->vlen) SleefDFT_dispose(p->instV); p->magic = 0; free(p); return; } assert(p != NULL && (p->magic == MAGIC_FLOAT || p->magic == MAGIC_DOUBLE || p->magic == MAGIC_LONGDOUBLE || p->magic == MAGIC_QUAD)); if (p->log2len <= 1) { p->magic = 0; free(p); return; } if ((p->mode & SLEEF_MODE_REAL) != 0) { Sleef_free(p->rtCoef1); Sleef_free(p->rtCoef0); p->rtCoef0 = p->rtCoef1 = NULL; } for(int level = p->log2len;level >= 1;level--) { Sleef_free(p->perm[level]); } free(p->perm); p->perm = NULL; freeTables(p); p->magic = 0; free(p); } uint32_t ilog2(uint32_t q) { static const uint32_t tab[] = 
{0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4}; uint32_t r = 0,qq; if (q & 0xffff0000) r = 16; q >>= r; qq = q | (q >> 1); qq |= (qq >> 2); qq = ((qq & 0x10) >> 4) | ((qq & 0x100) >> 7) | ((qq & 0x1000) >> 10); return r + tab[qq] * 4 + tab[q >> (tab[qq] * 4)] - 1; } // char *dftPlanFilePath = NULL; char *archID = NULL; uint64_t planMode = SLEEF_PLAN_REFERTOENVVAR; ArrayMap *planMap = NULL; int planFilePathSet = 0, planFileLoaded = 0; #ifdef _OPENMP omp_lock_t planMapLock; int planMapLockInitialized = 0; #endif static void initPlanMapLock() { #ifdef _OPENMP #pragma omp critical { if (!planMapLockInitialized) { planMapLockInitialized = 1; omp_init_lock(&planMapLock); } } #endif } static void planMap_clear() { if (planMap != NULL) ArrayMap_dispose(planMap); planMap = NULL; } EXPORT void SleefDFT_setPlanFilePath(const char *path, const char *arch, uint64_t mode) { initPlanMapLock(); if ((mode & SLEEF_PLAN_RESET) != 0) { planMap_clear(); planFileLoaded = 0; planFilePathSet = 0; } if (dftPlanFilePath != NULL) free(dftPlanFilePath); if (path != NULL) { dftPlanFilePath = malloc(strlen(path)+10); strcpy(dftPlanFilePath, path); } else { dftPlanFilePath = NULL; } if (archID != NULL) free(archID); if (arch == NULL) arch = Sleef_getCpuIdString(); archID = malloc(strlen(arch)+10); strcpy(archID, arch); planMode = mode; planFilePathSet = 1; } static void loadPlanFromFile() { if (planFilePathSet == 0 && (planMode & SLEEF_PLAN_REFERTOENVVAR) != 0) { char *s = getenv(ENVVAR); if (s != NULL) SleefDFT_setPlanFilePath(s, NULL, planMode); } if (planMap != NULL) ArrayMap_dispose(planMap); if (dftPlanFilePath != NULL && (planMode & SLEEF_PLAN_RESET) == 0) { planMap = ArrayMap_load(dftPlanFilePath, archID, PLANFILEID, (planMode & SLEEF_PLAN_NOLOCK) == 0); } if (planMap == NULL) planMap = initArrayMap(); planFileLoaded = 1; } static void savePlanToFile() { assert(planFileLoaded); if ((planMode & SLEEF_PLAN_READONLY) == 0 && dftPlanFilePath != NULL) { ArrayMap_save(planMap, dftPlanFilePath, archID, PLANFILEID); } } #define CATBIT 8 #define BASETYPEIDBIT 2 #define LOG2LENBIT 8 #define DIRBIT 1 #define BUTSTATBIT 16 static uint64_t keyButStat(int baseTypeID, int log2len, int dir, int butStat) { dir = (dir & SLEEF_MODE_BACKWARD) == 0; int cat = 0; uint64_t k = 0; k = (k << BUTSTATBIT) | (butStat & ~(~(uint64_t)0 << BUTSTATBIT)); k = (k << LOG2LENBIT) | (log2len & ~(~(uint64_t)0 << LOG2LENBIT)); k = (k << DIRBIT) | (dir & ~(~(uint64_t)0 << LOG2LENBIT)); k = (k << BASETYPEIDBIT) | (baseTypeID & ~(~(uint64_t)0 << BASETYPEIDBIT)); k = (k << CATBIT) | (cat & ~(~(uint64_t)0 << CATBIT)); return k; } #define LEVELBIT LOG2LENBIT #define BUTCONFIGBIT 8 #define TRANSCONFIGBIT 8 static uint64_t keyTrans(int baseTypeID, int hlen, int vlen, int transConfig) { int max = MAX(hlen, vlen), min = MIN(hlen, vlen); int cat = 2; uint64_t k = 0; k = (k << TRANSCONFIGBIT) | (transConfig & ~(~(uint64_t)0 << TRANSCONFIGBIT)); k = (k << LOG2LENBIT) | (max & ~(~(uint64_t)0 << LOG2LENBIT)); k = (k << LOG2LENBIT) | (min & ~(~(uint64_t)0 << LOG2LENBIT)); k = (k << BASETYPEIDBIT) | (baseTypeID & ~(~(uint64_t)0 << BASETYPEIDBIT)); k = (k << CATBIT) | (cat & ~(~(uint64_t)0 << CATBIT)); return k; } static uint64_t keyPath(int baseTypeID, int log2len, int dir, int level, int config) { dir = (dir & SLEEF_MODE_BACKWARD) == 0; int cat = 3; uint64_t k = 0; k = (k << BUTCONFIGBIT) | (config & ~(~(uint64_t)0 << BUTCONFIGBIT)); k = (k << LEVELBIT) | (level & ~(~(uint64_t)0 << LEVELBIT)); k = (k << LOG2LENBIT) | (log2len & ~(~(uint64_t)0 << LOG2LENBIT)); k = (k << DIRBIT) | 
(dir & ~(~(uint64_t)0 << LOG2LENBIT)); k = (k << BASETYPEIDBIT) | (baseTypeID & ~(~(uint64_t)0 << BASETYPEIDBIT)); k = (k << CATBIT) | (cat & ~(~(uint64_t)0 << CATBIT)); return k; } static uint64_t keyPathConfig(int baseTypeID, int log2len, int dir, int level, int config) { dir = (dir & SLEEF_MODE_BACKWARD) == 0; int cat = 4; uint64_t k = 0; k = (k << BUTCONFIGBIT) | (config & ~(~(uint64_t)0 << BUTCONFIGBIT)); k = (k << LEVELBIT) | (level & ~(~(uint64_t)0 << LEVELBIT)); k = (k << LOG2LENBIT) | (log2len & ~(~(uint64_t)0 << LOG2LENBIT)); k = (k << DIRBIT) | (dir & ~(~(uint64_t)0 << LOG2LENBIT)); k = (k << BASETYPEIDBIT) | (baseTypeID & ~(~(uint64_t)0 << BASETYPEIDBIT)); k = (k << CATBIT) | (cat & ~(~(uint64_t)0 << CATBIT)); return k; } static uint64_t planMap_getU64(uint64_t key) { char *s = ArrayMap_get(planMap, key); if (s == NULL) return 0; uint64_t ret; if (sscanf(s, "%" SCNx64, &ret) != 1) return 0; return ret; } static void planMap_putU64(uint64_t key, uint64_t value) { char *s = malloc(100); sprintf(s, "%" PRIx64, value); s = ArrayMap_put(planMap, key, s); if (s != NULL) free(s); } int PlanManager_loadMeasurementResultsP(SleefDFT *p, int pathCat) { assert(p != NULL && (p->magic == MAGIC_FLOAT || p->magic == MAGIC_DOUBLE || p->magic == MAGIC_LONGDOUBLE || p->magic == MAGIC_QUAD)); initPlanMapLock(); #ifdef _OPENMP omp_set_lock(&planMapLock); #endif if (!planFileLoaded) loadPlanFromFile(); int stat = planMap_getU64(keyButStat(p->baseTypeID, p->log2len, p->mode, pathCat+10)); if (stat == 0) { #ifdef _OPENMP omp_unset_lock(&planMapLock); #endif return 0; } int ret = 1; for(int j = p->log2len;j >= 0;j--) { p->bestPath[j] = planMap_getU64(keyPath(p->baseTypeID, p->log2len, p->mode, j, pathCat)); p->bestPathConfig[j] = planMap_getU64(keyPathConfig(p->baseTypeID, p->log2len, p->mode, j, pathCat)); if (p->bestPath[j] > MAXBUTWIDTH) ret = 0; } p->pathLen = 0; for(int j = p->log2len;j >= 0;j--) if (p->bestPath[j] != 0) p->pathLen++; #ifdef _OPENMP omp_unset_lock(&planMapLock); #endif return ret; } void PlanManager_saveMeasurementResultsP(SleefDFT *p, int pathCat) { assert(p != NULL && (p->magic == MAGIC_FLOAT || p->magic == MAGIC_DOUBLE || p->magic == MAGIC_LONGDOUBLE || p->magic == MAGIC_QUAD)); initPlanMapLock(); #ifdef _OPENMP omp_set_lock(&planMapLock); #endif if (!planFileLoaded) loadPlanFromFile(); if (planMap_getU64(keyButStat(p->baseTypeID, p->log2len, p->mode, pathCat+10)) != 0) { #ifdef _OPENMP omp_unset_lock(&planMapLock); #endif return; } for(int j = p->log2len;j >= 0;j--) { planMap_putU64(keyPath(p->baseTypeID, p->log2len, p->mode, j, pathCat), p->bestPath[j]); planMap_putU64(keyPathConfig(p->baseTypeID, p->log2len, p->mode, j, pathCat), p->bestPathConfig[j]); } planMap_putU64(keyButStat(p->baseTypeID, p->log2len, p->mode, pathCat+10), 1); if ((planMode & SLEEF_PLAN_READONLY) == 0) savePlanToFile(); #ifdef _OPENMP omp_unset_lock(&planMapLock); #endif } int PlanManager_loadMeasurementResultsT(SleefDFT *p) { assert(p != NULL && (p->magic == MAGIC2D_FLOAT || p->magic == MAGIC2D_DOUBLE || p->magic == MAGIC2D_LONGDOUBLE || p->magic == MAGIC2D_QUAD)); initPlanMapLock(); int ret = 0; #ifdef _OPENMP omp_set_lock(&planMapLock); #endif if (!planFileLoaded) loadPlanFromFile(); p->tmNoMT = planMap_getU64(keyTrans(p->baseTypeID, p->log2hlen, p->log2vlen, 0)); p->tmMT = planMap_getU64(keyTrans(p->baseTypeID, p->log2hlen, p->log2vlen, 1)); #ifdef _OPENMP omp_unset_lock(&planMapLock); #endif return p->tmNoMT != 0; } void PlanManager_saveMeasurementResultsT(SleefDFT *p) { assert(p != NULL && 
(p->magic == MAGIC2D_FLOAT || p->magic == MAGIC2D_DOUBLE || p->magic == MAGIC2D_LONGDOUBLE || p->magic == MAGIC2D_QUAD)); initPlanMapLock(); int ret = 0; #ifdef _OPENMP omp_set_lock(&planMapLock); #endif if (!planFileLoaded) loadPlanFromFile(); planMap_putU64(keyTrans(p->baseTypeID, p->log2hlen, p->log2vlen, 0), p->tmNoMT); planMap_putU64(keyTrans(p->baseTypeID, p->log2hlen, p->log2vlen, 1), p->tmMT ); if ((planMode & SLEEF_PLAN_READONLY) == 0) savePlanToFile(); #ifdef _OPENMP omp_unset_lock(&planMapLock); #endif }
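The key* helpers above pack several small integer fields (category, base type, direction, level, configuration) into a single uint64_t so a plan entry can be looked up with one ArrayMap key. A minimal generic sketch of the same packing scheme, with hypothetical field widths chosen only for illustration:

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

/* Sketch: pack fixed-width fields into one 64-bit key, most-significant
 * field first, in the style of keyButStat()/keyPath(). */
#define FIELD_MASK(bits) (~(~(uint64_t)0 << (bits)))

static uint64_t pack3(unsigned a, unsigned b, unsigned c) {
  uint64_t k = 0;
  k = (k << 16) | (a & FIELD_MASK(16));   /* e.g. a statistics field */
  k = (k <<  8) | (b & FIELD_MASK(8));    /* e.g. log2 of the length */
  k = (k <<  8) | (c & FIELD_MASK(8));    /* e.g. entry category     */
  return k;
}

static void unpack3(uint64_t k, unsigned *a, unsigned *b, unsigned *c) {
  *c = (unsigned)(k & FIELD_MASK(8));  k >>= 8;
  *b = (unsigned)(k & FIELD_MASK(8));  k >>= 8;
  *a = (unsigned)(k & FIELD_MASK(16));
}

int main(void) {
  unsigned a, b, c;
  unpack3(pack3(0x1234, 20, 3), &a, &b, &c);
  assert(a == 0x1234 && b == 20 && c == 3);
  printf("round-trip ok\n");
  return 0;
}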